Initial vibecoded proof of concept
parent 74812459af
commit 461318a656
61 changed files with 13306 additions and 0 deletions
402  lua/notex/utils/errors.lua  Normal file
@@ -0,0 +1,402 @@
-- Centralized error handling and recovery system
local M = {}

local logging = require('notex.utils.logging')

-- Error types with specific handling strategies
local ERROR_TYPES = {
  DATABASE_CONNECTION = {
    category = "database",
    recoverable = true,
    retry_strategy = "exponential_backoff",
    max_retries = 3,
    user_message = "Database connection error. Retrying..."
  },
  DATABASE_QUERY = {
    category = "database",
    recoverable = false,
    retry_strategy = "none",
    max_retries = 0,
    user_message = "Query execution failed. Please check your query syntax."
  },
  FILE_NOT_FOUND = {
    category = "filesystem",
    recoverable = true,
    retry_strategy = "immediate",
    max_retries = 1,
    user_message = "File not found. It may have been moved or deleted."
  },
  FILE_PARSE_ERROR = {
    category = "parsing",
    recoverable = false,
    retry_strategy = "none",
    max_retries = 0,
    user_message = "Failed to parse file. Please check the file format."
  },
  QUERY_SYNTAX_ERROR = {
    category = "query",
    recoverable = false,
    retry_strategy = "none",
    max_retries = 0,
    user_message = "Query syntax error. Please check your query syntax."
  },
  VALIDATION_ERROR = {
    category = "validation",
    recoverable = false,
    retry_strategy = "none",
    max_retries = 0,
    user_message = "Validation error. Please check your input."
  },
  UI_ERROR = {
    category = "ui",
    recoverable = true,
    retry_strategy = "immediate",
    max_retries = 1,
    user_message = "UI error. Attempting to recover..."
  },
  PERMISSION_ERROR = {
    category = "filesystem",
    recoverable = false,
    retry_strategy = "none",
    max_retries = 0,
    user_message = "Permission denied. Please check file permissions."
  },
  NETWORK_ERROR = {
    category = "network",
    recoverable = true,
    retry_strategy = "exponential_backoff",
    max_retries = 3,
    user_message = "Network error. Retrying..."
  },
  PERFORMANCE_TIMEOUT = {
    category = "performance",
    recoverable = true,
    retry_strategy = "immediate",
    max_retries = 1,
    user_message = "Operation timed out. Retrying with simpler approach..."
  }
}

-- Error state tracking
local error_state = {
  recent_errors = {},
  error_counts = {},
  last_recovery_attempt = {},
  recovery_in_progress = {}
}

-- Create standardized error object
function M.create_error(error_type, message, context, original_error)
  local error_def = ERROR_TYPES[error_type] or ERROR_TYPES.UI_ERROR

  local error_obj = {
    type = error_type,
    message = message,
    context = context or {},
    original_error = original_error,
    timestamp = os.time(),
    recoverable = error_def.recoverable,
    category = error_def.category,
    user_message = error_def.user_message,
    retry_strategy = error_def.retry_strategy,
    max_retries = error_def.max_retries,
    error_id = M.generate_error_id()
  }

  -- Track error
  M.track_error(error_obj)

  return error_obj
end
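-- Illustrative usage (a sketch; the file path is hypothetical, and the
-- field values shown are the FILE_NOT_FOUND defaults from ERROR_TYPES above):
--
--   local err = M.create_error("FILE_NOT_FOUND", "stat() failed",
--     { file_path = "/notes/todo.md" })
--   -- err.recoverable == true, err.max_retries == 1
--   -- err.user_message == "File not found. It may have been moved or deleted."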
-- Generate unique error ID
function M.generate_error_id()
  return string.format("ERR_%d_%s", os.time(), math.random(1000, 9999))
end

-- Track error occurrence
function M.track_error(error_obj)
  -- Add to recent errors
  table.insert(error_state.recent_errors, error_obj)

  -- Keep only last 50 errors
  if #error_state.recent_errors > 50 then
    table.remove(error_state.recent_errors, 1)
  end

  -- Update error counts
  local key = error_obj.type
  error_state.error_counts[key] = (error_state.error_counts[key] or 0) + 1

  -- Log the error
  logging.handle_error(error_obj.message, error_obj.category, error_obj)
end

-- Check if error should be retried
function M.should_retry(error_obj, current_attempt)
  if not error_obj.recoverable then
    return false, "Error is not recoverable"
  end

  if current_attempt >= error_obj.max_retries then
    return false, "Maximum retries exceeded"
  end

  -- Check if we recently attempted recovery for this error type
  local last_attempt = error_state.last_recovery_attempt[error_obj.type]
  if last_attempt and (os.time() - last_attempt) < 5 then
    return false, "Recovery attempt too recent"
  end

  return true, "Retry allowed"
end

-- Execute operation with error handling and recovery
function M.safe_execute(operation, error_type, context, func, ...)
  local current_attempt = 0
  local max_attempts = (ERROR_TYPES[error_type] and ERROR_TYPES[error_type].max_retries or 0) + 1

  while current_attempt < max_attempts do
    local success, result = pcall(func, ...)

    if success then
      -- Reset recovery state on success
      error_state.last_recovery_attempt[error_type] = nil
      error_state.recovery_in_progress[error_type] = nil
      return true, result
    else
      current_attempt = current_attempt + 1
      local error_obj = M.create_error(error_type, result, context)

      local should_retry, retry_reason = M.should_retry(error_obj, current_attempt)

      if should_retry and current_attempt < max_attempts then
        error_state.last_recovery_attempt[error_type] = os.time()
        error_state.recovery_in_progress[error_type] = true

        -- Show user message
        if error_obj.user_message then
          vim.notify(error_obj.user_message, vim.log.levels.WARN)
        end

        -- Apply retry strategy
        M.apply_retry_strategy(error_obj.retry_strategy, current_attempt)

        logging.info("Retrying operation", {
          operation = operation,
          attempt = current_attempt,
          error_type = error_type,
          reason = retry_reason
        })
      else
        -- Final failure
        error_state.recovery_in_progress[error_type] = nil

        -- Show final error message
        M.show_final_error(error_obj, current_attempt)

        return false, error_obj
      end
    end
  end

  -- Defensive fallback; the loop above always returns before reaching here
  return false, "Operation failed after all retry attempts"
end
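-- Illustrative call (sketch; `load_document` and `path` are hypothetical):
--
--   local ok, result = M.safe_execute("load_document", "FILE_NOT_FOUND",
--     { operation = "load_document", file_path = path }, load_document, path)
--   if not ok then
--     -- on failure, `result` is the error object built by M.create_error
--     print(M.format_error_for_user(result))
--   end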
-- Apply retry strategy
function M.apply_retry_strategy(strategy, attempt)
  -- Block with vim.wait between attempts; scheduling a deferred no-op via
  -- vim.defer_fn would return immediately and never pause the retry loop
  if strategy == "immediate" then
    -- No delay
  elseif strategy == "exponential_backoff" then
    local delay = math.min(2 ^ attempt, 10) -- Cap at 10 seconds
    vim.wait(delay * 1000)
  elseif strategy == "linear_backoff" then
    local delay = attempt * 1000 -- 1 second per attempt
    vim.wait(delay)
  end
end
-- Show final error to user
function M.show_final_error(error_obj, attempt_count)
  local message = string.format("%s (%d attempts made)", error_obj.user_message or error_obj.message, attempt_count)

  if error_obj.category == "validation" or error_obj.category == "query"
      or error_obj.category == "filesystem" or error_obj.category == "database" then
    vim.notify(message, vim.log.levels.ERROR)
  else
    vim.notify(message, vim.log.levels.WARN)
  end
end
-- Wrap function for safe execution
function M.wrap(operation_name, error_type, func)
  return function(...)
    return M.safe_execute(operation_name, error_type, {operation = operation_name}, func, ...)
  end
end
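-- Illustrative usage (sketch; `parse_buffer` and `bufnr` are hypothetical):
--
--   local safe_parse = M.wrap("parse_buffer", "FILE_PARSE_ERROR", parse_buffer)
--   local ok, result = safe_parse(bufnr)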
-- Handle specific error types with custom recovery
local error_handlers = {}

function M.register_error_handler(error_type, handler)
  error_handlers[error_type] = handler
end
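-- Illustrative custom handler (sketch; `notex.cache` is a hypothetical
-- module, not part of this commit):
--
--   M.register_error_handler("PERFORMANCE_TIMEOUT", function(error_obj)
--     require('notex.cache').clear() -- hypothetical: shed work, then retry
--     return true
--   end)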
function M.handle_specific_error(error_obj)
  local handler = error_handlers[error_obj.type]
  if handler then
    local success, result = pcall(handler, error_obj)
    if success then
      return result
    else
      logging.error("Error handler failed", {
        error_type = error_obj.type,
        handler_error = result
      })
    end
  end

  return nil
end
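-- Illustrative dispatch (sketch; `err` is an error object from M.create_error):
--
--   local recovered = M.handle_specific_error(err)
--   if recovered then
--     vim.notify("Recovered from " .. err.type, vim.log.levels.INFO)
--   end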
-- Register default error handlers
M.register_error_handler("DATABASE_CONNECTION", function(error_obj)
  -- Try to reinitialize database connection
  local database = require('notex.database.init')
  local ok, err = database.reconnect()
  if ok then
    vim.notify("Database connection restored", vim.log.levels.INFO)
    return true
  end
  return false
end)

M.register_error_handler("FILE_NOT_FOUND", function(error_obj)
  -- Remove from index if file no longer exists
  if error_obj.context and error_obj.context.file_path then
    local indexer = require('notex.index')
    local ok, err = indexer.remove_document_by_path(error_obj.context.file_path)
    if ok then
      vim.notify("Removed missing file from index", vim.log.levels.INFO)
      return true
    end
  end
  return false
end)

M.register_error_handler("UI_ERROR", function(error_obj)
  -- Try to cleanup UI state
  local ui = require('notex.ui')
  ui.cleanup_all()
  vim.notify("UI state reset", vim.log.levels.INFO)
  return true
end)
-- Get error statistics
function M.get_error_statistics()
  local recent_n = #error_state.recent_errors
  local stats = {
    total_errors = 0,
    by_type = vim.deepcopy(error_state.error_counts),
    -- vim.list_slice does not support negative indices, so compute the
    -- bounds of the last 10 errors explicitly
    recent_errors = vim.list_slice(error_state.recent_errors, math.max(1, recent_n - 9), recent_n),
    recovery_in_progress = vim.deepcopy(error_state.recovery_in_progress)
  }

  -- Calculate total
  for _, count in pairs(error_state.error_counts) do
    stats.total_errors = stats.total_errors + count
  end

  -- Get error rate in last hour
  local one_hour_ago = os.time() - 3600
  local recent_count = 0
  for _, err in ipairs(error_state.recent_errors) do
    if err.timestamp > one_hour_ago then
      recent_count = recent_count + 1
    end
  end
  stats.errors_per_hour = recent_count

  return stats
end
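-- Illustrative read-out (sketch):
--
--   local stats = M.get_error_statistics()
--   print(string.format("%d errors total, %d in the last hour",
--     stats.total_errors, stats.errors_per_hour))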
-- Clear error history
function M.clear_error_history()
  error_state.recent_errors = {}
  error_state.error_counts = {}
  error_state.last_recovery_attempt = {}
  error_state.recovery_in_progress = {}

  logging.info("Error history cleared")
end

-- Check system health based on errors
function M.check_system_health()
  local stats = M.get_error_statistics()
  local health = {
    status = "healthy",
    issues = {},
    recommendations = {}
  }

  -- Check error rate
  if stats.errors_per_hour > 10 then
    health.status = "degraded"
    table.insert(health.issues, "High error rate: " .. stats.errors_per_hour .. " errors/hour")
    table.insert(health.recommendations, "Check system logs for recurring issues")
  end

  -- Check for stuck recovery operations
  local stuck_recoveries = 0
  for _, in_progress in pairs(error_state.recovery_in_progress) do
    if in_progress then
      stuck_recoveries = stuck_recoveries + 1
    end
  end

  if stuck_recoveries > 0 then
    health.status = "degraded"
    table.insert(health.issues, stuck_recoveries .. " recovery operations in progress")
    table.insert(health.recommendations, "Consider restarting the plugin")
  end

  -- Check for specific error patterns
  local db_errors = error_state.error_counts["DATABASE_CONNECTION"] or 0
  local file_errors = error_state.error_counts["FILE_NOT_FOUND"] or 0

  if db_errors > 5 then
    health.status = "unhealthy"
    table.insert(health.issues, "Frequent database connection errors")
    table.insert(health.recommendations, "Check database file permissions and disk space")
  end

  if file_errors > 10 then
    table.insert(health.issues, "Many file not found errors")
    table.insert(health.recommendations, "Consider reindexing the workspace")
  end

  return health
end
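-- Illustrative health check (sketch):
--
--   local health = M.check_system_health()
--   if health.status ~= "healthy" then
--     vim.notify(table.concat(health.issues, "\n"), vim.log.levels.WARN)
--   end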
-- Create user-friendly error messages
function M.format_error_for_user(error_obj)
  local message = error_obj.user_message or error_obj.message

  -- Add contextual information
  if error_obj.context.operation then
    message = message .. " (during: " .. error_obj.context.operation .. ")"
  end

  if error_obj.context.file_path then
    message = message .. " (file: " .. vim.fn.fnamemodify(error_obj.context.file_path, ":t") .. ")"
  end

  -- Add error ID for support
  message = message .. " [ID: " .. error_obj.error_id .. "]"

  return message
end

-- Export error types for use in other modules
M.ERROR_TYPES = ERROR_TYPES

return M