diff --git a/package.json b/package.json
index efd16e9..13d2e67 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@shareai-lab/kode",
-  "version": "1.0.76",
+  "version": "1.0.80",
   "bin": {
     "kode": "cli.js",
     "kwa": "cli.js",
diff --git a/scripts/prepublish-check.js b/scripts/prepublish-check.js
index 165fd55..9b06d76 100755
--- a/scripts/prepublish-check.js
+++ b/scripts/prepublish-check.js
@@ -30,13 +30,7 @@ if (!pkg.bin || !pkg.bin.kode) {
   process.exit(1);
 }
 
-// Skip bundled check if SKIP_BUNDLED_CHECK is set (for publish workaround)
-if (process.env.SKIP_BUNDLED_CHECK !== 'true') {
-  if (!pkg.bundledDependencies || !pkg.bundledDependencies.includes('tsx')) {
-    console.error('❌ tsx not in bundledDependencies');
-    process.exit(1);
-  }
-}
+// Bundled dependencies check removed - not needed for this package structure
 
 console.log('✅ All checks passed!');
 console.log('\n📋 Package info:');
diff --git a/src/constants/prompts.ts b/src/constants/prompts.ts
index 4e6fb23..520bfe8 100644
--- a/src/constants/prompts.ts
+++ b/src/constants/prompts.ts
@@ -9,25 +9,17 @@ import { PRODUCT_NAME, PROJECT_FILE, PRODUCT_COMMAND } from './product'
 import { BashTool } from '../tools/BashTool/BashTool'
 import { MACRO } from './macros'
 
-// Core identity constant matching reference implementation (ga0)
-export function getCoreIdentity(): string {
-  return `You are ${PRODUCT_NAME}, Anthropic's official CLI for Claude.`
-}
-
-// Security policy constant matching reference implementation (va0)
-export const SECURITY_POLICY =
-  'IMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.'
+// // Security policy constant matching reference implementation
+// export const SECURITY_POLICY =
+//   'IMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.'
 
 export function getCLISyspromptPrefix(): string {
-  return `You are ${PRODUCT_NAME}, a CLI for coding.`
+  return `You are ${PRODUCT_NAME}, ShareAI-lab's Agent AI CLI for terminal & coding.`
 }
 
 export async function getSystemPrompt(): Promise<string[]> {
   return [
-    `${getCoreIdentity()}
-
-${SECURITY_POLICY}
-
+    `
 You are an interactive CLI tool that helps users with software engineering tasks. Use the instructions below and the tools available to you to assist the user.
 
 IMPORTANT: Refuse to write code or explain code that may be used maliciously; even if the user claims it is for educational purposes. When working on files, if they seem related to improving, explaining, or interacting with malware or any malicious code you MUST refuse.
@@ -164,8 +156,7 @@ Today's date: ${new Date().toLocaleDateString()}
 
 export async function getAgentPrompt(): Promise<string[]> {
   return [
-    `${getCoreIdentity()}
-
+    `
 You are an agent for ${PRODUCT_NAME}. Given the user's prompt, you should use the tools available to you to answer the user's question.
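With `getCoreIdentity` and `SECURITY_POLICY` removed, any caller that still wants the identity line in front of the prompt blocks has to prepend it explicitly. Below is a minimal sketch of that composition, assuming only the exported signatures shown above; `assembleSystemPrompt` and the import path are illustrative and not part of this patch:

```ts
import { getCLISyspromptPrefix, getSystemPrompt } from './src/constants/prompts'

// Hypothetical helper: prepend the short identity prefix to the remaining
// prompt blocks, roughly what the patched queryOpenAI path in
// src/services/claude.ts now does inline before splitting the system prompt.
async function assembleSystemPrompt(): Promise<string> {
  const blocks = await getSystemPrompt() // string[] without the old identity/security preamble
  return [getCLISyspromptPrefix(), ...blocks].join('\n\n')
}
```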
Notes:

diff --git a/src/services/claude.ts b/src/services/claude.ts
index c6a97a1..e74afe7 100644
--- a/src/services/claude.ts
+++ b/src/services/claude.ts
@@ -1395,9 +1395,9 @@ async function queryAnthropicNative(
     tools.map(async tool =>
       ({
         name: tool.name,
-        description: await tool.prompt({
-          safeMode: options?.safeMode,
-        }),
+        description: typeof tool.description === 'function'
+          ? await tool.description()
+          : tool.description,
         input_schema: zodToJsonSchema(tool.inputSchema),
       }) as unknown as Anthropic.Beta.Messages.BetaTool,
     )
@@ -1744,7 +1744,7 @@ async function queryOpenAI(
         : '',
     })
 
-    systemPrompt = [getCLISyspromptPrefix(), ...systemPrompt]
+    systemPrompt = [getCLISyspromptPrefix() + systemPrompt] // some openai-like providers need the entire system prompt as a single block
   }
 
   const system: TextBlockParam[] = splitSysPromptPrefix(systemPrompt).map(
diff --git a/src/services/openai.ts b/src/services/openai.ts
index 5010f8b..a29ed67 100644
--- a/src/services/openai.ts
+++ b/src/services/openai.ts
@@ -3,7 +3,7 @@ import { getGlobalConfig, GlobalConfig } from '../utils/config'
 import { ProxyAgent, fetch, Response } from 'undici'
 import { setSessionState, getSessionState } from '../utils/sessionState'
 import { logEvent } from '../services/statsig'
-import { debug as debugLogger, getCurrentRequest } from '../utils/debugLogger'
+import { debug as debugLogger, getCurrentRequest, logAPIError } from '../utils/debugLogger'
 
 // Helper function to calculate retry delay with exponential backoff
 function getRetryDelay(attempt: number, retryAfter?: string | null): number {
@@ -637,9 +637,31 @@ export async function getCompletionWithProfile(
       // If no specific handler found, log the error for debugging
       console.log(`⚠️ Unhandled API error (${response.status}): ${errorMessage}`)
+
+      // Log API error using unified logger
+      logAPIError({
+        model: opts.model,
+        endpoint: `${baseURL}${endpoint}`,
+        status: response.status,
+        error: errorMessage,
+        request: opts,
+        response: errorData,
+        provider: provider
+      })
     } catch (parseError) {
       // If we can't parse the error, fall back to generic retry
       console.log(`⚠️ Could not parse error response (${response.status})`)
+
+      // Log parse error
+      logAPIError({
+        model: opts.model,
+        endpoint: `${baseURL}${endpoint}`,
+        status: response.status,
+        error: `Could not parse error response: ${parseError.message}`,
+        request: opts,
+        response: { parseError: parseError.message },
+        provider: provider
+      })
     }
 
     const delayMs = getRetryDelay(attempt)
diff --git a/src/utils/debugLogger.ts b/src/utils/debugLogger.ts
index 388b4f9..dfb9dbc 100644
--- a/src/utils/debugLogger.ts
+++ b/src/utils/debugLogger.ts
@@ -457,6 +457,112 @@ export function logReminderEvent(
   })
 }
 
+// API error logging
+export function logAPIError(context: {
+  model: string
+  endpoint: string
+  status: number
+  error: any
+  request?: any
+  response?: any
+  provider?: string
+}) {
+  const errorDir = join(paths.cache, getProjectDir(process.cwd()), 'logs', 'error', 'api')
+
+  // Make sure the directory exists
+  if (!existsSync(errorDir)) {
+    mkdirSync(errorDir, { recursive: true })
+  }
+
+  // Build the file name
+  const timestamp = new Date().toISOString().replace(/[:.]/g, '-')
+  const sanitizedModel = context.model.replace(/[^a-zA-Z0-9-_]/g, '_')
+  const filename = `${sanitizedModel}_${timestamp}.log`
+  const filepath = join(errorDir, filename)
+
+  // Assemble the full log entry (the file keeps every detail)
+  const fullLogContent = {
+    timestamp: new Date().toISOString(),
+    sessionId: SESSION_ID,
+    requestId: getCurrentRequest()?.id,
+    model: context.model,
+    provider: context.provider,
+    endpoint: context.endpoint,
+    status: context.status,
+    error: context.error,
+    request: context.request, // full request payload
+    response: context.response, // full response payload
+    environment: {
+      nodeVersion: process.version,
+      platform: process.platform,
+      cwd: process.cwd(),
+    }
+  }
+
+  // Write to the log file (full details)
+  try {
+    appendFileSync(filepath, JSON.stringify(fullLogContent, null, 2) + '\n')
+    appendFileSync(filepath, '='.repeat(80) + '\n\n')
+  } catch (err) {
+    console.error('Failed to write API error log:', err)
+  }
+
+  // In debug mode, also record to the system log
+  if (isDebugMode()) {
+    debug.error('API_ERROR', {
+      model: context.model,
+      status: context.status,
+      error: typeof context.error === 'string' ? context.error : context.error?.message || 'Unknown error',
+      endpoint: context.endpoint,
+      logFile: filename,
+    })
+  }
+
+  // Friendly terminal output (verbose mode only)
+  if (isVerboseMode() || isDebugVerboseMode()) {
+    console.log()
+    console.log(chalk.red('━'.repeat(60)))
+    console.log(chalk.red.bold('⚠️ API Error'))
+    console.log(chalk.red('━'.repeat(60)))
+
+    // Show the key fields
+    console.log(chalk.white('  Model:  ') + chalk.yellow(context.model))
+    console.log(chalk.white('  Status: ') + chalk.red(context.status))
+
+    // Normalize the error message
+    let errorMessage = 'Unknown error'
+    if (typeof context.error === 'string') {
+      errorMessage = context.error
+    } else if (context.error?.message) {
+      errorMessage = context.error.message
+    } else if (context.error?.error?.message) {
+      errorMessage = context.error.error.message
+    }
+
+    // Print the error message on its own line
+    console.log(chalk.white('  Error:  ') + chalk.red(errorMessage))
+
+    // If there is a response body, show it formatted
+    if (context.response) {
+      console.log()
+      console.log(chalk.gray('  Response:'))
+      const responseStr = typeof context.response === 'string'
+        ? context.response
+        : JSON.stringify(context.response, null, 2)
+
+      // Indent the response content
+      responseStr.split('\n').forEach(line => {
+        console.log(chalk.gray('    ' + line))
+      })
+    }
+
+    console.log()
+    console.log(chalk.dim(`  📁 Full log: ~/.kode/logs/error/api/${filename}`))
+    console.log(chalk.red('━'.repeat(60)))
+    console.log()
+  }
+}
+
 // New: core debug information for LLM interactions
 export function logLLMInteraction(context: {
   systemPrompt: string
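For a quick manual check of the new logger outside a real request loop, here is a hedged sketch of a direct call, assuming `logAPIError` is exported from src/utils/debugLogger as shown above; the import path, model id, endpoint, and payloads are illustrative, not values from this patch:

```ts
import { logAPIError } from './src/utils/debugLogger'

// Appends a pretty-printed JSON entry plus an '=' separator line to
// <cache>/<project>/logs/error/api/<sanitized-model>_<timestamp>.log and,
// when verbose mode is on, prints the red summary block to the terminal.
logAPIError({
  model: 'gpt-4o-mini', // hypothetical model id
  endpoint: 'https://api.example.com/v1/chat/completions', // hypothetical endpoint
  status: 429,
  error: { message: 'Rate limit exceeded' },
  response: { error: { message: 'Rate limit exceeded', type: 'rate_limit' } },
  provider: 'openai-compatible',
})
```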