Code Coverage

|                       | Lines            | Functions and Methods | CRAP  | Classes and Traits |
|-----------------------|------------------|-----------------------|-------|--------------------|
| Total                 | 89.53% (77 / 86) | 75.00% (3 / 4)        |       | 0.00% (0 / 1)      |
| NodeJsAIBridgeService | 89.53% (77 / 86) | 75.00% (3 / 4)        | 11.14 | 0.00% (0 / 1)      |
| __construct           | 100.00% (4 / 4)  | 100.00% (1 / 1)       | 1     |                    |
| generate              | 88.16% (67 / 76) | 0.00% (0 / 1)         | 8.11  |                    |
| estimateTokens        | 100.00% (1 / 1)  | 100.00% (1 / 1)       | 1     |                    |
| calculateCost         | 100.00% (5 / 5)  | 100.00% (1 / 1)       | 1     |                    |
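The numbers line up: `__construct`, `estimateTokens`, and `calculateCost` are fully covered, so all nine uncovered lines (77 / 86 overall against 67 / 76 in `generate`) sit in `generate`. PHPUnit counts a function as covered only when every line of it executes, which is why `generate` shows 0 / 1 functions despite 88.16% line coverage, and why its CRAP score (8.11 — cyclomatic complexity weighted by untested code) is elevated; the untested lines are most plausibly its two catch blocks. Below is a minimal sketch of a test that would exercise the GuzzleException path. It assumes a Laravel `Tests\TestCase` and that `AIRequestLogRepository::create()` is loosely typed enough to stub with a plain object exposing `_id`; because the constructor builds its own Guzzle `Client`, the sketch swaps in a failing client via reflection:

<?php

use App\Http\Repositories\AIRequestLogRepository;
use App\Http\Services\NodeJsAIBridgeService;
use GuzzleHttp\Client;
use GuzzleHttp\Exception\ConnectException;
use GuzzleHttp\Handler\MockHandler;
use GuzzleHttp\HandlerStack;
use GuzzleHttp\Psr7\Request;
use Tests\TestCase;

class NodeJsAIBridgeServiceErrorPathTest extends TestCase
{
    public function test_transport_failure_marks_log_as_error_and_rethrows(): void
    {
        // Queue a transport-level failure; ConnectException implements GuzzleException.
        $handler = new MockHandler([
            new ConnectException('Connection refused', new Request('POST', '/api/ai/generate')),
        ]);
        $failingClient = new Client(['handler' => HandlerStack::create($handler)]);

        // Stub the repository: create() returns something exposing ->_id,
        // and the pending log must be flipped to 'error' exactly once.
        $repository = $this->createMock(AIRequestLogRepository::class);
        $repository->method('create')->willReturn((object) ['_id' => 'log-id-1']);
        $repository->expects($this->once())
            ->method('updateStatus')
            ->with('log-id-1', 'error', $this->arrayHasKey('prompt_output'));

        $service = new NodeJsAIBridgeService($repository);

        // The constructor news up its own Client, so swap it via reflection.
        // (setAccessible() is a no-op on PHP >= 8.1 but harmless.)
        $property = new \ReflectionProperty($service, 'httpClient');
        $property->setAccessible(true);
        $property->setValue($service, $failingClient);

        $this->expectException(\Exception::class);
        $this->expectExceptionMessage('AI bridge request failed');

        $service->generate([
            'provider' => 'google',
            'model' => 'gemini-2.5-flash',
            'prompt' => 'hello',
            'config' => ['maxOutputTokens' => 256, 'temperature' => 0.7, 'topP' => 0.9],
        ]);
    }
}

An optional constructor parameter (`?Client $httpClient = null`) would be a cleaner seam than the reflection swap. The covered source follows.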
<?php

namespace App\Http\Services;

use App\Http\Repositories\AIRequestLogRepository;
use Exception;
use GuzzleHttp\Client;
use GuzzleHttp\Exception\GuzzleException;
use Illuminate\Support\Facades\Log;

/**
 * Bridge service that routes AI generation requests to the Node.js sidecar,
 * then persists a detailed log entry in the `ai_request_logs` MongoDB collection.
 *
 * The PHP layer is responsible for prompt construction and response processing.
 * The Node.js sidecar handles the actual AI provider API calls.
 */
class NodeJsAIBridgeService
{
    /**
     * Cost per 1 million tokens by model (USD).
     * Keys normalised to lowercase for case-insensitive lookup.
     * Pricing source: Google AI pricing page (standard tier, <=200K input tokens).
     *
     * @var array<string, array{input: float, output: float}>
     */
    private const COST_PER_MILLION_TOKENS = [
        // Gemini 3.x models
        'gemini-3.1-pro-preview' => ['input' => 2.00, 'output' => 12.00],
        'gemini-3.1-flash-image-preview' => ['input' => 0.50, 'output' => 3.00],
        'gemini-3.1-flash-lite-preview' => ['input' => 0.25, 'output' => 1.50],
        'gemini-3-pro-preview' => ['input' => 2.00, 'output' => 12.00],
        'gemini-3-flash-preview' => ['input' => 0.50, 'output' => 3.00],

        // Gemini 2.5 models
        'gemini-2.5-pro' => ['input' => 1.25, 'output' => 10.00],
        'gemini-2.5-pro-preview' => ['input' => 1.25, 'output' => 10.00],
        'gemini-2.5-flash' => ['input' => 0.30, 'output' => 2.50],
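        // Entries with a ':streamgeneratecontent' suffix exist because
        // calculateCost() lowercases the incoming model string and matches
        // keys exactly; streaming invocations evidently arrive with the RPC
        // method appended (e.g. 'gemini-2.5-flash:streamGenerateContent').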
        'gemini-2.5-flash:streamgeneratecontent' => ['input' => 0.30, 'output' => 2.50],
        'gemini-2.5-flash-lite' => ['input' => 0.10, 'output' => 0.40],
        'gemini-2.5-flash-lite-preview-09-2025' => ['input' => 0.10, 'output' => 0.40],

        // Gemini 2.0 models
        'gemini-2.0-flash' => ['input' => 0.15, 'output' => 0.60],
        'gemini-2.0-flash:streamgeneratecontent' => ['input' => 0.15, 'output' => 0.60],
        'gemini-2.0-flash-lite' => ['input' => 0.075, 'output' => 0.30],

        // Gemini 1.5 models (legacy)
        'gemini-1.5-flash' => ['input' => 0.075, 'output' => 0.30],
        'gemini-1.5-flash:streamgeneratecontent' => ['input' => 0.075, 'output' => 0.30],
        'gemini-1.5-pro' => ['input' => 1.25, 'output' => 5.00],

        'default' => ['input' => 0.15, 'output' => 0.60],
    ];

    private Client $httpClient;

    public function __construct(
        private readonly AIRequestLogRepository $logRepository
    ) {
        $this->httpClient = new Client([
            'timeout' => 120,
            'connect_timeout' => 10,
        ]);
    }

    /**
     * Send a generation request to the Node.js AI bridge and log the result.
     *
     * @param array{
     *     provider: string,
     *     model: string,
     *     prompt: string,
     *     systemInstruction?: string,
     *     config: array{
     *         maxOutputTokens: int,
     *         temperature: float,
     *         topP: float,
     *         thinkingBudget?: int,
     *         enableGoogleSearch?: bool,
     *         safetySettings?: array
     *     },
     *     youtubeUrl?: string|null
     * } $payload AI generation parameters
     * @param array{
     *     user_id?: string|null,
     *     company_id?: string|null,
     *     feature?: string,
     *     context?: mixed,
     *     prompt_id?: string|null,
     *     regenerated?: bool,
     *     regenerated_from_id?: string|null
     * } $metadata Context for logging (user, feature, etc.). `prompt_id`
     *     is the ai_prompts record whose config drove this call;
     *     it is persisted alongside the resolved config values so
     *     operators can audit that prompt settings were honored.
     * @return string The raw generated text from the AI
     *
     * @throws Exception When the Node.js bridge returns an error
     */
    public function generate(array $payload, array $metadata = []): string
    {
        $userId = $metadata['user_id'] ?? null;
        $companyId = $metadata['company_id'] ?? null;
        $feature = $metadata['feature'] ?? 'unknown';
        $context = $metadata['context'] ?? null;
        $provider = $payload['provider'];
        $model = $payload['model'];
        $config = $payload['config'];

        // Create a pending log entry before making the API call.
        // The config fields are persisted so operators can audit that the
        // ai_prompts record's configuration was actually honored. The
        // required keys (maxOutputTokens / temperature / topP) are guaranteed
        // by the declared array shape; the optional keys (thinkingBudget /
        // enableGoogleSearch) are still guarded with isset().
        $logData = [
            'user_id' => (string) $userId,
            'company_id' => $companyId ? (string) $companyId : null,
            'feature' => $feature,
            'provider' => $provider,
            'model' => $model,
            'prompt_input' => $payload['prompt'],
            'context' => $context,
            'status' => 'pending',
            'prompt_tokens' => 0,
            'completion_tokens' => 0,
            'total_tokens' => 0,
            'estimated_cost_usd' => 0.0,
            'temperature' => (float) $config['temperature'],
            'top_p' => (float) $config['topP'],
            'max_tokens' => (int) $config['maxOutputTokens'],
            'thinking_budget' => isset($config['thinkingBudget']) ? (int) $config['thinkingBudget'] : null,
            'enable_google_search' => isset($config['enableGoogleSearch']) ? (bool) $config['enableGoogleSearch'] : null,
            'prompt_id' => isset($metadata['prompt_id']) ? (string) $metadata['prompt_id'] : null,
        ];

        if (! empty($metadata['regenerated'])) {
            $logData['regenerated'] = true;
            $logData['regenerated_from_id'] = $metadata['regenerated_from_id'] ?? null;
        }

        $log = $this->logRepository->create($logData);

        $bridgeUrl = rtrim(config('services.node_ai_bridge.url', 'http://127.0.0.1:3000'), '/');

        $payload['apiKey'] = config('services.node_ai_bridge.google_api_key', '');

        try {
            $response = $this->httpClient->post("{$bridgeUrl}/api/ai/generate", [
                'headers' => [
                    'Content-Type' => 'application/json',
                    'Accept' => 'application/json',
                ],
                'json' => $payload,
            ]);

            $data = json_decode($response->getBody()->getContents(), true);
            $text = $data['text'] ?? '';
            $usage = $data['usage'] ?? [];
            $durationMs = $data['executionDurationMs'] ?? null;

            $promptTokens = $usage['promptTokens'] ?? $this->estimateTokens($payload['prompt']);
            $completionTokens = $usage['completionTokens'] ?? $this->estimateTokens($text);
            $totalTokens = $usage['totalTokens'] ?? ($promptTokens + $completionTokens);

            $estimatedCost = $this->calculateCost($model, $promptTokens, $completionTokens);

            $this->logRepository->updateStatus((string) $log->_id, 'success', [
                'prompt_output' => $text,
                'execution_duration_ms' => $durationMs,
                'prompt_tokens' => $promptTokens,
                'completion_tokens' => $completionTokens,
                'total_tokens' => $totalTokens,
                'estimated_cost_usd' => $estimatedCost,
            ]);

            return $text;
        } catch (GuzzleException $e) {
            Log::error('[NodeJsAIBridgeService] HTTP error calling Node.js bridge', [
                'error' => $e->getMessage(),
                'feature' => $feature,
                'model' => $model,
            ]);

            $this->logRepository->updateStatus((string) $log->_id, 'error', [
                'prompt_output' => $e->getMessage(),
            ]);

            throw new Exception("AI bridge request failed: {$e->getMessage()}", 0, $e);
        } catch (Exception $e) {
            Log::error('[NodeJsAIBridgeService] Unexpected error', [
                'error' => $e->getMessage(),
                'feature' => $feature,
            ]);

            $this->logRepository->updateStatus((string) $log->_id, 'error', [
                'prompt_output' => $e->getMessage(),
            ]);

            throw $e;
        }
    }

    /**
     * Estimate token count from a string (rough approximation: 1 token ≈ 4 characters).
     */
    private function estimateTokens(string $text): int
    {
        return (int) max(1, ceil(mb_strlen($text) / 4));
    }
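    // Worked example: estimateTokens('Hello, world!') = ceil(13 / 4) = 4 tokens;
    // the max(1, ...) floor means even an empty string counts as one token.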

    /**
     * Calculate estimated cost in USD based on model pricing and token counts.
     */
    private function calculateCost(string $model, int $promptTokens, int $completionTokens): float
    {
        $key = strtolower($model);
        $rates = self::COST_PER_MILLION_TOKENS[$key] ?? self::COST_PER_MILLION_TOKENS['default'];

        $inputCost = ($promptTokens / 1_000_000) * $rates['input'];
        $outputCost = ($completionTokens / 1_000_000) * $rates['output'];

        return round($inputCost + $outputCost, 8);
    }
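    // Worked example: 1,000 prompt tokens and 500 completion tokens on
    // 'gemini-2.5-flash' cost (1000 / 1e6) * 0.30 + (500 / 1e6) * 2.50
    // = 0.0003 + 0.00125 = 0.00155 USD. Unknown models fall back to 'default'.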
}
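
For orientation, a hedged sketch of a call site follows, assuming the service is resolved from the Laravel container (which in turn resolves `AIRequestLogRepository`); the model, feature name, IDs, and prompt text are illustrative only:

<?php

use App\Http\Services\NodeJsAIBridgeService;

$bridge = app(NodeJsAIBridgeService::class);

$text = $bridge->generate(
    payload: [
        'provider' => 'google',
        'model' => 'gemini-2.5-flash',
        'prompt' => 'Summarise the attached meeting notes in three bullet points.',
        'systemInstruction' => 'You are a concise business writing assistant.',
        'config' => [
            'maxOutputTokens' => 1024,
            'temperature' => 0.4,
            'topP' => 0.95,
        ],
    ],
    metadata: [
        'user_id' => '665f1c2e9d3a4b0012345678', // illustrative Mongo ObjectId string
        'feature' => 'meeting-summary',          // illustrative feature name
        'prompt_id' => null,
    ],
);

On success the return value is only the raw generated text; token counts, estimated cost, and execution duration are persisted on the `ai_request_logs` entry rather than returned.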