Code Coverage
 
Lines
Functions and Methods
Classes and Traits
Total
88.00% covered (warning)
88.00%
66 / 75
75.00% covered (warning)
75.00%
3 / 4
CRAP
0.00% covered (danger)
0.00%
0 / 1
NodeJsAIBridgeService
88.00% covered (warning)
88.00%
66 / 75
75.00% covered (warning)
75.00%
3 / 4
7.08
0.00% covered (danger)
0.00%
0 / 1
 __construct
100.00% covered (success)
100.00%
4 / 4
100.00% covered (success)
100.00%
1 / 1
1
 generate
86.15% covered (warning)
86.15%
56 / 65
0.00% covered (danger)
0.00%
0 / 1
4.04
 estimateTokens
100.00% covered (success)
100.00%
1 / 1
100.00% covered (success)
100.00%
1 / 1
1
 calculateCost
100.00% covered (success)
100.00%
5 / 5
100.00% covered (success)
100.00%
1 / 1
1
<?php

namespace App\Http\Services;

use App\Http\Repositories\AIRequestLogRepository;
use Exception;
use GuzzleHttp\Client;
use GuzzleHttp\Exception\GuzzleException;
use Illuminate\Support\Facades\Log;
use JsonException;

/**
 * Bridge service that routes AI generation requests to the Node.js sidecar,
 * then persists a detailed log entry in the `ai_request_logs` MongoDB collection.
 *
 * The PHP layer is responsible for prompt construction and response processing.
 * The Node.js sidecar handles the actual AI provider API calls.
 */
class NodeJsAIBridgeService
{
    /**
     * Cost per 1 million tokens by model (USD).
     * Keys normalised to lowercase for case-insensitive lookup.
     * Pricing source: Google AI pricing page (standard tier, <=200K input tokens).
     *
     * @var array<string, array{input: float, output: float}>
     */
    private const COST_PER_MILLION_TOKENS = [
        // Gemini 3.x models
        'gemini-3.1-pro-preview' => ['input' => 2.00, 'output' => 12.00],
        'gemini-3.1-flash-image-preview' => ['input' => 0.50, 'output' => 3.00],
        'gemini-3.1-flash-lite-preview' => ['input' => 0.25, 'output' => 1.50],
        'gemini-3-pro-preview' => ['input' => 2.00, 'output' => 12.00],
        'gemini-3-flash-preview' => ['input' => 0.50, 'output' => 3.00],

        // Gemini 2.5 models
        'gemini-2.5-pro' => ['input' => 1.25, 'output' => 10.00],
        'gemini-2.5-pro-preview' => ['input' => 1.25, 'output' => 10.00],
        'gemini-2.5-flash' => ['input' => 0.30, 'output' => 2.50],
        'gemini-2.5-flash:streamgeneratecontent' => ['input' => 0.30, 'output' => 2.50],
        'gemini-2.5-flash-lite' => ['input' => 0.10, 'output' => 0.40],
        'gemini-2.5-flash-lite-preview-09-2025' => ['input' => 0.10, 'output' => 0.40],

        // Gemini 2.0 models
        'gemini-2.0-flash' => ['input' => 0.15, 'output' => 0.60],
        'gemini-2.0-flash:streamgeneratecontent' => ['input' => 0.15, 'output' => 0.60],
        'gemini-2.0-flash-lite' => ['input' => 0.075, 'output' => 0.30],

        // Gemini 1.5 models (legacy)
        'gemini-1.5-flash' => ['input' => 0.075, 'output' => 0.30],
        'gemini-1.5-flash:streamgeneratecontent' => ['input' => 0.075, 'output' => 0.30],
        'gemini-1.5-pro' => ['input' => 1.25, 'output' => 5.00],

        // Fallback rate for any model not listed above.
        'default' => ['input' => 0.15, 'output' => 0.60],
    ];

    /** HTTP client used to reach the Node.js sidecar. */
    private Client $httpClient;

    /**
     * @param  AIRequestLogRepository  $logRepository  Persists per-request log documents.
     */
    public function __construct(
        private readonly AIRequestLogRepository $logRepository
    ) {
        // Generation can be slow (up to 2 minutes for a full response), but an
        // unreachable sidecar should fail fast (10s connect timeout).
        $this->httpClient = new Client([
            'timeout' => 120,
            'connect_timeout' => 10,
        ]);
    }

    /**
     * Send a generation request to the Node.js AI bridge and log the result.
     *
     * A 'pending' log entry is created before the HTTP call so that a crash
     * mid-request still leaves an auditable record; it is promoted to
     * 'success' or 'error' once the outcome is known.
     *
     * @param  array{
     *     provider: string,
     *     model: string,
     *     prompt: string,
     *     systemInstruction?: string,
     *     config: array{
     *         maxOutputTokens: int,
     *         temperature: float,
     *         topP: float,
     *         thinkingBudget?: int,
     *         enableGoogleSearch?: bool,
     *         safetySettings?: array
     *     },
     *     youtubeUrl?: string|null
     * }  $payload       AI generation parameters
     * @param  array{
     *     user_id?: string|null,
     *     company_id?: string|null,
     *     feature?: string,
     *     context?: mixed
     * }  $metadata      Context for logging (user, feature, etc.)
     * @return string The raw generated text from the AI
     *
     * @throws Exception When the Node.js bridge returns an error or invalid JSON
     */
    public function generate(array $payload, array $metadata = []): string
    {
        $userId = $metadata['user_id'] ?? null;
        $companyId = $metadata['company_id'] ?? null;
        $feature = $metadata['feature'] ?? 'unknown';
        $context = $metadata['context'] ?? null;
        $provider = $payload['provider'];
        $model = $payload['model'];

        // Create a pending log entry before making the API call.
        // Cast ids to string only when present: a missing user/company id must
        // be stored as null, not coerced to the empty string (and a strict
        // null check avoids dropping falsy-but-valid ids).
        $log = $this->logRepository->create([
            'user_id' => $userId !== null ? (string) $userId : null,
            'company_id' => $companyId !== null ? (string) $companyId : null,
            'feature' => $feature,
            'provider' => $provider,
            'model' => $model,
            'prompt_input' => $payload['prompt'],
            'context' => $context,
            'status' => 'pending',
            'prompt_tokens' => 0,
            'completion_tokens' => 0,
            'total_tokens' => 0,
            'estimated_cost_usd' => 0.0,
        ]);

        $bridgeUrl = rtrim(config('services.node_ai_bridge.url', 'http://127.0.0.1:3000'), '/');

        // The API key is injected server-side so callers never supply it.
        $payload['apiKey'] = config('services.node_ai_bridge.google_api_key', '');

        try {
            $response = $this->httpClient->post("{$bridgeUrl}/api/ai/generate", [
                'headers' => [
                    'Content-Type' => 'application/json',
                    'Accept' => 'application/json',
                ],
                'json' => $payload,
            ]);

            // Throw on malformed JSON instead of silently decoding to null,
            // which would log an empty response as a "success".
            $data = json_decode(
                $response->getBody()->getContents(),
                true,
                512,
                JSON_THROW_ON_ERROR
            );
            $text = $data['text'] ?? '';
            $usage = $data['usage'] ?? [];
            $durationMs = $data['executionDurationMs'] ?? null;

            // Fall back to local estimates when the sidecar omits usage data.
            $promptTokens = $usage['promptTokens'] ?? $this->estimateTokens($payload['prompt']);
            $completionTokens = $usage['completionTokens'] ?? $this->estimateTokens($text);
            $totalTokens = $usage['totalTokens'] ?? ($promptTokens + $completionTokens);

            $estimatedCost = $this->calculateCost($model, $promptTokens, $completionTokens);

            $this->logRepository->updateStatus((string) $log->_id, 'success', [
                'prompt_output' => $text,
                'execution_duration_ms' => $durationMs,
                'prompt_tokens' => $promptTokens,
                'completion_tokens' => $completionTokens,
                'total_tokens' => $totalTokens,
                'estimated_cost_usd' => $estimatedCost,
            ]);

            return $text;
        } catch (JsonException $e) {
            // Bridge responded 2xx but with a body that is not valid JSON.
            Log::error('[NodeJsAIBridgeService] Invalid JSON from Node.js bridge', [
                'error' => $e->getMessage(),
                'feature' => $feature,
                'model' => $model,
            ]);

            $this->logRepository->updateStatus((string) $log->_id, 'error', [
                'prompt_output' => $e->getMessage(),
            ]);

            throw new Exception("AI bridge returned invalid JSON: {$e->getMessage()}", 0, $e);
        } catch (GuzzleException $e) {
            // Transport-level failure (connect error, timeout, non-2xx status).
            Log::error('[NodeJsAIBridgeService] HTTP error calling Node.js bridge', [
                'error' => $e->getMessage(),
                'feature' => $feature,
                'model' => $model,
            ]);

            $this->logRepository->updateStatus((string) $log->_id, 'error', [
                'prompt_output' => $e->getMessage(),
            ]);

            throw new Exception("AI bridge request failed: {$e->getMessage()}", 0, $e);
        } catch (Exception $e) {
            // Anything else (repository failures, etc.): log and rethrow as-is.
            Log::error('[NodeJsAIBridgeService] Unexpected error', [
                'error' => $e->getMessage(),
                'feature' => $feature,
            ]);

            $this->logRepository->updateStatus((string) $log->_id, 'error', [
                'prompt_output' => $e->getMessage(),
            ]);

            throw $e;
        }
    }

    /**
     * Estimate token count from a string (rough approximation: 1 token ≈ 4 characters).
     * Returns at least 1; uses mb_strlen so multibyte text is counted by character.
     */
    private function estimateTokens(string $text): int
    {
        return (int) max(1, ceil(mb_strlen($text) / 4));
    }

    /**
     * Calculate estimated cost in USD based on model pricing and token counts.
     * Unknown models fall back to the 'default' rate; rounded to 8 decimal
     * places since per-request costs are fractions of a cent.
     */
    private function calculateCost(string $model, int $promptTokens, int $completionTokens): float
    {
        $key = strtolower($model);
        $rates = self::COST_PER_MILLION_TOKENS[$key] ?? self::COST_PER_MILLION_TOKENS['default'];

        $inputCost = ($promptTokens / 1_000_000) * $rates['input'];
        $outputCost = ($completionTokens / 1_000_000) * $rates['output'];

        return round($inputCost + $outputCost, 8);
    }
}