Upload 4 files
app.py
CHANGED
@@ -1,5 +1,5 @@
 from flask import Flask, request, jsonify, Response, stream_with_context, render_template_string
-from google.generativeai.types import
+from google.generativeai.types import StopCandidateException, generation_types
 from google.api_core.exceptions import InvalidArgument, ResourceExhausted, Aborted, InternalServerError, ServiceUnavailable, PermissionDenied
 import google.generativeai as genai
 import json
@@ -13,6 +13,12 @@ import time
 import requests
 from collections import deque
 import random
+import codecs
+import json
+from dataclasses import dataclass
+from typing import Optional, Iterator, Dict, Any
+
+app = Flask(__name__)
 
 os.environ['TZ'] = 'Asia/Shanghai'
 
@@ -38,7 +44,7 @@ request_counts = {}
 api_key_blacklist = set()
 api_key_blacklist_duration = 60
 
-
+
 safety_settings = [
     {
         "category": "HARM_CATEGORY_HARASSMENT",
@@ -75,6 +81,44 @@ safety_settings_g2 = [
         "threshold": "OFF"
     }
 ]
+@dataclass
+class GeneratedText:
+    """Stores a fragment of generated text."""
+    text: str
+    finish_reason: Optional[str] = None
+
+
+class ResponseWrapper:
+    """Wrapper class for non-streaming responses."""
+    def __init__(self, data: Dict[Any, Any]):
+        self._data = data
+        self._text = self._extract_text()
+        self._finish_reason = self._extract_finish_reason()
+
+    def _extract_text(self) -> str:
+        """Extract the text from the response data."""
+        try:
+            return self._data['candidates'][0]['content']['parts'][0]['text']
+        except (KeyError, IndexError):
+            return ""
+
+    def _extract_finish_reason(self) -> Optional[str]:
+        """Extract the finish reason."""
+        try:
+            return self._data['candidates'][0].get('finishReason')
+        except (KeyError, IndexError):
+            return None
+
+    @property
+    def text(self) -> str:
+        """The extracted response text."""
+        return self._text
+
+    @property
+    def finish_reason(self) -> Optional[str]:
+        """The extracted finish reason."""
+        return self._finish_reason
+
 class APIKeyManager:
     def __init__(self):
         self.api_keys = re.findall(r"AIzaSy[a-zA-Z0-9_-]{33}", os.environ.get('KeyArray'))
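As a reading aid (not part of the commit): ResponseWrapper is defensive by design, collapsing missing keys into defaults instead of raising. With the class above in scope, a minimal sketch of its behavior on a hand-built dict in the shape the Gemini REST API returns (the sample payload is illustrative):

    # Illustrative only: a hand-written payload in the shape of a
    # generateContent response, used to exercise ResponseWrapper.
    sample = {
        "candidates": [{
            "content": {"parts": [{"text": "Hello!"}], "role": "model"},
            "finishReason": "STOP",
        }]
    }

    wrapped = ResponseWrapper(sample)
    print(wrapped.text)            # -> "Hello!"
    print(wrapped.finish_reason)   # -> "STOP"

    # Malformed or empty payloads degrade to defaults instead of raising:
    print(repr(ResponseWrapper({}).text))       # -> ''
    print(ResponseWrapper({}).finish_reason)    # -> None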
@@ -138,7 +182,7 @@ GEMINI_MODELS = [
 
 @app.route('/')
 def index():
-    main_content = "Moonfanz Reminiproxy v2.
+    main_content = "Moonfanz Reminiproxy v2.3.0 2025-01-11"
     html_template = """
     <!DOCTYPE html>
     <html>
@@ -303,113 +347,110 @@ def chat_completions():
     max_tokens = request_data.get('max_tokens', 8192)
     stream = request_data.get('stream', False)
     hint = "streaming" if stream else "non-streaming"
-    logger.info(f"\n{model} [{hint}] →
-    # logger.info(f"System instruction: {r_s}")
+    logger.info(f"\n{model} [{hint}] → ...")
+    is_thinking = 'thinking' in model
+    api_version = 'v1alpha' if is_thinking else 'v1beta'
+    response_type = 'streamGenerateContent' if stream else 'generateContent'
+    is_SSE = '&alt=sse' if stream else ''
+
+    gemini_history, system_instruction, error_response = func.process_messages_for_gemini(messages)
+
     if error_response:
         logger.error(f"Error while processing input messages↙\n {error_response}")
         return jsonify(error_response), 400
 
     def do_request(current_api_key, attempt):
-        isok,
+        isok, time_remaining = is_within_rate_limit(current_api_key)
         if not isok:
-            logger.warning(f"
+            logger.warning(f"Rate limit temporarily exceeded; this API key will be re-enabled in {time_remaining} seconds...")
             switch_api_key()
             return 0, None
 
         increment_request_count(current_api_key)
 
-        genai.configure(api_key=current_api_key)
 
+        url = f"https://generativelanguage.googleapis.com/{api_version}/models/{model}:{response_type}?key={current_api_key}{is_SSE}"
+        headers = {
+            "Content-Type": "application/json",
         }
 
+        data = {
+            "contents": gemini_history,
+            "generationConfig": {
+                "temperature": temperature,
+                "maxOutputTokens": max_tokens,
+            },
+            "safetySettings": safety_settings_g2 if model == 'gemini-2.0-flash-exp' else safety_settings,
+        }
+        if system_instruction:
+            data["system_instruction"] = system_instruction
 
         try:
+            response = requests.post(url, headers=headers, json=data, stream=True)
+            response.raise_for_status()
+
+            if stream:
+
+                return 1, response
             else:
-        except
+
+                return 1, ResponseWrapper(response.json())
+        except requests.exceptions.RequestException as e:
             return handle_api_error(e, attempt)
 
-    def
+    def generate_stream(response):
+        buffer = b""
         try:
-            data
-                'choices': [
-                    {
-                        'delta': {},
-                        'finish_reason': 'stop',
-                        'index': 0
-                    }
-                ],
-                'object': 'chat.completion.chunk'
-            }
+            for line in response.iter_lines():
+                if not line:
+                    continue
+                try:
+                    if line.startswith(b'data: '):
+                        line = line[6:]
+
+                    buffer += line
+
+                    try:
+                        data = json.loads(buffer.decode('utf-8'))
+                        buffer = b""
+                        if 'candidates' in data and data['candidates']:
+                            candidate = data['candidates'][0]
+                            if 'content' in candidate:
+                                content = candidate['content']
+                                if 'parts' in content and content['parts']:
+                                    text = content['parts'][0].get('text', '')
+                                    finish_reason = candidate.get('finishReason')
+
+                                    if text:
+                                        data = {
+                                            'choices': [{
+                                                'delta': {
+                                                    'content': text
+                                                },
+                                                'finish_reason': finish_reason,
+                                                'index': 0
+                                            }],
+                                            'object': 'chat.completion.chunk'
+                                        }
+                                        yield f"data: {json.dumps(data)}\n\n"
+
+                    except json.JSONDecodeError:
+                        logger.debug(f"JSONDecodeError, buffer now: {buffer}")
+                        continue
+
+                except Exception as e:
+                    logger.error(f"Stream error during processing: {e}, Raw data line: {line}")
+                    yield f"data: {json.dumps({'error': str(e)})}\n\n"
+
+            yield f"data: {json.dumps({'choices': [{'delta': {}, 'finish_reason': 'stop', 'index': 0}]})}\n\n"
 
+        except Exception as e:
+            logger.error(f"Stream error: {e}")
+            yield f"data: {json.dumps({'error': str(e)})}\n\n"
 
     attempt = 0
     success = 0
     response = None
     for attempt in range(1, MAX_RETRIES + 1):
         logger.info(f"Attempt {attempt}/{MAX_RETRIES} ...")
         success, response = do_request(current_api_key, attempt)
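As a reading aid (not part of the commit): a standalone sketch of the translation generate_stream performs on a single SSE line. The raw line below is hand-written in the shape streamGenerateContent?alt=sse emits; the printed result is the OpenAI-style chunk the proxy forwards.

    import json

    # Hand-written sample of one Gemini SSE line (illustrative only).
    raw_line = b'data: {"candidates": [{"content": {"parts": [{"text": "Hi"}], "role": "model"}, "index": 0}]}'

    payload = json.loads(raw_line[6:].decode('utf-8'))   # strip the b'data: ' prefix
    candidate = payload['candidates'][0]
    text = candidate['content']['parts'][0].get('text', '')

    chunk = {
        'choices': [{
            'delta': {'content': text},
            'finish_reason': candidate.get('finishReason'),  # None mid-stream
            'index': 0
        }],
        'object': 'chat.completion.chunk'
    }
    print(f"data: {json.dumps(chunk)}")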
@@ -438,7 +479,10 @@ def chat_completions():
         return jsonify(response), 500 if response is not None else 503
 
     if stream:
-        return Response(
+        return Response(
+            stream_with_context(generate_stream(response)),
+            mimetype='text/event-stream'
+        )
     else:
         try:
             text_content = response.text
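As a reading aid (not part of the commit): a hypothetical client exercising the streaming path end to end. The route path and port are assumptions, since the chat_completions route decorator is outside this diff.

    import json
    import requests

    # Hypothetical endpoint: route and port are assumptions, not shown in the diff.
    resp = requests.post(
        "http://localhost:7860/v1/chat/completions",
        json={
            "model": "gemini-2.0-flash-exp",
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": True,
        },
        stream=True,
    )

    for line in resp.iter_lines():
        if not line or not line.startswith(b"data: "):
            continue
        chunk = json.loads(line[6:])
        if "error" in chunk:
            break
        choice = chunk["choices"][0]
        print(choice["delta"].get("content", ""), end="", flush=True)
        if choice.get("finish_reason") == "stop":
            break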
@@ -500,7 +544,7 @@ if __name__ == '__main__':
 
     scheduler.add_job(keep_alive, 'interval', hours=12)
     scheduler.start()
-    logger.info(f"Reminiproxy v2.
+    logger.info(f"Reminiproxy v2.3.0 started")
     logger.info(f"Maximum attempts/MaxRetries: {MAX_RETRIES}")
     logger.info(f"Maximum requests/MaxRequests: {MAX_REQUESTS}")
     logger.info(f"Rate-limit window/LimitWindow: {LIMIT_WINDOW} seconds")
func.py
CHANGED
@@ -106,13 +106,7 @@ def process_messages_for_gemini(messages):
         else:
             errors.append(f"Invalid role: {role}")
 
-    if gemini_history:
-        user_message = gemini_history[-1]
-        gemini_history = gemini_history[:-1]
-    else:
-        user_message = {"role": "user", "parts": [""]}
-
     if errors:
-        return gemini_history,
+        return gemini_history, {"parts": [{"text": system_instruction_text}]}, (jsonify({'error': errors}), 400)
     else:
-        return gemini_history,
+        return gemini_history, {"parts": [{"text": system_instruction_text}]}, None
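As a reading aid (not part of the commit): after this change process_messages_for_gemini always returns a three-tuple of (gemini_history, system_instruction, error_response), with the system instruction pre-wrapped in the REST shape. The values below are illustrative; the history mapping itself happens in the unchanged part of the function.

    # Illustrative shapes only; the mapping logic lives outside this hunk.
    messages = [
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": "Hi"},
    ]

    gemini_history = [{"role": "user", "parts": [{"text": "Hi"}]}]   # assumed shape
    system_instruction = {"parts": [{"text": "Answer briefly."}]}    # per this diff
    error_response = None

    # do_request() then drops these straight into the REST payload:
    payload = {"contents": gemini_history, "system_instruction": system_instruction}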