import os

# ✅ Set the Hugging Face cache to a writable directory before importing
# transformers, so the cache location takes effect (fixes the permission
# error on Spaces, where the default cache path is not writable).
os.environ["TRANSFORMERS_CACHE"] = "/tmp"
os.environ["HF_HOME"] = "/tmp"

import io
import re
import string

import joblib
import uvicorn
from fastapi import FastAPI, File, UploadFile
from huggingface_hub import hf_hub_download
from PIL import Image
from pydantic import BaseModel
from transformers import pipeline
# ✅ Manually download the NSFW model weights to `/tmp` so the pipeline loads from the writable cache
try:
    hf_hub_download(repo_id="LukeJacob2023/nsfw-image-detector", filename="pytorch_model.bin", cache_dir="/tmp")
    pipe = pipeline(
        "image-classification",
        model="LukeJacob2023/nsfw-image-detector",
        model_kwargs={"cache_dir": "/tmp"},  # forwarded to from_pretrained() so the cache stays in /tmp
    )
    print("✅ NSFW Model Loaded Successfully!")
except Exception as e:
    print(f"❌ Error Loading NSFW Model: {e}")
    exit(1)
# ✅ Load Toxic Text Classification Model
try:
    model = joblib.load("toxic_classifier.pkl")
    vectorizer = joblib.load("vectorizer.pkl")
    print("✅ Toxic Text Model & Vectorizer Loaded Successfully!")
except Exception as e:
    print(f"❌ Error Loading Toxic Text Model: {e}")
    exit(1)
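# The two pickles above are expected to contain a fitted text vectorizer (exposing
# .transform()) and a binary classifier (whose .predict() returns 1 for toxic text).
# A minimal sketch of how such artifacts could be produced with scikit-learn is kept
# below as a comment; the training data, features, and model choice are assumptions,
# not the actual training code behind these files.
#
#   from sklearn.feature_extraction.text import TfidfVectorizer
#   from sklearn.linear_model import LogisticRegression
#   import joblib
#
#   texts, labels = ["you are awful", "have a nice day"], [1, 0]  # placeholder data
#   vec = TfidfVectorizer()
#   X = vec.fit_transform(texts)
#   clf = LogisticRegression().fit(X, labels)
#   joblib.dump(clf, "toxic_classifier.pkl")
#   joblib.dump(vec, "vectorizer.pkl")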
# ✅ Initialize FastAPI
app = FastAPI()

# Text Input Model
class TextInput(BaseModel):
    text: str
# 🔹 Text Preprocessing Function
def preprocess_text(text):
    text = text.lower()
    text = re.sub(r'\d+', '', text)  # Remove numbers
    text = text.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
    return text.strip()
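# Example of the transformation above (lowercased, digits and punctuation removed):
#   preprocess_text("Hello, World 123!!")  ->  "hello world"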
# NSFW Image Classification API
@app.post("/classify_image/")  # register the endpoint (path is illustrative; adjust as needed)
async def classify_image(file: UploadFile = File(...)):
    try:
        image = Image.open(io.BytesIO(await file.read()))
        results = pipe(image)  # list of {"label": ..., "score": ...} entries
        classification_label = max(results, key=lambda x: x['score'])['label']
        nsfw_labels = {"sexy", "porn", "hentai"}
        nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
        return {"status": nsfw_status, "results": results}
    except Exception as e:
        return {"error": str(e)}
# Toxic Text Classification API
@app.post("/classify_text/")  # register the endpoint (path is illustrative; adjust as needed)
async def classify_text(data: TextInput):
    try:
        processed_text = preprocess_text(data.text)
        text_vectorized = vectorizer.transform([processed_text])
        prediction = model.predict(text_vectorized)
        result = "Toxic" if prediction[0] == 1 else "Safe"
        return {"prediction": result}
    except Exception as e:
        return {"error": str(e)}
# ✅ Run FastAPI using Uvicorn (Hugging Face Spaces expects port 7860)
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
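# --- Example client calls (illustrative) ---
# The snippet below sketches how the two endpoints could be exercised once the server
# is running. The base URL and the "/classify_text/" / "/classify_image/" paths are
# assumptions that match the decorators above; adjust them to the real deployment.
#
#   import requests
#
#   BASE_URL = "http://localhost:7860"
#
#   # Text moderation
#   r = requests.post(f"{BASE_URL}/classify_text/", json={"text": "you are awful"})
#   print(r.json())  # e.g. {"prediction": "Toxic"} or {"prediction": "Safe"}
#
#   # Image moderation
#   with open("example.jpg", "rb") as f:
#       r = requests.post(f"{BASE_URL}/classify_image/", files={"file": ("example.jpg", f, "image/jpeg")})
#   print(r.json())  # {"status": "NSFW" or "SFW", "results": [...]}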