Update app.py

This commit is contained in:
Nacim
2025-06-23 16:46:10 +02:00
committed by GitHub
parent 2b9236bbe1
commit 0811abf23f

71
app.py
View File

@@ -5,20 +5,16 @@ from flask import Flask, request, jsonify, make_response
from presidio_analyzer import AnalyzerEngine, RecognizerRegistry, PatternRecognizer, Pattern from presidio_analyzer import AnalyzerEngine, RecognizerRegistry, PatternRecognizer, Pattern
from presidio_analyzer.nlp_engine import NlpEngineProvider from presidio_analyzer.nlp_engine import NlpEngineProvider
# On importe les classes des détecteurs prédéfinis que l'on veut pouvoir utiliser depuis le YAML
from presidio_analyzer.predefined_recognizers import ( from presidio_analyzer.predefined_recognizers import (
CreditCardRecognizer, CryptoRecognizer, DateRecognizer, IpRecognizer, CreditCardRecognizer, CryptoRecognizer, DateRecognizer, IpRecognizer,
MedicalLicenseRecognizer, UrlRecognizer, SpacyRecognizer MedicalLicenseRecognizer, UrlRecognizer, SpacyRecognizer
) )
# Configuration du logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Initialisation de l'application Flask
app = Flask(__name__) app = Flask(__name__)
# --- Dictionnaire pour mapper les noms du YAML aux classes Python ---
PREDEFINED_RECOGNIZERS_MAP = { PREDEFINED_RECOGNIZERS_MAP = {
"SpacyRecognizer": SpacyRecognizer, "SpacyRecognizer": SpacyRecognizer,
"CreditCardRecognizer": CreditCardRecognizer, "CreditCardRecognizer": CreditCardRecognizer,
@@ -29,64 +25,67 @@ PREDEFINED_RECOGNIZERS_MAP = {
"UrlRecognizer": UrlRecognizer, "UrlRecognizer": UrlRecognizer,
} }
# --- Initialisation Globale de l'Analyseur ---
analyzer = None analyzer = None
try: try:
logger.info("--- Presidio Analyzer Service Starting ---") logger.info("--- Presidio Analyzer Service Starting ---")
# 1. Charger la configuration depuis le fichier YAML
CONFIG_FILE_PATH = os.environ.get("PRESIDIO_ANALYZER_CONFIG_FILE", "conf/default.yaml") CONFIG_FILE_PATH = os.environ.get("PRESIDIO_ANALYZER_CONFIG_FILE", "conf/default.yaml")
logger.info(f"Loading configuration from: {CONFIG_FILE_PATH}") logger.info(f"Loading configuration from: {CONFIG_FILE_PATH}")
with open(CONFIG_FILE_PATH, 'r', encoding='utf-8') as f: with open(CONFIG_FILE_PATH, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f) config = yaml.safe_load(f)
logger.info("Configuration file loaded successfully.") logger.info("Configuration file loaded successfully.")
# 2. Créer le fournisseur de moteur NLP
logger.info("Creating NLP engine provider...") logger.info("Creating NLP engine provider...")
provider = NlpEngineProvider(nlp_configuration=config) provider = NlpEngineProvider(nlp_configuration=config)
# 3. Créer le registre de recognizers EN SUIVANT LE YAML
logger.info("Creating and populating recognizer registry from config file...") logger.info("Creating and populating recognizer registry from config file...")
registry = RecognizerRegistry() registry = RecognizerRegistry()
supported_languages = config.get("supported_languages", ["en"]) supported_languages = config.get("supported_languages", ["en"])
# === DÉBUT DE LA CORRECTION MAJEURE === # Étape A: Construire les détecteurs personnalisés
# Étape A: On pré-construit tous les détecteurs personnalisés ("custom") définis dans la section 'recognizers'
custom_recognizers = {} custom_recognizers = {}
for recognizer_conf in config.get("recognizers", []): for recognizer_conf in config.get("recognizers", []):
patterns = [Pattern(name=p['name'], regex=p['regex'], score=p['score']) for p in recognizer_conf['patterns']] patterns = [Pattern(name=p['name'], regex=p['regex'], score=p['score']) for p in recognizer_conf['patterns']]
custom_recognizer = PatternRecognizer( custom_recognizers[recognizer_conf['name']] = PatternRecognizer(
supported_entity=recognizer_conf['entity_name'], supported_entity=recognizer_conf['entity_name'],
name=recognizer_conf['name'], name=recognizer_conf['name'],
supported_language=recognizer_conf['supported_language'], supported_language=recognizer_conf['supported_language'],
patterns=patterns, patterns=patterns,
context=recognizer_conf.get('context') context=recognizer_conf.get('context')
) )
custom_recognizers[recognizer_conf['name']] = custom_recognizer
# Étape B: On parcourt la liste 'recognizer_registry' pour activer les détecteurs demandés # Étape B: Activer les détecteurs listés dans recognizer_registry
for recognizer_name in config.get("recognizer_registry", []): for recognizer_name in config.get("recognizer_registry", []):
# Cas 1: Le détecteur est dans notre liste de détecteurs personnalisés
if recognizer_name in custom_recognizers: if recognizer_name in custom_recognizers:
registry.add_recognizer(custom_recognizers[recognizer_name]) registry.add_recognizer(custom_recognizers[recognizer_name])
logger.info(f"Loaded custom recognizer from registry list: {recognizer_name}") logger.info(f"Loaded CUSTOM recognizer from list: {recognizer_name}")
# Cas 2: Le détecteur est un détecteur prédéfini connu
elif recognizer_name in PREDEFINED_RECOGNIZERS_MAP: elif recognizer_name in PREDEFINED_RECOGNIZERS_MAP:
recognizer_class = PREDEFINED_RECOGNIZERS_MAP[recognizer_name] recognizer_class = PREDEFINED_RECOGNIZERS_MAP[recognizer_name]
# On crée une instance pour chaque langue supportée (en, fr)
for lang in supported_languages: for lang in supported_languages:
# CORRECTION : On utilise le mot-clé au singulier 'supported_language'
instance = recognizer_class(supported_language=lang) instance = recognizer_class(supported_language=lang)
registry.add_recognizer(instance) registry.add_recognizer(instance)
logger.info(f"Loaded predefined recognizer '{recognizer_name}' for languages: {supported_languages}") logger.info(f"Loaded PREDEFINED recognizer '{recognizer_name}' for languages: {supported_languages}")
else: else:
logger.warning(f"Recognizer '{recognizer_name}' from registry list was not found in custom or predefined lists.") logger.warning(f"Recognizer '{recognizer_name}' from registry list was not found.")
# === FIN DE LA CORRECTION MAJEURE === # === DÉBUT DU BLOC DE DIAGNOSTIC ===
logger.info("=================================================================")
logger.info("DIAGNOSTIC: FINAL REGISTRY STATE BEFORE ANALYZER ENGINE CREATION")
logger.info(f"Expected languages: {supported_languages}")
# On demande au registre lui-même quelles langues il pense supporter
actual_registry_langs = registry.supported_languages
logger.info(f"Actual languages reported by registry.supported_languages: {actual_registry_langs}")
logger.info("--- Detailed Recognizer List ---")
if not registry.recognizers:
logger.info("Registry is empty.")
for i, rec in enumerate(registry.recognizers):
logger.info(f" {i+1}: Recognizer='{rec.name}', Supported Languages={rec.supported_languages}, Entities={rec.supported_entities}")
logger.info("=================================================================")
# === FIN DU BLOC DE DIAGNOSTIC ===
# 4. Créer l'AnalyzerEngine avec tous les composants
logger.info("Initializing AnalyzerEngine with custom components...") logger.info("Initializing AnalyzerEngine with custom components...")
analyzer = AnalyzerEngine( analyzer = AnalyzerEngine(
nlp_engine=provider.create_engine(), nlp_engine=provider.create_engine(),
@@ -101,29 +100,19 @@ except Exception as e:
logger.exception("FATAL: Error during AnalyzerEngine initialization.") logger.exception("FATAL: Error during AnalyzerEngine initialization.")
analyzer = None analyzer = None
# Le reste du fichier Flask reste identique... # Le reste du fichier Flask est identique
@app.route('/analyze', methods=['POST'])
def analyze_text():
    """Analyze the posted text with Presidio and return detected entities.

    Expects a JSON body: ``{"text": "...", "language": "fr"}``.
    Returns a JSON list of recognizer results (``to_dict()`` of each
    ``RecognizerResult``) with HTTP 200, or a JSON error object with an
    appropriate 4xx/5xx status.
    """
    # Module-level initialization may have failed; refuse requests then
    # rather than crash on a None analyzer.
    if not analyzer:
        return jsonify({"error": "Analyzer engine is not available."}), 500
    try:
        # force=True parses the body as JSON regardless of Content-Type;
        # silent=True returns None instead of raising on a malformed body,
        # so bad input yields a clean 400 instead of a 500 from the broad
        # except below.
        data = request.get_json(force=True, silent=True)
        if data is None:
            return jsonify({"error": "invalid or missing JSON body"}), 400
        text = data.get("text", "")
        # Default language is French — presumably matches the deployed
        # NLP models; TODO confirm against conf/default.yaml.
        lang = data.get("language", "fr")
        if not text:
            return jsonify({"error": "text field is missing"}), 400
        results = analyzer.analyze(text=text, language=lang)
        return make_response(jsonify([res.to_dict() for res in results]), 200)
    except Exception as e:
        # Log the full traceback server-side; surface only the message to
        # the caller.
        logger.exception("Error during analysis request.")
        return jsonify({"error": str(e)}), 500
if __name__ == '__main__': if __name__ == '__main__':