# Presidio Analyzer microservice: Flask app exposing a POST /analyze endpoint.
import logging
import os

from flask import Flask, request, jsonify, make_response

# Import ONLY the provider; it handles the full engine configuration.
from presidio_analyzer import AnalyzerEngineProvider

# Logging configuration for the whole service.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)

# Flask application instance.
app = Flask(__name__)
# --- Global initialization of the analyzer via the provider ---
analyzer = None
try:
    logger.info("--- Presidio Analyzer Service Starting ---")

    # Config file path comes from the environment, with a default fallback.
    config_path = os.environ.get("PRESIDIO_ANALYZER_CONFIG_FILE", "conf/default.yaml")

    # Build the engine through the provider.
    engine_provider = AnalyzerEngineProvider(analyzer_engine_conf_file=config_path)
    analyzer = engine_provider.create_engine()

    logger.info(f"Analyzer created successfully, supported languages: {analyzer.supported_languages}")
    logger.info("--- Presidio Analyzer Service Ready ---")
except Exception:
    # Keep the service importable even if the engine fails to start;
    # the /analyze route reports the failure to callers.
    logger.exception("FATAL: Error during AnalyzerEngine initialization.")
    analyzer = None
@app.route('/analyze', methods=['POST'])
def analyze_text():
    """Analyze POSTed text with Presidio and return detected PII entities.

    Expects a JSON body: {"text": "...", "language": "fr"} — "language" is
    optional and defaults to "fr". Entities whose exact text matches one of
    the known section labels/headings is filtered out of the response.

    Returns:
        200: JSON list of entity dicts (as produced by RecognizerResult.to_dict()).
        400: "text" field missing/empty, or no recognizer for the language.
        500: analyzer unavailable or unexpected analysis error.
    """
    if not analyzer:
        return jsonify({"error": "Analyzer engine is not available. Check startup logs."}), 500

    # Bind before the try block so the except handler can reference it safely.
    # Previously `language` was assigned inside the try: if get_json() raised
    # first, the handler's f-string raised NameError and masked the real error.
    language = "fr"
    try:
        # `or {}` guards against get_json returning None on an empty body.
        data = request.get_json(force=True) or {}
        text_to_analyze = data.get("text", "")
        language = data.get("language", "fr")

        if not text_to_analyze:
            return jsonify({"error": "text field is missing or empty"}), 400

        # Run the Presidio analysis.
        results = analyzer.analyze(
            text=text_to_analyze,
            language=language
        )

        # Labels/headings that must NOT be anonymized.
        ignore_labels = {
            "Témoins",
            "Témoins clés",
            "Coordonnées",
            "Coordonnées bancaires",
            "Contexte financier",
            "Données sensibles",
            "Contexte",
            # Add any other problematic label here.
        }
        # Normalize once, outside the filtering loop.
        ignore_normalized = {label.strip().lower() for label in ignore_labels}

        # Post-analysis filtering: drop entities whose text is a known label.
        filtered_results = [
            res for res in results
            if text_to_analyze[res.start:res.end].strip().lower() not in ignore_normalized
        ]

        # Build the JSON response.
        response_data = [res.to_dict() for res in filtered_results]
        return make_response(jsonify(response_data), 200)

    except Exception as e:
        logger.exception(f"Error during analysis for language '{language}'.")
        if "No matching recognizers" in str(e):
            return jsonify({"error": f"No recognizers available for language '{language}'."}), 400
        return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
    # Local/dev entry point: listen on all interfaces, port 5001.
    app.run(host='0.0.0.0', port=5001)