feat: robust metric extraction with confidence score and proof snippets
- Fixed the year-prefix bug in MetricParser
- Added metric_confidence and metric_proof_text columns to the database
- Added an entity check and annual-report priority to the LLM prompt
- Improved UI: confidence traffic light and mouse-over proof tooltip
- Restored missing API endpoints (create, bulk, wiki-override)
This commit is contained in:
29
company-explorer/reproduce_metric_bug.py
Normal file
29
company-explorer/reproduce_metric_bug.py
Normal file
@@ -0,0 +1,29 @@
|
||||
"""Reproduction script for the MetricParser numeric-extraction bug."""
import os
import sys

# Make the backend package importable when the script is run from the repo root.
sys.path.append(os.path.join(os.getcwd(), 'backend'))

from lib.metric_parser import MetricParser
def test_parser():
    """Exercise MetricParser.extract_numeric against known-tricky inputs.

    Prints one "PASSED"/"FAILED" line per case, and returns the list of
    failing (input_text, result, expected) triples so callers can detect
    regressions programmatically (previously nothing was returned, so a
    failure was only visible in stdout).
    """
    parser = MetricParser()

    # (raw text, expected numeric value) — covers trailing years,
    # German thousands separators (1.005), decimal commas (375,6),
    # and unit/approximation noise ("ca.", "m²").
    test_cases = [
        ("80 (2020)", 80.0),
        ("80 (Stand 2022)", 80.0),
        ("1.005 Mitarbeiter", 1005.0),
        ("375,6 Mio. €", 375.6),
        ("ca. 50.000 m²", 50000.0),
    ]

    failures = []
    for text, expected in test_cases:
        result = parser.extract_numeric(text)
        print(f"Input: '{text}' -> Result: {result} (Expected: {expected})")
        if result != expected:
            print(" FAILED")
            failures.append((text, result, expected))
        else:
            print(" PASSED")
    return failures
if __name__ == "__main__":
    # Exit non-zero when test_parser() reports failures so shell/CI callers
    # can detect a regression; a None or empty return maps to exit status 0,
    # matching the previous always-zero behavior on success.
    raise SystemExit(1 if test_parser() else 0)
Reference in New Issue
Block a user