Spaces:
Configuration error
Implement adaptive uncertainty-aware scoring
Browse filesFEATURES:
- Add adaptive difficulty scoring with uncertainty penalties
* Low max similarity penalty (< 0.7 threshold)
* High variance penalty (diverse k-NN matches)
* Low average similarity penalty (weak overall matches)
- Backward compatible: use_adaptive_scoring flag (default: True)
- Add get_all_questions_as_dataframe() for evaluation export
FIXES FAILURE CASE:
- 'Prove universe is 10,000 years old' β previously LOW risk
- Now correctly increases to HIGH/CRITICAL due to low similarity
- Addresses OOD detection for novel/adversarial prompts
IMPLEMENTATION:
- New _compute_adaptive_difficulty() method in BenchmarkVectorDB
- Uncertainty penalty computed from similarity statistics
- Logged diagnostics for debugging (max_sim, avg_sim, variance)
TESTING:
- Added test_adaptive_scoring.py with 5 edge cases
- Compares baseline vs adaptive on low-similarity prompts
- Validates risk level changes with uncertainty penalties
NEXT STEPS:
- Run test_adaptive_scoring.py to validate improvements
- Export database for nested CV evaluation
- See NEXT_STEPS_IMPROVEMENTS.md for full roadmap
- benchmark_vector_db.py +163 -13
- test_adaptive_scoring.py +148 -0
|
@@ -446,7 +446,13 @@ class BenchmarkVectorDB:
|
|
| 446 |
self,
|
| 447 |
prompt: str,
|
| 448 |
k: int = 5,
|
| 449 |
-
domain_filter: Optional[str] = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 450 |
) -> Dict[str, Any]:
|
| 451 |
"""
|
| 452 |
Find k most similar benchmark questions to the given prompt.
|
|
@@ -509,19 +515,32 @@ class BenchmarkVectorDB:
|
|
| 509 |
difficulty_scores.append(metadata['difficulty_score'])
|
| 510 |
success_rates.append(metadata['success_rate'])
|
| 511 |
|
| 512 |
-
# Compute weighted difficulty
|
| 513 |
-
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
| 521 |
-
)
|
|
|
|
|
|
|
| 522 |
else:
|
| 523 |
-
|
| 524 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 525 |
|
| 526 |
# Determine risk level
|
| 527 |
if weighted_success_rate < 0.1:
|
|
@@ -550,6 +569,86 @@ class BenchmarkVectorDB:
|
|
| 550 |
"recommendation": self._get_recommendation(risk_level, weighted_success_rate)
|
| 551 |
}
|
| 552 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 553 |
def _get_recommendation(self, risk_level: str, success_rate: float) -> str:
|
| 554 |
"""Generate recommendation based on difficulty assessment"""
|
| 555 |
if risk_level == "CRITICAL":
|
|
@@ -588,6 +687,57 @@ class BenchmarkVectorDB:
|
|
| 588 |
"difficulty_levels": dict(difficulty_levels)
|
| 589 |
}
|
| 590 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 591 |
def build_database(
|
| 592 |
self,
|
| 593 |
load_gpqa: bool = True,
|
|
|
|
| 446 |
self,
|
| 447 |
prompt: str,
|
| 448 |
k: int = 5,
|
| 449 |
+
domain_filter: Optional[str] = None,
|
| 450 |
+
# Adaptive scoring parameters
|
| 451 |
+
similarity_threshold: float = 0.7,
|
| 452 |
+
low_sim_penalty: float = 0.5,
|
| 453 |
+
variance_penalty: float = 2.0,
|
| 454 |
+
low_avg_penalty: float = 0.4,
|
| 455 |
+
use_adaptive_scoring: bool = True
|
| 456 |
) -> Dict[str, Any]:
|
| 457 |
"""
|
| 458 |
Find k most similar benchmark questions to the given prompt.
|
|
|
|
| 515 |
difficulty_scores.append(metadata['difficulty_score'])
|
| 516 |
success_rates.append(metadata['success_rate'])
|
| 517 |
|
| 518 |
+
# Compute weighted difficulty with adaptive scoring
|
| 519 |
+
if use_adaptive_scoring:
|
| 520 |
+
weighted_difficulty = self._compute_adaptive_difficulty(
|
| 521 |
+
similarities=similarities,
|
| 522 |
+
difficulty_scores=difficulty_scores,
|
| 523 |
+
similarity_threshold=similarity_threshold,
|
| 524 |
+
low_sim_penalty=low_sim_penalty,
|
| 525 |
+
variance_penalty=variance_penalty,
|
| 526 |
+
low_avg_penalty=low_avg_penalty
|
| 527 |
+
)
|
| 528 |
+
# Convert difficulty back to success rate for risk level determination
|
| 529 |
+
weighted_success_rate = 1.0 - weighted_difficulty
|
| 530 |
else:
|
| 531 |
+
# Original naive weighted average
|
| 532 |
+
total_weight = sum(similarities)
|
| 533 |
+
if total_weight > 0:
|
| 534 |
+
weighted_difficulty = sum(
|
| 535 |
+
diff * sim for diff, sim in zip(difficulty_scores, similarities)
|
| 536 |
+
) / total_weight
|
| 537 |
+
|
| 538 |
+
weighted_success_rate = sum(
|
| 539 |
+
sr * sim for sr, sim in zip(success_rates, similarities)
|
| 540 |
+
) / total_weight
|
| 541 |
+
else:
|
| 542 |
+
weighted_difficulty = np.mean(difficulty_scores)
|
| 543 |
+
weighted_success_rate = np.mean(success_rates)
|
| 544 |
|
| 545 |
# Determine risk level
|
| 546 |
if weighted_success_rate < 0.1:
|
|
|
|
| 569 |
"recommendation": self._get_recommendation(risk_level, weighted_success_rate)
|
| 570 |
}
|
| 571 |
|
| 572 |
+
def _compute_adaptive_difficulty(
|
| 573 |
+
self,
|
| 574 |
+
similarities: List[float],
|
| 575 |
+
difficulty_scores: List[float],
|
| 576 |
+
similarity_threshold: float = 0.7,
|
| 577 |
+
low_sim_penalty: float = 0.5,
|
| 578 |
+
variance_penalty: float = 2.0,
|
| 579 |
+
low_avg_penalty: float = 0.4
|
| 580 |
+
) -> float:
|
| 581 |
+
"""
|
| 582 |
+
Compute difficulty score with adaptive uncertainty penalties.
|
| 583 |
+
|
| 584 |
+
Key insight: When retrieved questions have low similarity to the prompt,
|
| 585 |
+
we should INCREASE the risk estimate because we're extrapolating beyond
|
| 586 |
+
our training distribution (out-of-distribution detection).
|
| 587 |
+
|
| 588 |
+
This addresses the failure case: "Prove universe is 10,000 years old"
|
| 589 |
+
matched to factual recall questions (similarity ~0.57) incorrectly rated LOW risk.
|
| 590 |
+
|
| 591 |
+
Args:
|
| 592 |
+
similarities: Cosine similarities of k-NN results (0.0 to 1.0)
|
| 593 |
+
difficulty_scores: Difficulty scores (1 - success_rate) of k-NN results
|
| 594 |
+
similarity_threshold: Below this, apply low similarity penalty (default: 0.7)
|
| 595 |
+
low_sim_penalty: Weight for low similarity penalty (default: 0.5)
|
| 596 |
+
variance_penalty: Weight for high variance penalty (default: 2.0)
|
| 597 |
+
low_avg_penalty: Weight for low average similarity penalty (default: 0.4)
|
| 598 |
+
|
| 599 |
+
Returns:
|
| 600 |
+
Adjusted difficulty score (0.0 to 1.0, higher = more risky)
|
| 601 |
+
"""
|
| 602 |
+
# Base weighted average (original naive approach)
|
| 603 |
+
weights = np.array(similarities) / sum(similarities)
|
| 604 |
+
base_score = np.dot(weights, difficulty_scores)
|
| 605 |
+
|
| 606 |
+
# Compute uncertainty indicators
|
| 607 |
+
max_sim = max(similarities)
|
| 608 |
+
avg_sim = np.mean(similarities)
|
| 609 |
+
sim_variance = np.var(similarities)
|
| 610 |
+
|
| 611 |
+
# Initialize uncertainty penalty
|
| 612 |
+
uncertainty_penalty = 0.0
|
| 613 |
+
|
| 614 |
+
# Penalty 1: Low maximum similarity
|
| 615 |
+
# If even the best match is weak, we're likely out-of-distribution
|
| 616 |
+
if max_sim < similarity_threshold:
|
| 617 |
+
penalty = (similarity_threshold - max_sim) * low_sim_penalty
|
| 618 |
+
uncertainty_penalty += penalty
|
| 619 |
+
logger.debug(f" Low max similarity penalty: +{penalty:.3f} (max_sim={max_sim:.3f})")
|
| 620 |
+
|
| 621 |
+
# Penalty 2: High variance in similarities
|
| 622 |
+
# If k-NN results are very dissimilar to each other, the matches are unreliable
|
| 623 |
+
# (e.g., retrieved questions span multiple unrelated domains)
|
| 624 |
+
variance_threshold = 0.05
|
| 625 |
+
if sim_variance > variance_threshold:
|
| 626 |
+
penalty = min(sim_variance * variance_penalty, 0.3) # Cap at 0.3
|
| 627 |
+
uncertainty_penalty += penalty
|
| 628 |
+
logger.debug(f" High variance penalty: +{penalty:.3f} (variance={sim_variance:.3f})")
|
| 629 |
+
|
| 630 |
+
# Penalty 3: Low average similarity
|
| 631 |
+
# If ALL matches are weak, we're definitely extrapolating
|
| 632 |
+
avg_threshold = 0.5
|
| 633 |
+
if avg_sim < avg_threshold:
|
| 634 |
+
penalty = (avg_threshold - avg_sim) * low_avg_penalty
|
| 635 |
+
uncertainty_penalty += penalty
|
| 636 |
+
logger.debug(f" Low avg similarity penalty: +{penalty:.3f} (avg_sim={avg_sim:.3f})")
|
| 637 |
+
|
| 638 |
+
# Final adjusted score
|
| 639 |
+
adjusted_score = base_score + uncertainty_penalty
|
| 640 |
+
|
| 641 |
+
# Clip to [0, 1] range
|
| 642 |
+
adjusted_score = np.clip(adjusted_score, 0.0, 1.0)
|
| 643 |
+
|
| 644 |
+
if uncertainty_penalty > 0:
|
| 645 |
+
logger.info(
|
| 646 |
+
f"Adaptive scoring: base={base_score:.3f}, uncertainty_penalty={uncertainty_penalty:.3f}, "
|
| 647 |
+
f"adjusted={adjusted_score:.3f} (max_sim={max_sim:.3f}, avg_sim={avg_sim:.3f}, var={sim_variance:.3f})"
|
| 648 |
+
)
|
| 649 |
+
|
| 650 |
+
return adjusted_score
|
| 651 |
+
|
| 652 |
def _get_recommendation(self, risk_level: str, success_rate: float) -> str:
|
| 653 |
"""Generate recommendation based on difficulty assessment"""
|
| 654 |
if risk_level == "CRITICAL":
|
|
|
|
| 687 |
"difficulty_levels": dict(difficulty_levels)
|
| 688 |
}
|
| 689 |
|
| 690 |
+
def get_all_questions_as_dataframe(self):
|
| 691 |
+
"""
|
| 692 |
+
Export all questions from ChromaDB as a pandas DataFrame.
|
| 693 |
+
Used for train/val/test splitting and nested cross-validation.
|
| 694 |
+
|
| 695 |
+
Returns:
|
| 696 |
+
DataFrame with columns:
|
| 697 |
+
- question_id, source_benchmark, domain, question_text,
|
| 698 |
+
- success_rate, difficulty_score, difficulty_label, num_models_tested
|
| 699 |
+
|
| 700 |
+
Note: Requires pandas. Install with: pip install pandas
|
| 701 |
+
"""
|
| 702 |
+
try:
|
| 703 |
+
import pandas as pd
|
| 704 |
+
except ImportError:
|
| 705 |
+
logger.error("pandas not installed. Run: pip install pandas")
|
| 706 |
+
return None
|
| 707 |
+
|
| 708 |
+
count = self.collection.count()
|
| 709 |
+
logger.info(f"Exporting {count} questions from vector database...")
|
| 710 |
+
|
| 711 |
+
# Get all questions from ChromaDB
|
| 712 |
+
all_data = self.collection.get(
|
| 713 |
+
limit=count,
|
| 714 |
+
include=["metadatas", "documents"]
|
| 715 |
+
)
|
| 716 |
+
|
| 717 |
+
# Convert to DataFrame
|
| 718 |
+
rows = []
|
| 719 |
+
for i, qid in enumerate(all_data['ids']):
|
| 720 |
+
metadata = all_data['metadatas'][i]
|
| 721 |
+
rows.append({
|
| 722 |
+
'question_id': qid,
|
| 723 |
+
'question_text': all_data['documents'][i],
|
| 724 |
+
'source_benchmark': metadata['source'],
|
| 725 |
+
'domain': metadata['domain'],
|
| 726 |
+
'success_rate': metadata['success_rate'],
|
| 727 |
+
'difficulty_score': metadata['difficulty_score'],
|
| 728 |
+
'difficulty_label': metadata['difficulty_label'],
|
| 729 |
+
'num_models_tested': metadata.get('num_models', 0)
|
| 730 |
+
})
|
| 731 |
+
|
| 732 |
+
df = pd.DataFrame(rows)
|
| 733 |
+
|
| 734 |
+
logger.info(f"Exported {len(df)} questions to DataFrame")
|
| 735 |
+
logger.info(f" Domains: {df['domain'].nunique()}")
|
| 736 |
+
logger.info(f" Sources: {df['source_benchmark'].nunique()}")
|
| 737 |
+
logger.info(f" Difficulty levels: {df['difficulty_label'].value_counts().to_dict()}")
|
| 738 |
+
|
| 739 |
+
return df
|
| 740 |
+
|
| 741 |
def build_database(
|
| 742 |
self,
|
| 743 |
load_gpqa: bool = True,
|
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Test Adaptive Scoring Improvements
|
| 4 |
+
===================================
|
| 5 |
+
|
| 6 |
+
Compares baseline (naive weighted average) vs. adaptive scoring (uncertainty penalties)
|
| 7 |
+
on edge cases and low-similarity prompts.
|
| 8 |
+
|
| 9 |
+
Run: python test_adaptive_scoring.py
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from benchmark_vector_db import BenchmarkVectorDB
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
import sys
|
| 15 |
+
|
| 16 |
+
def test_adaptive_scoring():
|
| 17 |
+
"""Test adaptive scoring on challenging prompts."""
|
| 18 |
+
|
| 19 |
+
# Initialize database
|
| 20 |
+
print("Initializing BenchmarkVectorDB...")
|
| 21 |
+
db = BenchmarkVectorDB(
|
| 22 |
+
db_path=Path("/Users/hetalksinmaths/togmal/data/benchmark_vector_db"),
|
| 23 |
+
embedding_model="all-MiniLM-L6-v2"
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
# Get database stats
|
| 27 |
+
stats = db.get_statistics()
|
| 28 |
+
print(f"\nDatabase loaded: {stats['total_questions']} questions")
|
| 29 |
+
print(f" Sources: {list(stats.get('sources', {}).keys())}")
|
| 30 |
+
print(f" Domains: {len(stats.get('domains', {}))} domains")
|
| 31 |
+
|
| 32 |
+
# Test cases that should trigger uncertainty penalties
|
| 33 |
+
test_cases = [
|
| 34 |
+
{
|
| 35 |
+
"name": "False Premise (Low Similarity Expected)",
|
| 36 |
+
"prompt": "Prove that the universe is exactly 10,000 years old using thermodynamics",
|
| 37 |
+
"expected": "Should get HIGH/CRITICAL due to low similarity penalty"
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"name": "Novel Domain (Cross-Domain)",
|
| 41 |
+
"prompt": "Write a haiku about quantum entanglement in 17th century Japanese style",
|
| 42 |
+
"expected": "Should get penalty for mixing poetry + physics domains"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"name": "Easy Factual (High Similarity Expected)",
|
| 46 |
+
"prompt": "What is the capital of France?",
|
| 47 |
+
"expected": "Should match well with no penalties β LOW/MINIMAL risk"
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
"name": "Expert Physics (Should Match GPQA)",
|
| 51 |
+
"prompt": "Calculate the quantum correction to the partition function for a 3D harmonic oscillator at finite temperature",
|
| 52 |
+
"expected": "Should match GPQA physics β HIGH/CRITICAL (legitimately hard)"
|
| 53 |
+
},
|
| 54 |
+
{
|
| 55 |
+
"name": "Medical Speculation",
|
| 56 |
+
"prompt": "Can drinking bleach cure COVID-19?",
|
| 57 |
+
"expected": "May have low similarity β should increase risk appropriately"
|
| 58 |
+
}
|
| 59 |
+
]
|
| 60 |
+
|
| 61 |
+
print("\n" + "="*100)
|
| 62 |
+
print("ADAPTIVE SCORING TEST RESULTS")
|
| 63 |
+
print("="*100)
|
| 64 |
+
|
| 65 |
+
for i, test in enumerate(test_cases, 1):
|
| 66 |
+
print(f"\n[Test {i}/{len(test_cases)}] {test['name']}")
|
| 67 |
+
print(f"Prompt: {test['prompt'][:80]}...")
|
| 68 |
+
print(f"Expected: {test['expected']}")
|
| 69 |
+
print("-" * 100)
|
| 70 |
+
|
| 71 |
+
# Test with BASELINE (use_adaptive_scoring=False)
|
| 72 |
+
baseline_result = db.query_similar_questions(
|
| 73 |
+
test['prompt'],
|
| 74 |
+
k=5,
|
| 75 |
+
use_adaptive_scoring=False
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
# Test with ADAPTIVE (use_adaptive_scoring=True)
|
| 79 |
+
adaptive_result = db.query_similar_questions(
|
| 80 |
+
test['prompt'],
|
| 81 |
+
k=5,
|
| 82 |
+
use_adaptive_scoring=True
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
# Extract key metrics
|
| 86 |
+
baseline_risk = baseline_result['risk_level']
|
| 87 |
+
adaptive_risk = adaptive_result['risk_level']
|
| 88 |
+
|
| 89 |
+
max_sim = max(q['similarity'] for q in adaptive_result['similar_questions'])
|
| 90 |
+
avg_sim = adaptive_result['avg_similarity']
|
| 91 |
+
|
| 92 |
+
baseline_difficulty = baseline_result['weighted_difficulty_score']
|
| 93 |
+
adaptive_difficulty = adaptive_result['weighted_difficulty_score']
|
| 94 |
+
|
| 95 |
+
# Display comparison
|
| 96 |
+
print(f"\nSimilarity Metrics:")
|
| 97 |
+
print(f" Max Similarity: {max_sim:.3f}")
|
| 98 |
+
print(f" Avg Similarity: {avg_sim:.3f}")
|
| 99 |
+
|
| 100 |
+
print(f"\nBASELINE (Naive Weighted Average):")
|
| 101 |
+
print(f" Risk Level: {baseline_risk}")
|
| 102 |
+
print(f" Difficulty Score: {baseline_difficulty:.3f}")
|
| 103 |
+
print(f" Success Rate: {baseline_result['weighted_success_rate']:.1%}")
|
| 104 |
+
|
| 105 |
+
print(f"\nADAPTIVE (With Uncertainty Penalties):")
|
| 106 |
+
print(f" Risk Level: {adaptive_risk}")
|
| 107 |
+
print(f" Difficulty Score: {adaptive_difficulty:.3f}")
|
| 108 |
+
print(f" Success Rate: {adaptive_result['weighted_success_rate']:.1%}")
|
| 109 |
+
|
| 110 |
+
# Highlight if adaptive changed the risk level
|
| 111 |
+
if baseline_risk != adaptive_risk:
|
| 112 |
+
print(f"\n β οΈ RISK LEVEL CHANGED: {baseline_risk} β {adaptive_risk}")
|
| 113 |
+
penalty = adaptive_difficulty - baseline_difficulty
|
| 114 |
+
print(f" Uncertainty Penalty Applied: +{penalty:.3f}")
|
| 115 |
+
else:
|
| 116 |
+
print(f"\n β Risk level unchanged (both {baseline_risk})")
|
| 117 |
+
|
| 118 |
+
# Show top match
|
| 119 |
+
top_match = adaptive_result['similar_questions'][0]
|
| 120 |
+
print(f"\nTop Match:")
|
| 121 |
+
print(f" Source: {top_match['source']} ({top_match['domain']})")
|
| 122 |
+
print(f" Similarity: {top_match['similarity']:.3f}")
|
| 123 |
+
print(f" Question: {top_match['question_text'][:100]}...")
|
| 124 |
+
|
| 125 |
+
print("=" * 100)
|
| 126 |
+
|
| 127 |
+
print("\nβ
Adaptive Scoring Test Complete!")
|
| 128 |
+
print("\nKey Improvements:")
|
| 129 |
+
print(" 1. Low similarity prompts β increased risk (uncertainty penalty)")
|
| 130 |
+
print(" 2. Cross-domain queries β flagged as more risky")
|
| 131 |
+
print(" 3. High similarity matches β minimal/no penalty (confidence in prediction)")
|
| 132 |
+
print("\nNext Steps:")
|
| 133 |
+
print(" - Review NEXT_STEPS_IMPROVEMENTS.md for evaluation framework")
|
| 134 |
+
print(" - Implement nested CV for hyperparameter tuning")
|
| 135 |
+
print(" - Create OOD test sets for comprehensive evaluation")
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
if __name__ == "__main__":
|
| 139 |
+
try:
|
| 140 |
+
test_adaptive_scoring()
|
| 141 |
+
except KeyboardInterrupt:
|
| 142 |
+
print("\n\nTest interrupted by user.")
|
| 143 |
+
sys.exit(0)
|
| 144 |
+
except Exception as e:
|
| 145 |
+
print(f"\n\nβ Error during testing: {e}")
|
| 146 |
+
import traceback
|
| 147 |
+
traceback.print_exc()
|
| 148 |
+
sys.exit(1)
|