Improve evaluation metrics, clean up variable naming, fix label index error

Zzier
2025-08-27 20:55:39 +08:00
parent dd150d65a0
commit 437a24da50
+41 -39
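The "label index error" named in the title is an index mismatch between ground truth and predictions: the old code encoded positive->1 and neutral->2, while argmax over the logits yields class indices in the classifier head's training order, assumed here to be negative=0, neutral=1, positive=2 (the ordering the new label_map uses). A minimal sketch of the mismatch, with hypothetical logits:

import numpy as np

# Hypothetical logits for 3 samples, shape [N, 3]; class order assumed
# to be negative=0, neutral=1, positive=2 as in the model head.
logits = np.array([[0.1, 0.2, 0.9],   # model predicts positive (2)
                   [0.8, 0.1, 0.1],   # model predicts negative (0)
                   [0.2, 0.7, 0.1]])  # model predicts neutral  (1)
statuses = ['positive', 'negative', 'neutral']
pred_ids = logits.argmax(axis=-1)                       # -> [2, 0, 1]

# Old mapping (buggy): positive->1, neutral->2, negative->0
old_true = np.array([1 if s == 'positive' else 2 if s == 'neutral' else 0
                     for s in statuses])                # -> [1, 0, 2]
# New mapping (fixed): negative->0, neutral->1, positive->2
new_true = np.array([{'negative': 0, 'neutral': 1, 'positive': 2}[s]
                     for s in statuses])                # -> [2, 0, 1]

print((pred_ids == old_true).mean())  # 0.33 - correct predictions penalized
print((pred_ids == new_true).mean())  # 1.00 - indices now aligned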
@@ -5,7 +5,6 @@ from utils import get_msg_mgr, mkdir
 from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank, evaluate_many
 from .re_rank import re_ranking
-from sklearn.metrics import confusion_matrix, accuracy_score
 
 
 def de_diag(acc, each_angle=False):
     # Exclude identical-view cases
@@ -417,46 +416,49 @@ def evaluate_CCPG(data, dataset, metric='euc'):
     return result_dict
 
 
 def evaluate_scoliosis(data, dataset, metric='euc'):
     msg_mgr = get_msg_mgr()
-    feature, label, class_id, view = data['embeddings'], data['labels'], data['types'], data['views']
-    label = np.array(label)
-    class_id = np.array(class_id)
-    # Update class_id with integer labels based on status
-    class_id_int = np.array([1 if status == 'positive' else 2 if status == 'neutral' else 0 for status in class_id])
-    print('class_id=', class_id_int)
-    features = np.array(feature)
-    c_id_int = np.argmax(features.mean(-1), axis=-1)
-    print('predicted_labels', c_id_int)
-    # Calculate sensitivity and specificity
-    cm = confusion_matrix(class_id_int, c_id_int, labels=[0, 1, 2])
-    FP = cm.sum(axis=0) - np.diag(cm)
-    FN = cm.sum(axis=1) - np.diag(cm)
-    TP = np.diag(cm)
-    TN = cm.sum() - (FP + FN + TP)
-
-    # Sensitivity, hit rate, recall, or true positive rate
-    TPR = TP / (TP + FN)
-    # Specificity or true negative rate
-    TNR = TN / (TN + FP)
-    accuracy = accuracy_score(class_id_int, c_id_int)
-    result_dict = {}
-    result_dict["scalar/test_accuracy/"] = accuracy
-    result_dict["scalar/test_sensitivity/"] = TPR
-    result_dict["scalar/test_specificity/"] = TNR
-
-    # Printing the sensitivity and specificity
-    for i, cls in enumerate(['Positive']):
-        print(f"{cls} Sensitivity (Recall): {TPR[i] * 100:.2f}%")
-        print(f"{cls} Specificity: {TNR[i] * 100:.2f}%")
-    print(f"Accuracy: {accuracy * 100:.2f}%")
-
-    return result_dict
+    from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix
+    logits = np.array(data['embeddings'])
+    labels = data['types']
+
+    # Label mapping: negative->0, neutral->1, positive->2
+    label_map = {'negative': 0, 'neutral': 1, 'positive': 2}
+    true_ids = np.array([label_map[status] for status in labels])
+
+    pred_ids = np.argmax(logits.mean(-1), axis=-1)
+
+    # Calculate evaluation metrics
+    # Total Accuracy: proportion of correctly predicted samples among all samples
+    accuracy = accuracy_score(true_ids, pred_ids)
+
+    # Macro-average Precision: average of precision scores for each class
+    precision = precision_score(true_ids, pred_ids, average='macro', zero_division=0)
+
+    # Macro-average Recall: average of recall scores for each class
+    recall = recall_score(true_ids, pred_ids, average='macro', zero_division=0)
+
+    # Macro-average F1: average of F1 scores for each class
+    f1 = f1_score(true_ids, pred_ids, average='macro', zero_division=0)
+
+    # Confusion matrix (for debugging)
+    # cm = confusion_matrix(true_ids, pred_ids, labels=[0, 1, 2])
+    # class_names = ['Negative', 'Neutral', 'Positive']
+
+    # Print results
+    msg_mgr.log_info(f"Total Accuracy: {accuracy*100:.2f}%")
+    msg_mgr.log_info(f"Macro-avg Precision: {precision*100:.2f}%")
+    msg_mgr.log_info(f"Macro-avg Recall: {recall*100:.2f}%")
+    msg_mgr.log_info(f"Macro-avg F1 Score: {f1*100:.2f}%")
+
+    return {
+        "scalar/test_accuracy/": accuracy,
+        "scalar/test_precision/": precision,
+        "scalar/test_recall/": recall,
+        "scalar/test_f1/": f1
+    }
 
 
 def evaluate_FreeGait(data, dataset, metric='euc'):
     msg_mgr = get_msg_mgr()
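A standalone sketch of the new metric path, runnable without the repository's message manager. The dummy shapes are assumptions: the embeddings are taken to be per-part class logits of shape [N, num_classes, parts], which is why the function averages over the last axis before taking argmax.

import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Assumed shape: 8 samples, 3 classes, 16 parts (hypothetical dummy data).
rng = np.random.default_rng(0)
logits = rng.normal(size=(8, 3, 16))
labels = rng.choice(['negative', 'neutral', 'positive'], size=8)

label_map = {'negative': 0, 'neutral': 1, 'positive': 2}
true_ids = np.array([label_map[s] for s in labels])
pred_ids = np.argmax(logits.mean(-1), axis=-1)  # average parts, then argmax class

print(f"acc={accuracy_score(true_ids, pred_ids):.3f}, "
      f"P={precision_score(true_ids, pred_ids, average='macro', zero_division=0):.3f}, "
      f"R={recall_score(true_ids, pred_ids, average='macro', zero_division=0):.3f}, "
      f"F1={f1_score(true_ids, pred_ids, average='macro', zero_division=0):.3f}")

Macro averaging with zero_division=0 keeps the scores defined even when a class receives no predictions, which matters for a small three-class test split like this one.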