Improve evaluation metrics, optimize parameter naming, fix label index error

This commit is contained in:
Zzier
2025-08-27 20:55:39 +08:00
parent dd150d65a0
commit 437a24da50
+33 -31
View File
@@ -5,7 +5,6 @@ from utils import get_msg_mgr, mkdir
from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank, evaluate_many from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank, evaluate_many
from .re_rank import re_ranking from .re_rank import re_ranking
from sklearn.metrics import confusion_matrix, accuracy_score
def de_diag(acc, each_angle=False): def de_diag(acc, each_angle=False):
# Exclude identical-view cases # Exclude identical-view cases
@@ -417,46 +416,49 @@ def evaluate_CCPG(data, dataset, metric='euc'):
return result_dict return result_dict
def evaluate_scoliosis(data, dataset, metric='euc'):
    """Evaluate 3-class scoliosis classification from pooled embedding logits.

    Args:
        data: dict with keys 'embeddings' (array-like, per-sample class logits
            with a trailing dimension that is averaged away before argmax —
            presumably (N, num_classes, parts); TODO confirm against caller)
            and 'types' (iterable of status strings: 'negative' / 'neutral' /
            'positive').
        dataset: unused here; kept for signature parity with the other
            evaluate_* functions in this module.
        metric: unused here; kept for signature parity.

    Returns:
        dict mapping scalar-log keys to accuracy, macro precision, macro
        recall, and macro F1.
    """
    msg_mgr = get_msg_mgr()
    # Local import keeps sklearn optional for code paths that never run
    # this evaluator.
    from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

    logits = np.array(data['embeddings'])
    labels = data['types']

    # Label mapping: negative->0, neutral->1, positive->2
    label_map = {'negative': 0, 'neutral': 1, 'positive': 2}
    true_ids = np.array([label_map[status] for status in labels])
    # Average over the trailing axis, then take the highest-scoring class.
    pred_ids = np.argmax(logits.mean(-1), axis=-1)

    # Total accuracy: proportion of correctly predicted samples.
    accuracy = accuracy_score(true_ids, pred_ids)
    # Macro averages weight each class equally regardless of support;
    # zero_division=0 avoids warnings/NaN when a class is never predicted.
    precision = precision_score(true_ids, pred_ids, average='macro', zero_division=0)
    recall = recall_score(true_ids, pred_ids, average='macro', zero_division=0)
    f1 = f1_score(true_ids, pred_ids, average='macro', zero_division=0)

    # Print results
    msg_mgr.log_info(f"Total Accuracy: {accuracy*100:.2f}%")
    msg_mgr.log_info(f"Macro-avg Precision: {precision*100:.2f}%")
    msg_mgr.log_info(f"Macro-avg Recall: {recall*100:.2f}%")
    msg_mgr.log_info(f"Macro-avg F1 Score: {f1*100:.2f}%")

    return {
        "scalar/test_accuracy/": accuracy,
        "scalar/test_precision/": precision,
        "scalar/test_recall/": recall,
        "scalar/test_f1/": f1
    }
def evaluate_FreeGait(data, dataset, metric='euc'): def evaluate_FreeGait(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr() msg_mgr = get_msg_mgr()