From dd150d65a0c8e33c02d9ef05e733faf6243d7888 Mon Sep 17 00:00:00 2001 From: Zzier Date: Wed, 27 Aug 2025 20:54:12 +0800 Subject: [PATCH 1/4] Optimize parameter naming, fix label index error --- opengait/modeling/models/sconet.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/opengait/modeling/models/sconet.py b/opengait/modeling/models/sconet.py index a977e81..c61c147 100644 --- a/opengait/modeling/models/sconet.py +++ b/opengait/modeling/models/sconet.py @@ -16,10 +16,10 @@ class ScoNet(BaseModel): self.HPP = HorizontalPoolingPyramid(bin_num=model_cfg['bin_num']) def forward(self, inputs): - ipts, labs, class_id, _, seqL = inputs + ipts, pids, labels, _, seqL = inputs - class_id_int = np.array([1 if status == 'positive' else 2 if status == 'neutral' else 0 for status in class_id]) - class_id = torch.tensor(class_id_int).cuda() + # Label mapping: negative->0, neutral->1, positive->2 + label_ids = np.array([{'negative': 0, 'neutral': 1, 'positive': 2}[status] for status in labels]) sils = ipts[0] if len(sils.size()) == 4: @@ -40,8 +40,8 @@ class ScoNet(BaseModel): embed = embed_1 retval = { 'training_feat': { - 'triplet': {'embeddings': embed, 'labels': labs}, - 'softmax': {'logits': logits, 'labels': class_id}, + 'triplet': {'embeddings': embed, 'labels': pids}, + 'softmax': {'logits': logits, 'labels': label_ids}, }, 'visual_summary': { 'image/sils': rearrange(sils,'n c s h w -> (n s) c h w') From 437a24da50c4f7937440e5351cd2ee1cb41fe03e Mon Sep 17 00:00:00 2001 From: Zzier Date: Wed, 27 Aug 2025 20:55:39 +0800 Subject: [PATCH 2/4] Improve evaluation metrics, optimize parameter naming, fix label index error --- opengait/evaluation/evaluator.py | 80 ++++++++++++++++---------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/opengait/evaluation/evaluator.py b/opengait/evaluation/evaluator.py index cf93374..053b7c4 100644 --- a/opengait/evaluation/evaluator.py +++ b/opengait/evaluation/evaluator.py @@ -5,7 +5,6 @@ 
from utils import get_msg_mgr, mkdir from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank, evaluate_many from .re_rank import re_ranking -from sklearn.metrics import confusion_matrix, accuracy_score def de_diag(acc, each_angle=False): # Exclude identical-view cases @@ -417,46 +416,49 @@ def evaluate_CCPG(data, dataset, metric='euc'): return result_dict def evaluate_scoliosis(data, dataset, metric='euc'): + msg_mgr = get_msg_mgr() + + from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix - feature, label, class_id, view = data['embeddings'], data['labels'], data['types'], data['views'] - - label = np.array(label) - class_id = np.array(class_id) - - # Update class_id with integer labels based on status - class_id_int = np.array([1 if status == 'positive' else 2 if status == 'neutral' else 0 for status in class_id]) - print('class_id=', class_id_int) - - features = np.array(feature) - c_id_int = np.argmax(features.mean(-1), axis=-1) - print('predicted_labels', c_id_int) - - # Calculate sensitivity and specificity - cm = confusion_matrix(class_id_int, c_id_int, labels=[0, 1, 2]) - FP = cm.sum(axis=0) - np.diag(cm) - FN = cm.sum(axis=1) - np.diag(cm) - TP = np.diag(cm) - TN = cm.sum() - (FP + FN + TP) - - # Sensitivity, hit rate, recall, or true positive rate - TPR = TP / (TP + FN) - # Specificity or true negative rate - TNR = TN / (TN + FP) - accuracy = accuracy_score(class_id_int, c_id_int) - - result_dict = {} - result_dict["scalar/test_accuracy/"] = accuracy - result_dict["scalar/test_sensitivity/"] = TPR - result_dict["scalar/test_specificity/"] = TNR - - # Printing the sensitivity and specificity - for i, cls in enumerate(['Positive']): - print(f"{cls} Sensitivity (Recall): {TPR[i] * 100:.2f}%") - print(f"{cls} Specificity: {TNR[i] * 100:.2f}%") - print(f"Accuracy: {accuracy * 100:.2f}%") - - return result_dict + logits = np.array(data['embeddings']) + labels = data['types'] + + # Label mapping: 
negative->0, neutral->1, positive->2 + label_map = {'negative': 0, 'neutral': 1, 'positive': 2} + true_ids = np.array([label_map[status] for status in labels]) + + pred_ids = np.argmax(logits.mean(-1), axis=-1) + + # Calculate evaluation metrics + # Total Accuracy: proportion of correctly predicted samples among all samples + accuracy = accuracy_score(true_ids, pred_ids) + + # Macro-average Precision: average of precision scores for each class + precision = precision_score(true_ids, pred_ids, average='macro', zero_division=0) + + # Macro-average Recall: average of recall scores for each class + recall = recall_score(true_ids, pred_ids, average='macro', zero_division=0) + + # Macro-average F1: average of F1 scores for each class + f1 = f1_score(true_ids, pred_ids, average='macro', zero_division=0) + + # Confusion matrix (for debugging) + # cm = confusion_matrix(true_ids, pred_ids, labels=[0, 1, 2]) + # class_names = ['Negative', 'Neutral', 'Positive'] + + # Print results + msg_mgr.log_info(f"Total Accuracy: {accuracy*100:.2f}%") + msg_mgr.log_info(f"Macro-avg Precision: {precision*100:.2f}%") + msg_mgr.log_info(f"Macro-avg Recall: {recall*100:.2f}%") + msg_mgr.log_info(f"Macro-avg F1 Score: {f1*100:.2f}%") + + return { + "scalar/test_accuracy/": accuracy, + "scalar/test_precision/": precision, + "scalar/test_recall/": recall, + "scalar/test_f1/": f1 + } def evaluate_FreeGait(data, dataset, metric='euc'): msg_mgr = get_msg_mgr() From 9475368b6f02bd8579c5139f522830df4f3682d7 Mon Sep 17 00:00:00 2001 From: Zzier Date: Wed, 27 Aug 2025 20:57:48 +0800 Subject: [PATCH 3/4] Update README.md - Added instructions for using 2D pose data - Added guidelines for converting 2D pose data into heatmaps --- datasets/Scoliosis1K/README.md | 103 ++++++++++++++++++++++++--------- 1 file changed, 77 insertions(+), 26 deletions(-) diff --git a/datasets/Scoliosis1K/README.md b/datasets/Scoliosis1K/README.md index d9a86c5..ce75a9b 100644 --- a/datasets/Scoliosis1K/README.md +++ 
b/datasets/Scoliosis1K/README.md @@ -1,34 +1,85 @@ # Tutorial for [Scoliosis1K](https://zhouzi180.github.io/Scoliosis1K) +## Download the Scoliosis1K Dataset -## Download the Scoliosis1K dataset -Download the dataset from the [link](https://zhouzi180.github.io/Scoliosis1K). -decompress these two file by following command: -```shell -unzip -P password Scoliosis1K-pkl.zip | xargs -n1 tar xzvf -``` -password should be obtained by signing [agreement](https://zhouzi180.github.io/Scoliosis1K/static/resources/Scoliosis1KAgreement.pdf) and sending to email (12331257@mail.sustech.edu.cn) +You can download the dataset from the [official website](https://zhouzi180.github.io/Scoliosis1K). +The dataset is provided as four compressed files: -Then you will get Scoliosis1K formatted as: -``` - DATASET_ROOT/ - 00000 (subject)/ - positive (category)/ - 000-180 (view)/ - 000.pkl (contains all frames) - ...... -``` -## Train the dataset -Modify the `dataset_root` in `configs/sconet/sconet_scoliosis1k.yaml`, and then run this command: -```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs configs/sconet/sconet_scoliosis1k.yaml --phase train +* `Scoliosis1K-sil-raw.zip` +* `Scoliosis1K-sil-pkl.zip` +* `Scoliosis1K-pose-raw.zip` +* `Scoliosis1K-pose-pkl.zip` + +We recommend using the provided pickle (`.pkl`) files for convenience. +Decompress them with the following commands: + +```bash +unzip -P <password> Scoliosis1K-sil-pkl.zip +unzip -P <password> Scoliosis1K-pose-pkl.zip ``` +> **Note**: The \<password\> can be obtained by signing the [release agreement](https://zhouzi180.github.io/Scoliosis1K/static/resources/Scoliosis1k_release_agreement.pdf) and sending it to **[12331257@mail.sustech.edu.cn](mailto:12331257@mail.sustech.edu.cn)**.
-## Process from RAW dataset +### Dataset Structure + +After decompression, you will get the following structure: -### Preprocess the dataset (Optional) -Download the raw dataset from the [official link](https://zhouzi180.github.io/Scoliosis1K). You will get two compressed files, i.e. `Scoliosis1K-raw.zip`, and `Scoliosis1K-pkl.zip`. -We recommend using our provided pickle files for convenience, or process raw dataset into pickle by this command: -```shell -python datasets/pretreatment.py --input_path Scoliosis1K_raw --output_path Scoliosis1K-pkl ``` +├── Scoliosis1K-sil-pkl +│ ├── 00000 # Identity +│ │ ├── Positive # Class +│ │ │ ├── 000_180 # View +│ │ │ └── 000_180.pkl # Estimated Silhouette (PP-HumanSeg v2) +│ +├── Scoliosis1K-pose-pkl +│ ├── 00000 # Identity +│ │ ├── Positive # Class +│ │ │ ├── 000_180 # View +│ │ │ └── 000_180.pkl # Estimated 2D Pose (ViTPose) +``` + +### Processing from RAW Dataset (optional) + +If you prefer, you can process the raw dataset into `.pkl` format. + +```bash +# For silhouette raw data +python datasets/pretreatment.py --input_path=<sil_raw_path> --output_path=<sil_pkl_path> + +# For pose raw data +python datasets/pretreatment.py --input_path=<pose_raw_path> --output_path=<pose_pkl_path> --pose --dataset=OUMVLP +``` +--- + +## Training and Testing + +Before training or testing, modify the `dataset_root` field in +`configs/sconet/sconet_scoliosis1k.yaml`.
+ +Then run the following commands: + +```bash +# Training +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 \ +opengait/main.py --cfgs configs/sconet/sconet_scoliosis1k.yaml --phase train --log_to_file + +# Testing +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 \ +opengait/main.py --cfgs configs/sconet/sconet_scoliosis1k.yaml --phase test --log_to_file +``` + +--- + +## Pose-to-Heatmap Conversion + +*From our paper: **Pose as Clinical Prior: Learning Dual Representations for Scoliosis Screening (MICCAI 2025)*** + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 \ +datasets/pretreatment_heatmap.py \ + --pose_data_path=<pose_pkl_path> \ + --save_root=<heatmap_save_path> \ + --dataset_name=OUMVLP +``` \ No newline at end of file From 6377526c7fca3225857b5a97bd532b86d466558f Mon Sep 17 00:00:00 2001 From: Zzier Date: Wed, 27 Aug 2025 20:58:24 +0800 Subject: [PATCH 4/4] Fix naming error --- .../Scoliosis1K/{Scoliosis1K_1112.json => Scoliosis1K_1116.json} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename datasets/Scoliosis1K/{Scoliosis1K_1112.json => Scoliosis1K_1116.json} (100%) diff --git a/datasets/Scoliosis1K/Scoliosis1K_1112.json b/datasets/Scoliosis1K/Scoliosis1K_1116.json similarity index 100% rename from datasets/Scoliosis1K/Scoliosis1K_1112.json rename to datasets/Scoliosis1K/Scoliosis1K_1116.json