Merge pull request #285 from zhouzi180/master

Update for Scoliosis1K Dataset
This commit is contained in:
Chengwei Ye
2025-08-27 21:04:02 +08:00
committed by GitHub
4 changed files with 123 additions and 70 deletions
+77 -26
View File
@@ -1,34 +1,85 @@
# Tutorial for [Scoliosis1K](https://zhouzi180.github.io/Scoliosis1K) # Tutorial for [Scoliosis1K](https://zhouzi180.github.io/Scoliosis1K)
## Download the Scoliosis1K Dataset
## Download the Scoliosis1K dataset You can download the dataset from the [official website](https://zhouzi180.github.io/Scoliosis1K).
Download the dataset from the [link](https://zhouzi180.github.io/Scoliosis1K). The dataset is provided as four compressed files:
decompress these two file by following command:
```shell
unzip -P password Scoliosis1K-pkl.zip | xargs -n1 tar xzvf
```
password should be obtained by signing [agreement](https://zhouzi180.github.io/Scoliosis1K/static/resources/Scoliosis1KAgreement.pdf) and sending to email (12331257@mail.sustech.edu.cn)
Then you will get Scoliosis1K formatted as: * `Scoliosis1K-sil-raw.zip`
``` * `Scoliosis1K-sil-pkl.zip`
DATASET_ROOT/ * `Scoliosis1K-pose-raw.zip`
00000 (subject)/ * `Scoliosis1K-pose-pkl.zip`
positive (category)/
000-180 (view)/ We recommend using the provided pickle (`.pkl`) files for convenience.
000.pkl (contains all frames) Decompress them with the following commands:
......
``` ```bash
## Train the dataset unzip -P <password> Scoliosis1K-sil-pkl.zip
Modify the `dataset_root` in `configs/sconet/sconet_scoliosis1k.yaml`, and then run this command: unzip -P <password> Scoliosis1K-pose-pkl.zip
```shell
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs configs/sconet/sconet_scoliosis1k.yaml --phase train
``` ```
> **Note**: The \<password\> can be obtained by signing the [release agreement](https://zhouzi180.github.io/Scoliosis1K/static/resources/Scoliosis1k_release_agreement.pdf) and sending it to **[12331257@mail.sustech.edu.cn](mailto:12331257@mail.sustech.edu.cn)**.
## Process from RAW dataset ### Dataset Structure
After decompression, you will get the following structure:
### Preprocess the dataset (Optional) ```
Download the raw dataset from the [official link](https://zhouzi180.github.io/Scoliosis1K). You will get two compressed files, i.e. `Scoliosis1K-raw.zip`, and `Scoliosis1K-pkl.zip`. ├── Scoliosis1K-sil-pkl
We recommend using our provided pickle files for convenience, or process raw dataset into pickle by this command: │ ├── 00000 # Identity
```shell │ │ ├── Positive # Class
python datasets/pretreatment.py --input_path Scoliosis1K_raw --output_path Scoliosis1K-pkl │ │ │ ├── 000_180 # View
│ │ │ └── 000_180.pkl # Estimated Silhouette (PP-HumanSeg v2)
├── Scoliosis1K-pose-pkl
│ ├── 00000 # Identity
│ │ ├── Positive # Class
│ │ │ ├── 000_180 # View
│ │ │ └── 000_180.pkl # Estimated 2D Pose (ViTPose)
```
### Processing from RAW Dataset (optional)
If you prefer, you can process the raw dataset into `.pkl` format.
```bash
# For silhouette raw data
python datasets/pretreatment.py --input_path=<path_to_raw_silhouettes> --output_path=<output_path>
# For pose raw data
python datasets/pretreatment.py --input_path=<path_to_raw_pose> --output_path=<output_path> --pose --dataset=OUMVLP
```
---
## Training and Testing
Before training or testing, modify the `dataset_root` field in
`configs/sconet/sconet_scoliosis1k.yaml`.
Then run the following commands:
```bash
# Training
CUDA_VISIBLE_DEVICES=0,1,2,3 \
python -m torch.distributed.launch --nproc_per_node=4 \
opengait/main.py --cfgs configs/sconet/sconet_scoliosis1k.yaml --phase train --log_to_file
# Testing
CUDA_VISIBLE_DEVICES=0,1,2,3 \
python -m torch.distributed.launch --nproc_per_node=4 \
opengait/main.py --cfgs configs/sconet/sconet_scoliosis1k.yaml --phase test --log_to_file
```
---
## Pose-to-Heatmap Conversion
*From our paper: **Pose as Clinical Prior: Learning Dual Representations for Scoliosis Screening (MICCAI 2025)***
```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 \
python -m torch.distributed.launch --nproc_per_node=4 \
datasets/pretreatment_heatmap.py \
--pose_data_path=<path_to_pose_pkl> \
--save_root=<output_path> \
--dataset_name=OUMVLP
``` ```
+33 -31
View File
@@ -5,7 +5,6 @@ from utils import get_msg_mgr, mkdir
from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank, evaluate_many from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank, evaluate_many
from .re_rank import re_ranking from .re_rank import re_ranking
from sklearn.metrics import confusion_matrix, accuracy_score
def de_diag(acc, each_angle=False): def de_diag(acc, each_angle=False):
# Exclude identical-view cases # Exclude identical-view cases
@@ -417,46 +416,49 @@ def evaluate_CCPG(data, dataset, metric='euc'):
return result_dict return result_dict
def evaluate_scoliosis(data, dataset, metric='euc'): def evaluate_scoliosis(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr() msg_mgr = get_msg_mgr()
feature, label, class_id, view = data['embeddings'], data['labels'], data['types'], data['views'] from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix
label = np.array(label) logits = np.array(data['embeddings'])
class_id = np.array(class_id) labels = data['types']
# Update class_id with integer labels based on status # Label mapping: negative->0, neutral->1, positive->2
class_id_int = np.array([1 if status == 'positive' else 2 if status == 'neutral' else 0 for status in class_id]) label_map = {'negative': 0, 'neutral': 1, 'positive': 2}
print('class_id=', class_id_int) true_ids = np.array([label_map[status] for status in labels])
features = np.array(feature) pred_ids = np.argmax(logits.mean(-1), axis=-1)
c_id_int = np.argmax(features.mean(-1), axis=-1)
print('predicted_labels', c_id_int)
# Calculate sensitivity and specificity # Calculate evaluation metrics
cm = confusion_matrix(class_id_int, c_id_int, labels=[0, 1, 2]) # Total Accuracy: proportion of correctly predicted samples among all samples
FP = cm.sum(axis=0) - np.diag(cm) accuracy = accuracy_score(true_ids, pred_ids)
FN = cm.sum(axis=1) - np.diag(cm)
TP = np.diag(cm)
TN = cm.sum() - (FP + FN + TP)
# Sensitivity, hit rate, recall, or true positive rate # Macro-average Precision: average of precision scores for each class
TPR = TP / (TP + FN) precision = precision_score(true_ids, pred_ids, average='macro', zero_division=0)
# Specificity or true negative rate
TNR = TN / (TN + FP)
accuracy = accuracy_score(class_id_int, c_id_int)
result_dict = {} # Macro-average Recall: average of recall scores for each class
result_dict["scalar/test_accuracy/"] = accuracy recall = recall_score(true_ids, pred_ids, average='macro', zero_division=0)
result_dict["scalar/test_sensitivity/"] = TPR
result_dict["scalar/test_specificity/"] = TNR
# Printing the sensitivity and specificity # Macro-average F1: average of F1 scores for each class
for i, cls in enumerate(['Positive']): f1 = f1_score(true_ids, pred_ids, average='macro', zero_division=0)
print(f"{cls} Sensitivity (Recall): {TPR[i] * 100:.2f}%")
print(f"{cls} Specificity: {TNR[i] * 100:.2f}%")
print(f"Accuracy: {accuracy * 100:.2f}%")
return result_dict # Confusion matrix (for debugging)
# cm = confusion_matrix(true_ids, pred_ids, labels=[0, 1, 2])
# class_names = ['Negative', 'Neutral', 'Positive']
# Print results
msg_mgr.log_info(f"Total Accuracy: {accuracy*100:.2f}%")
msg_mgr.log_info(f"Macro-avg Precision: {precision*100:.2f}%")
msg_mgr.log_info(f"Macro-avg Recall: {recall*100:.2f}%")
msg_mgr.log_info(f"Macro-avg F1 Score: {f1*100:.2f}%")
return {
"scalar/test_accuracy/": accuracy,
"scalar/test_precision/": precision,
"scalar/test_recall/": recall,
"scalar/test_f1/": f1
}
def evaluate_FreeGait(data, dataset, metric='euc'): def evaluate_FreeGait(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr() msg_mgr = get_msg_mgr()
+5 -5
View File
@@ -16,10 +16,10 @@ class ScoNet(BaseModel):
self.HPP = HorizontalPoolingPyramid(bin_num=model_cfg['bin_num']) self.HPP = HorizontalPoolingPyramid(bin_num=model_cfg['bin_num'])
def forward(self, inputs): def forward(self, inputs):
ipts, labs, class_id, _, seqL = inputs ipts, pids, labels, _, seqL = inputs
class_id_int = np.array([1 if status == 'positive' else 2 if status == 'neutral' else 0 for status in class_id]) # Label mapping: negative->0, neutral->1, positive->2
class_id = torch.tensor(class_id_int).cuda() label_ids = np.array([{'negative': 0, 'neutral': 1, 'positive': 2}[status] for status in labels])
sils = ipts[0] sils = ipts[0]
if len(sils.size()) == 4: if len(sils.size()) == 4:
@@ -40,8 +40,8 @@ class ScoNet(BaseModel):
embed = embed_1 embed = embed_1
retval = { retval = {
'training_feat': { 'training_feat': {
'triplet': {'embeddings': embed, 'labels': labs}, 'triplet': {'embeddings': embed, 'labels': pids},
'softmax': {'logits': logits, 'labels': class_id}, 'softmax': {'logits': logits, 'labels': label_ids},
}, },
'visual_summary': { 'visual_summary': {
'image/sils': rearrange(sils,'n c s h w -> (n s) c h w') 'image/sils': rearrange(sils,'n c s h w -> (n s) c h w')