13 Commits

20 changed files with 3884 additions and 165 deletions

178
.gitignore vendored

@@ -1 +1,179 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# user
.DS_Store
*.parquet
*.png

.vscode/settings.json

@@ -1,4 +1,7 @@
{
"python.analysis.autoImportCompletions": true,
"python.analysis.typeCheckingMode": "standard"
"python.analysis.typeCheckingMode": "standard",
"cSpell.words": [
"timescape"
]
}

135
2025-06-10_16_26.CSV Normal file

@@ -0,0 +1,135 @@
time,hr
00:00:00,113
00:00:00,111
00:00:01,113
00:00:02,114
00:00:03,113
00:00:04,114
00:00:05,113
00:00:06,114
00:00:07,114
00:00:08,114
00:00:09,114
00:00:10,114
00:00:11,114
00:00:12,114
00:00:13,114
00:00:14,114
00:00:15,114
00:00:16,114
00:00:17,113
00:00:18,113
00:00:19,113
00:00:20,112
00:00:21,112
00:00:22,112
00:00:23,113
00:00:24,113
00:00:25,114
00:00:26,115
00:00:27,115
00:00:28,116
00:00:29,117
00:00:30,118
00:00:31,118
00:00:32,119
00:00:33,119
00:00:34,120
00:00:35,121
00:00:36,121
00:00:37,121
00:00:38,121
00:00:39,121
00:00:40,122
00:00:41,122
00:00:42,122
00:00:43,122
00:00:44,122
00:00:45,122
00:00:46,122
00:00:47,121
00:00:48,122
00:00:49,122
00:00:50,122
00:00:51,122
00:00:52,122
00:00:53,122
00:00:54,122
00:00:55,122
00:00:56,123
00:00:57,123
00:00:58,123
00:00:59,123
00:01:00,123
00:01:01,124
00:01:02,124
00:01:03,124
00:01:04,124
00:01:05,124
00:01:06,125
00:01:07,125
00:01:08,126
00:01:09,127
00:01:10,128
00:01:11,129
00:01:12,129
00:01:13,131
00:01:14,132
00:01:15,133
00:01:16,135
00:01:17,137
00:01:18,139
00:01:19,140
00:01:20,140
00:01:21,141
00:01:22,141
00:01:23,141
00:01:24,142
00:01:25,142
00:01:26,142
00:01:27,141
00:01:28,141
00:01:29,141
00:01:30,140
00:01:31,140
00:01:32,139
00:01:33,139
00:01:34,139
00:01:35,139
00:01:36,138
00:01:37,138
00:01:38,138
00:01:39,137
00:01:40,136
00:01:41,135
00:01:42,134
00:01:43,133
00:01:44,131
00:01:45,131
00:01:46,131
00:01:47,130
00:01:48,129
00:01:49,129
00:01:50,129
00:01:51,129
00:01:52,128
00:01:53,128
00:01:54,127
00:01:55,127
00:01:56,126
00:01:57,126
00:01:58,126
00:01:59,125
00:02:00,125
00:02:01,124
00:02:02,124
00:02:03,124
00:02:04,124
00:02:05,123
00:02:06,123
00:02:07,123
00:02:08,124
00:02:09,124
00:02:10,124
00:02:11,125
00:02:12,125

213
app/model/__init__.py Normal file

@@ -0,0 +1,213 @@
from dataclasses import dataclass
from enum import IntEnum
import struct
from typing import ClassVar, Tuple, Final, LiteralString
from pydantic import BaseModel, Field, computed_field
class AlgoOpMode(IntEnum):
"""Equivalent to max::ALGO_OP_MODE"""
CONTINUOUS_HRM_CONTINUOUS_SPO2 = 0x00 # Continuous HRM, continuous SpO2
CONTINUOUS_HRM_ONE_SHOT_SPO2 = 0x01 # Continuous HRM, one-shot SpO2
CONTINUOUS_HRM = 0x02 # Continuous HRM
SAMPLED_HRM = 0x03 # Sampled HRM
SAMPLED_HRM_ONE_SHOT_SPO2 = 0x04 # Sampled HRM, one-shot SpO2
ACTIVITY_TRACKING_ONLY = 0x05 # Activity tracking only
SPO2_CALIBRATION = 0x06 # SpO2 calibration
class ActivateClass(IntEnum):
"""Equivalent to max::ACTIVATE_CLASS"""
REST = 0
WALK = 1
RUN = 2
BIKE = 3
class SPO2State(IntEnum):
"""Equivalent to max::SPO2_STATE"""
LED_ADJUSTMENT = 0
COMPUTATION = 1
SUCCESS = 2
TIMEOUT = 3
class SCDState(IntEnum):
"""Equivalent to max::SCD_STATE"""
UNDETECTED = 0
OFF_SKIN = 1
ON_SOME_SUBJECT = 2
ON_SKIN = 3
class AlgoModelData(BaseModel):
op_mode: AlgoOpMode
hr: int # uint16, 10x calculated heart rate
hr_conf: int # uint8, confidence level in %
rr: int # uint16, 10x RR interval in ms
rr_conf: int # uint8
activity_class: ActivateClass
r: int # uint16, 1000x SpO2 R value
spo2_conf: int # uint8
spo2: int # uint16, 10x SpO2 %
spo2_percent_complete: int # uint8
spo2_low_signal_quality_flag: int # uint8
spo2_motion_flag: int # uint8
spo2_low_pi_flag: int # uint8
spo2_unreliable_r_flag: int # uint8
spo2_state: SPO2State
scd_contact_state: SCDState
# trailing reserved field (uint32) is consumed by the format string
# but not stored as a model field
_FORMAT: ClassVar[LiteralString] = "<BHBHBBHBHBBBBBBBI"
@computed_field
@property
def hr_f(self) -> float:
"""Heart rate in beats per minute"""
return self.hr / 10.0
@computed_field
@property
def spo2_f(self) -> float:
"""SpO2 percentage"""
return self.spo2 / 10.0
@computed_field
@property
def r_f(self) -> float:
"""SpO2 R value"""
return self.r / 1000.0
@computed_field
@property
def rr_f(self) -> float:
"""RR interval in milliseconds"""
return self.rr / 10.0
@classmethod
def unmarshal(cls, data: bytes) -> "AlgoModelData":
values = struct.unpack(cls._FORMAT, data)
return cls(
op_mode=values[0],
hr=values[1],
hr_conf=values[2],
rr=values[3],
rr_conf=values[4],
activity_class=values[5],
r=values[6],
spo2_conf=values[7],
spo2=values[8],
spo2_percent_complete=values[9],
spo2_low_signal_quality_flag=values[10],
spo2_motion_flag=values[11],
spo2_low_pi_flag=values[12],
spo2_unreliable_r_flag=values[13],
spo2_state=values[14],
scd_contact_state=values[15],
)
class AlgoReport(BaseModel):
led_1: int # uint32
led_2: int # uint32
led_3: int # uint32
accel_x: int # int16, in units of g
accel_y: int # int16, in units of g
accel_z: int # int16, in units of g
data: AlgoModelData
@classmethod
def unmarshal(cls, buf: bytes) -> "AlgoReport":
FORMAT: Final[str] = "<IIIhhh"
led_1, led_2, led_3, accel_x, accel_y, accel_z = struct.unpack(
FORMAT, buf[: struct.calcsize(FORMAT)]
)
data = AlgoModelData.unmarshal(buf[struct.calcsize(FORMAT) :])
return cls(
led_1=led_1,
led_2=led_2,
led_3=led_3,
accel_x=accel_x,
accel_y=accel_y,
accel_z=accel_z,
data=data,
)
class HrConfidence(IntEnum):
"""Equivalent to max::HR_CONFIDENCE"""
ZERO = 0
LOW = 1
MEDIUM = 2
HIGH = 3
def hr_confidence_to_num(hr_confidence: HrConfidence) -> float:
if hr_confidence == HrConfidence.ZERO:
return 0
elif hr_confidence == HrConfidence.LOW:
return 25
elif hr_confidence == HrConfidence.MEDIUM:
return 62.5
elif hr_confidence == HrConfidence.HIGH:
return 100
else:
raise ValueError(f"Invalid HR confidence: {hr_confidence}")
@dataclass
class HrStatusFlags:
# 2 bits
hr_confidence: HrConfidence
# 1 bit
is_active: bool
# 1 bit
is_on_skin: bool
# 4 bits
battery_level: int
@staticmethod
def unmarshal(data: bytes) -> "HrStatusFlags":
val = data[0]
return HrStatusFlags(
hr_confidence=HrConfidence(val & 0b11),
is_active=(val & 0b100) != 0,
is_on_skin=(val & 0b1000) != 0,
battery_level=val >> 4,
)
@dataclass
class HrPacket:
# 8 bits
status: HrStatusFlags
# 8 bits
id: int
# 8 bits
hr: int
# 8 bits (as `n`) + n x 24 bits
raw_data: list[int]
@staticmethod
def unmarshal(data: bytes) -> "HrPacket":
status = HrStatusFlags.unmarshal(data[0:1])
id = data[1]
hr = data[2]
raw_data_count = data[3]
raw_data_payload = data[4:]
if len(raw_data_payload) != (expected_raw_data_len := raw_data_count * 3):
raise ValueError(
f"Invalid raw data payload length: {len(raw_data_payload)}, expected {expected_raw_data_len}"
)
raw_data = [
int.from_bytes(raw_data_payload[i : i + 3], "little")
for i in range(0, expected_raw_data_len, 3)
]
return HrPacket(status, id, hr, raw_data)
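
As a quick sanity check on the bit layout above, a hedged round-trip sketch (byte values invented for illustration; status byte 0x56 packs hr_confidence=MEDIUM, is_active=True, is_on_skin=False, battery_level=5). Note that AlgoReport.unmarshal expects 18 header bytes ("<IIIhhh") plus 24 bytes of AlgoModelData.

from app.model import HrPacket, HrConfidence

payload = (
    bytes([0x56, 7, 72, 2])         # status, id, hr, raw_data count
    + (1000).to_bytes(3, "little")  # raw sample 0
    + (2000).to_bytes(3, "little")  # raw sample 1
)
pkt = HrPacket.unmarshal(payload)
assert pkt.status.hr_confidence == HrConfidence.MEDIUM
assert pkt.status.battery_level == 5
assert pkt.hr == 72 and pkt.raw_data == [1000, 2000]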

221
app/proto/__init__.py Normal file

@@ -0,0 +1,221 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: nanopb.proto
# plugin: python-betterproto
# This file has been @generated
from dataclasses import dataclass
from typing import List
import betterproto
import betterproto.lib.google.protobuf as betterproto_lib_google_protobuf
class FieldType(betterproto.Enum):
FT_DEFAULT = 0
FT_CALLBACK = 1
FT_POINTER = 4
FT_STATIC = 2
FT_IGNORE = 3
FT_INLINE = 5
class IntSize(betterproto.Enum):
IS_DEFAULT = 0
IS_8 = 8
IS_16 = 16
IS_32 = 32
IS_64 = 64
class TypenameMangling(betterproto.Enum):
M_NONE = 0
M_STRIP_PACKAGE = 1
M_FLATTEN = 2
M_PACKAGE_INITIALS = 3
class DescriptorSize(betterproto.Enum):
DS_AUTO = 0
DS_1 = 1
DS_2 = 2
DS_4 = 4
DS_8 = 8
@dataclass(eq=False, repr=False)
class NanoPbOptions(betterproto.Message):
"""
This is the inner options message, which basically defines options for
a field. When it is used in message or file scope, it applies to all
fields.
"""
max_size: int = betterproto.int32_field(1)
"""
Allocated size for 'bytes' and 'string' fields.
For string fields, this should include the space for null terminator.
"""
max_length: int = betterproto.int32_field(14)
"""
Maximum length for 'string' fields. Setting this is equivalent
to setting max_size to a value of length+1.
"""
max_count: int = betterproto.int32_field(2)
"""Allocated number of entries in arrays ('repeated' fields)"""
int_size: "IntSize" = betterproto.enum_field(7)
"""
Size of integer fields. Can save some memory if you don't need
full 32 bits for the value.
"""
enum_intsize: "IntSize" = betterproto.enum_field(34)
"""Size for enum fields. Supported by C++11 and C23 standards."""
type: "FieldType" = betterproto.enum_field(3)
"""Force type of field (callback or static allocation)"""
long_names: bool = betterproto.bool_field(4)
"""Use long names for enums, i.e. EnumName_EnumValue."""
packed_struct: bool = betterproto.bool_field(5)
"""
Add 'packed' attribute to generated structs.
Note: this cannot be used on CPUs that break on unaligned
accesses to variables.
"""
packed_enum: bool = betterproto.bool_field(10)
"""Add 'packed' attribute to generated enums."""
skip_message: bool = betterproto.bool_field(6)
"""Skip this message"""
no_unions: bool = betterproto.bool_field(8)
"""Generate oneof fields as normal optional fields instead of union."""
msgid: int = betterproto.uint32_field(9)
"""integer type tag for a message"""
anonymous_oneof: bool = betterproto.bool_field(11)
"""decode oneof as anonymous union"""
proto3: bool = betterproto.bool_field(12)
"""Proto3 singular field does not generate a "has_" flag"""
proto3_singular_msgs: bool = betterproto.bool_field(21)
"""
Force proto3 messages to have no "has_" flag.
This was default behavior until nanopb-0.4.0.
"""
enum_to_string: bool = betterproto.bool_field(13)
"""
Generate an enum->string mapping function (can take up lots of space).
"""
enum_validate: bool = betterproto.bool_field(32)
"""Generate validation methods for enums"""
fixed_length: bool = betterproto.bool_field(15)
"""Generate bytes arrays with fixed length"""
fixed_count: bool = betterproto.bool_field(16)
"""Generate repeated field with fixed count"""
submsg_callback: bool = betterproto.bool_field(22)
"""
Generate message-level callback that is called before decoding submessages.
This can be used to set callback fields for submsgs inside oneofs.
"""
mangle_names: "TypenameMangling" = betterproto.enum_field(17)
"""
Shorten or remove package names from type names.
This option applies only on the file level.
"""
callback_datatype: str = betterproto.string_field(18)
"""Data type for storage associated with callback fields."""
callback_function: str = betterproto.string_field(19)
"""
Callback function used for encoding and decoding.
Prior to nanopb-0.4.0, the callback was specified in per-field pb_callback_t
structure. This is still supported, but does not work inside e.g. oneof or pointer
fields. Instead, a new method allows specifying a per-message callback that
will be called for all callback fields in a message type.
"""
descriptorsize: "DescriptorSize" = betterproto.enum_field(20)
"""
Select the size of field descriptors. This option has to be defined
for the whole message, not per-field. Usually automatic selection is
ok, but if it results in compilation errors you can increase the field
size here.
"""
default_has: bool = betterproto.bool_field(23)
"""Set default value for has_ fields."""
include: List[str] = betterproto.string_field(24)
"""Extra files to include in generated `.pb.h`"""
exclude: List[str] = betterproto.string_field(26)
"""
Automatic includes to exclude from generated `.pb.h`
Same as nanopb_generator.py command line flag -x.
"""
package: str = betterproto.string_field(25)
"""Package name that applies only for nanopb."""
type_override: "betterproto_lib_google_protobuf.FieldDescriptorProtoType" = (
betterproto.enum_field(27)
)
"""
Override type of the field in generated C code. Only to be used with related field types
"""
label_override: "betterproto_lib_google_protobuf.FieldDescriptorProtoLabel" = (
betterproto.enum_field(31)
)
"""
Override of the label of the field (see FieldDescriptorProto.Label). Can be used to create
fields which nanopb considers required in proto3, or whether nanopb treats the field as
optional/required/repeated.
"""
sort_by_tag: bool = betterproto.bool_field(28)
"""
Due to historical reasons, nanopb orders fields in structs by their tag number
instead of the order in .proto. Set this to false to keep the .proto order.
The default value will probably change to false in nanopb-0.5.0.
"""
fallback_type: "FieldType" = betterproto.enum_field(29)
"""
Set the FT_DEFAULT field conversion strategy.
A field that can become a static member of a c struct (e.g. int, bool, etc)
will be a static field.
Fields with dynamic length are converted to either a pointer or a callback.
"""
initializer: str = betterproto.string_field(30)
"""
Override initializer used in generated MyMessage_init_zero and MyMessage_init_default macros
By default decided automatically based on field default value and datatype.
"""
discard_unused_automatic_types: bool = betterproto.bool_field(33)
"""
Discard unused types that are automatically generated by protoc if they are not actually
needed. Currently this applies to map< > types when the field is ignored by options.
"""
discard_deprecated: bool = betterproto.bool_field(35)
"""
Discard messages and fields marked with [deprecated = true] in the proto file.
"""

96
app/proto/hr_packet.py Normal file

@@ -0,0 +1,96 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: hr_packet.proto
# plugin: python-betterproto
# This file has been @generated
from dataclasses import dataclass
from typing import List
import betterproto
class HrConfidence(betterproto.Enum):
ZERO = 0
"""[0,25)"""
LOW = 1
"""[25,50)"""
MEDIUM = 2
"""[50,75]"""
HIGH = 3
"""(75,100]"""
class LoRaBw(betterproto.Enum):
BW_NONE = 0
BW_10_4 = 8
"""
nobody is using 7.8 kHz bandwidth
so to satisfy protobuf, we use NONE for zero value
"""
BW_15_6 = 1
BW_20_8 = 9
BW_31_25 = 2
BW_41_7 = 10
BW_62_5 = 3
BW_125_0 = 4
BW_250_0 = 5
BW_500_0 = 6
@dataclass(eq=False, repr=False)
class LoRaParameters(betterproto.Message):
bw: "LoRaBw" = betterproto.enum_field(1)
sf: int = betterproto.int32_field(2)
frequency: float = betterproto.float_field(3)
@dataclass(eq=False, repr=False)
class StatusFlag(betterproto.Message):
hr_confidence: "HrConfidence" = betterproto.enum_field(1)
is_active: bool = betterproto.bool_field(2)
is_on_skin: bool = betterproto.bool_field(3)
battery: int = betterproto.uint32_field(4)
@dataclass(eq=False, repr=False)
class HrOnlyPacket(betterproto.Message):
status: "StatusFlag" = betterproto.message_field(1)
id: int = betterproto.uint32_field(2)
packet_num: int = betterproto.uint32_field(3)
hr: int = betterproto.uint32_field(4)
@dataclass(eq=False, repr=False)
class HrPpgPacket(betterproto.Message):
status: "StatusFlag" = betterproto.message_field(1)
id: int = betterproto.uint32_field(2)
packet_num: int = betterproto.uint32_field(3)
hr: int = betterproto.uint32_field(4)
ppg_data: List[int] = betterproto.uint32_field(5)
@dataclass(eq=False, repr=False)
class PacketStatus(betterproto.Message):
signal_rssi_pkt: int = betterproto.sint32_field(1)
"""
Estimation of RSSI of the LoRa® signal (after despreading) on last packet received.
"""
@dataclass(eq=False, repr=False)
class GatewayInfo(betterproto.Message):
region_id: int = betterproto.uint32_field(1)
gateway_mac: bytes = betterproto.bytes_field(2)
radio_parameters: "LoRaParameters" = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class HrPacket(betterproto.Message):
gateway_info: "GatewayInfo" = betterproto.message_field(1)
hr_only_packet: "HrOnlyPacket" = betterproto.message_field(2, group="packet")
hr_ppg_packet: "HrPpgPacket" = betterproto.message_field(3, group="packet")
packet_status: "PacketStatus" = betterproto.message_field(4)

70
app/utils/__init__.py Normal file

@@ -0,0 +1,70 @@
import time
from datetime import timedelta
class Instant:
"""A measurement of a monotonically nondecreasing clock."""
_time: float
@staticmethod
def clock() -> float:
"""Get current clock time in microseconds."""
return time.monotonic()
def __init__(self):
"""Initialize with current clock time."""
self._time = self.clock()
@classmethod
def now(cls) -> "Instant":
"""Create new Instant with current time."""
return cls()
def elapsed(self) -> timedelta:
"""Get elapsed time as timedelta."""
now = self.clock()
diff = now - self._time
return timedelta(seconds=diff)
def elapsed_ms(self) -> int:
"""Get elapsed time in milliseconds."""
return int(self.elapsed().total_seconds() * 1000)
def has_elapsed_ms(self, ms: int) -> bool:
"""Check if specified milliseconds have elapsed."""
return self.elapsed_ms() >= ms
def mut_every_ms(self, ms: int) -> bool:
"""Check if time has elapsed and reset if true."""
if self.elapsed_ms() >= ms:
self.mut_reset()
return True
return False
def has_elapsed(self, duration: timedelta) -> bool:
"""Check if specified duration has elapsed."""
return self.elapsed() >= duration
def mut_every(self, duration: timedelta) -> bool:
"""Check if duration has elapsed and reset if true."""
if self.has_elapsed(duration):
self.mut_reset()
return True
return False
def mut_reset(self) -> None:
"""Reset the timer to current time."""
self._time = self.clock()
def mut_elapsed_and_reset(self) -> timedelta:
"""Get elapsed time and reset timer."""
now = self.clock()
diff = now - self._time
duration = timedelta(seconds=diff)
self._time = now
return duration
def count(self) -> float:
"""Get the internal time counter value."""
return self._time
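
A short usage sketch for the mut_every_ms pattern (interval and loop bounds are illustrative):

from app.utils import Instant
from time import sleep

tick = Instant.now()
for _ in range(200):            # ~2 s of polling
    if tick.mut_every_ms(500):  # True at most once per 500 ms, then resets
        print("500 ms elapsed, timer reset")
    sleep(0.01)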

63
ble_forward.py Normal file

@@ -0,0 +1,63 @@
import anyio
from bleak import BleakScanner, BleakClient
from bleak.backends.characteristic import BleakGATTCharacteristic
from bleak.backends.device import BLEDevice
from typing import Final, Optional
from loguru import logger
from anyio import create_udp_socket, create_connected_udp_socket
DEVICE_NAME: Final[str] = "Polar Sense E4E71028"
UDP_SERVER_HOST: Final[str] = "localhost"
UDP_SERVER_PORT: Final[int] = 50_000
BLE_HR_SERVICE_UUID: Final[str] = "180D"
BLE_HR_MEASUREMENT_CHARACTERISTIC_UUID: Final[str] = "2A37"
async def main():
async def find_device():
while True:
device = await BleakScanner.find_device_by_name(DEVICE_NAME)
if device:
return device
else:
logger.info("Device not found, retrying...")
async with await create_connected_udp_socket(
remote_host=UDP_SERVER_HOST, remote_port=UDP_SERVER_PORT
) as udp:
device = await find_device()
async with BleakClient(device) as client:
logger.info("Connected to target device")
async def find_service(uuid: str):
services = client.services  # get_services() is deprecated since bleak 0.22
for s in services:
if uuid.lower() in s.uuid.lower():
return s
for s in services:
logger.info("Service: {}", s.uuid)
raise ValueError(f"Service not found: {uuid}")
async def find_char(service_uuid: str, char_uuid: str):
service = await find_service(service_uuid)
char = service.get_characteristic(char_uuid)
if char is None:
raise ValueError(f"Characteristic not found: {char_uuid}")
return char
hr_measurement_char = await find_char(
BLE_HR_SERVICE_UUID, BLE_HR_MEASUREMENT_CHARACTERISTIC_UUID
)
async def on_hr_data(char: BleakGATTCharacteristic, data: bytearray):
logger.info("hr_measurement={}", data.hex())
await udp.send(data)
logger.info("Starting notify")
await client.start_notify(hr_measurement_char, on_hr_data)
ev = anyio.Event()
await ev.wait()
if __name__ == "__main__":
anyio.run(main)
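
For local testing, a minimal receiving-side sketch (assuming the same host/port constants as above) that decodes the forwarded Heart Rate Measurement bytes:

import anyio
from anyio import create_udp_socket

async def recv_loop():
    async with await create_udp_socket(local_host="localhost", local_port=50_000) as udp:
        async for packet, addr in udp:
            flags = packet[0]
            # flag bit 0 selects an 8-bit vs 16-bit (little-endian) HR value
            hr = int.from_bytes(packet[1:3], "little") if flags & 0x01 else packet[1]
            print(f"{addr}: hr={hr} bpm")

if __name__ == "__main__":
    anyio.run(recv_loop)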

86
components/app_proto/proto/hr_packet.proto Normal file

@@ -0,0 +1,86 @@
// See `Generator Options` section in
// https://jpa.kapsi.fi/nanopb/docs/reference.html#generator-options
// for nanopb specific options
//
// Remember to include
// https://github.com/nanopb/nanopb/blob/master/generator/proto/nanopb.proto
// when generating the proto file
syntax = "proto3";
import "nanopb.proto";
package hr_packet;
enum HrConfidence {
// [0,25)
ZERO = 0;
// [25,50)
LOW = 1;
// [50,75)
MEDIUM = 2;
// [75,100]
HIGH = 3;
}
enum LoRaBW {
BW_NONE = 0;
// nobody is using 7.8 kHz bandwidth
// so to satisfy protobuf, we use NONE for zero value
BW_10_4 = 0x08;
BW_15_6 = 0x01;
BW_20_8 = 0x09;
BW_31_25 = 0x02;
BW_41_7 = 0x0A;
BW_62_5 = 0x03;
BW_125_0 = 0x04;
BW_250_0 = 0x05;
BW_500_0 = 0x06;
}
message LoRaParameters {
LoRaBW bw = 1;
int32 sf = 2 [(nanopb).int_size = IS_8];
float frequency = 3;
}
message StatusFlag {
HrConfidence hr_confidence = 1 [(nanopb).int_size = IS_8];
bool is_active = 2;
bool is_on_skin = 3;
uint32 battery = 4 [(nanopb).int_size = IS_8];
}
message HrOnlyPacket {
StatusFlag status = 1 [(nanopb).int_size = IS_8];
uint32 id = 2 [(nanopb).int_size = IS_8];
uint32 packet_num = 3 [(nanopb).int_size = IS_8];
uint32 hr = 4 [(nanopb).int_size = IS_8];
}
message HrPpgPacket {
StatusFlag status = 1 [(nanopb).int_size = IS_8];
uint32 id = 2 [(nanopb).int_size = IS_8];
uint32 packet_num = 3 [(nanopb).int_size = IS_8];
uint32 hr = 4 [(nanopb).int_size = IS_8];
repeated uint32 ppg_data = 5 [(nanopb).max_count = 36];
}
message PacketStatus {
// Estimation of RSSI of the LoRa® signal (after despreading) on last packet received.
sint32 signal_rssi_pkt = 1;
}
message GatewayInfo {
uint32 region_id = 1 [(nanopb).int_size = IS_8];
bytes gateway_mac = 2 [(nanopb).max_size = 6];
LoRaParameters radio_parameters = 3;
}
message HrPacket {
GatewayInfo gateway_info = 1;
oneof packet {
HrOnlyPacket hr_only_packet = 2;
HrPpgPacket hr_ppg_packet = 3;
}
PacketStatus packet_status = 4;
}
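
With the betterproto bindings generated from this file, building and parsing one packet looks roughly like the following (a sketch; field values invented):

from app.proto.hr_packet import HrPacket, HrOnlyPacket, StatusFlag, HrConfidence

pkt = HrPacket(
    hr_only_packet=HrOnlyPacket(
        status=StatusFlag(hr_confidence=HrConfidence.HIGH, is_on_skin=True, battery=80),
        id=1,
        packet_num=42,
        hr=121,
    )
)
wire = bytes(pkt)                 # betterproto messages serialize via bytes()
decoded = HrPacket().parse(wire)
assert decoded.hr_only_packet.hr == 121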

213
nanopb.proto Normal file

@@ -0,0 +1,213 @@
// This file contains definitions of custom options used to control the
// code generator in nanopb protocol buffers library.
//
// Most commonly used options are max_count and max_size, which allow
// the generator to allocate static arrays for repeated and string fields.
//
// There are three ways to use these options:
// 1. Use a separate <protofile>.options file
// 2. Use command line switches to nanopb_generator.py
// 3. Use [(nanopb).option = value] in your <protofile>.proto file
//
// For detailed documentation, refer to "Generator options" in docs/reference.md
syntax = "proto2";
import "google/protobuf/descriptor.proto";
option java_package = "fi.kapsi.koti.jpa.nanopb";
enum FieldType {
FT_DEFAULT = 0; // Automatically decide field type, generate static field if possible.
FT_CALLBACK = 1; // Always generate a callback field.
FT_POINTER = 4; // Always generate a dynamically allocated field.
FT_STATIC = 2; // Generate a static field or raise an exception if not possible.
FT_IGNORE = 3; // Ignore the field completely.
FT_INLINE = 5; // Legacy option, use the separate 'fixed_length' option instead
}
enum IntSize {
IS_DEFAULT = 0; // Default, 32/64bit based on type in .proto
IS_8 = 8;
IS_16 = 16;
IS_32 = 32;
IS_64 = 64;
}
enum TypenameMangling {
M_NONE = 0; // Default, no typename mangling
M_STRIP_PACKAGE = 1; // Strip current package name
M_FLATTEN = 2; // Only use last path component
M_PACKAGE_INITIALS = 3; // Replace the package name by the initials
}
enum DescriptorSize {
DS_AUTO = 0; // Select minimal size based on field type
DS_1 = 1; // 1 word; up to 15 byte fields, no arrays
DS_2 = 2; // 2 words; up to 4095 byte fields, 4095 entry arrays
DS_4 = 4; // 4 words; up to 2^32-1 byte fields, 2^16-1 entry arrays
DS_8 = 8; // 8 words; up to 2^32-1 entry arrays
}
// This is the inner options message, which basically defines options for
// a field. When it is used in message or file scope, it applies to all
// fields.
message NanoPBOptions {
// Allocated size for 'bytes' and 'string' fields.
// For string fields, this should include the space for null terminator.
optional int32 max_size = 1;
// Maximum length for 'string' fields. Setting this is equivalent
// to setting max_size to a value of length+1.
optional int32 max_length = 14;
// Allocated number of entries in arrays ('repeated' fields)
optional int32 max_count = 2;
// Size of integer fields. Can save some memory if you don't need
// full 32 bits for the value.
optional IntSize int_size = 7 [default = IS_DEFAULT];
// Size for enum fields. Supported by C++11 and C23 standards.
optional IntSize enum_intsize = 34 [default = IS_DEFAULT];
// Force type of field (callback or static allocation)
optional FieldType type = 3 [default = FT_DEFAULT];
// Use long names for enums, i.e. EnumName_EnumValue.
optional bool long_names = 4 [default = true];
// Add 'packed' attribute to generated structs.
// Note: this cannot be used on CPUs that break on unaligned
// accesses to variables.
optional bool packed_struct = 5 [default = false];
// Add 'packed' attribute to generated enums.
optional bool packed_enum = 10 [default = false];
// Skip this message
optional bool skip_message = 6 [default = false];
// Generate oneof fields as normal optional fields instead of union.
optional bool no_unions = 8 [default = false];
// integer type tag for a message
optional uint32 msgid = 9;
// decode oneof as anonymous union
optional bool anonymous_oneof = 11 [default = false];
// Proto3 singular field does not generate a "has_" flag
optional bool proto3 = 12 [default = false];
// Force proto3 messages to have no "has_" flag.
// This was default behavior until nanopb-0.4.0.
optional bool proto3_singular_msgs = 21 [default = false];
// Generate an enum->string mapping function (can take up lots of space).
optional bool enum_to_string = 13 [default = false];
// Generate validation methods for enums
optional bool enum_validate = 32 [default = false];
// Generate bytes arrays with fixed length
optional bool fixed_length = 15 [default = false];
// Generate repeated field with fixed count
optional bool fixed_count = 16 [default = false];
// Generate message-level callback that is called before decoding submessages.
// This can be used to set callback fields for submsgs inside oneofs.
optional bool submsg_callback = 22 [default = false];
// Shorten or remove package names from type names.
// This option applies only on the file level.
optional TypenameMangling mangle_names = 17 [default = M_NONE];
// Data type for storage associated with callback fields.
optional string callback_datatype = 18 [default = "pb_callback_t"];
// Callback function used for encoding and decoding.
// Prior to nanopb-0.4.0, the callback was specified in per-field pb_callback_t
// structure. This is still supported, but does not work inside e.g. oneof or pointer
// fields. Instead, a new method allows specifying a per-message callback that
// will be called for all callback fields in a message type.
optional string callback_function = 19 [default = "pb_default_field_callback"];
// Select the size of field descriptors. This option has to be defined
// for the whole message, not per-field. Usually automatic selection is
// ok, but if it results in compilation errors you can increase the field
// size here.
optional DescriptorSize descriptorsize = 20 [default = DS_AUTO];
// Set default value for has_ fields.
optional bool default_has = 23 [default = false];
// Extra files to include in generated `.pb.h`
repeated string include = 24;
// Automatic includes to exclude from generated `.pb.h`
// Same as nanopb_generator.py command line flag -x.
repeated string exclude = 26;
// Package name that applies only for nanopb.
optional string package = 25;
// Override type of the field in generated C code. Only to be used with related field types
optional google.protobuf.FieldDescriptorProto.Type type_override = 27;
// Override of the label of the field (see FieldDescriptorProto.Label). Can be used to create
// fields which nanopb considers required in proto3, or whether nanopb treats the field as
// optional/required/repeated.
optional google.protobuf.FieldDescriptorProto.Label label_override = 31;
// Due to historical reasons, nanopb orders fields in structs by their tag number
// instead of the order in .proto. Set this to false to keep the .proto order.
// The default value will probably change to false in nanopb-0.5.0.
optional bool sort_by_tag = 28 [default = true];
// Set the FT_DEFAULT field conversion strategy.
// A field that can become a static member of a c struct (e.g. int, bool, etc)
// will be a static field.
// Fields with dynamic length are converted to either a pointer or a callback.
optional FieldType fallback_type = 29 [default = FT_CALLBACK];
// Override initializer used in generated MyMessage_init_zero and MyMessage_init_default macros
// By default decided automatically based on field default value and datatype.
optional string initializer = 30;
// Discard unused types that are automatically generated by protoc if they are not actually
// needed. Currently this applies to map< > types when the field is ignored by options.
optional bool discard_unused_automatic_types = 33 [default = true];
// Discard messages and fields marked with [deprecated = true] in the proto file.
optional bool discard_deprecated = 35 [default = false];
}
// Extensions to protoc 'Descriptor' type in order to define options
// inside a .proto file.
//
// Protocol Buffers extension number registry
// --------------------------------
// Project: Nanopb
// Contact: Petteri Aimonen <jpa@kapsi.fi>
// Web site: http://kapsi.fi/~jpa/nanopb
// Extensions: 1010 (all types)
// --------------------------------
extend google.protobuf.FileOptions {
optional NanoPBOptions nanopb_fileopt = 1010;
}
extend google.protobuf.MessageOptions {
optional NanoPBOptions nanopb_msgopt = 1010;
}
extend google.protobuf.EnumOptions {
optional NanoPBOptions nanopb_enumopt = 1010;
}
extend google.protobuf.FieldOptions {
optional NanoPBOptions nanopb = 1010;
}
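
For reference, the first of the three usage styles listed in the header comment is a separate <protofile>.options file; a hypothetical hr_packet.options mirroring two of the inline annotations used earlier:

# hr_packet.options (hypothetical; equivalent to the inline [(nanopb)...] annotations)
hr_packet.HrPpgPacket.ppg_data    max_count:36
hr_packet.GatewayInfo.gateway_mac max_size:6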

120
convert_parquet_to_csv.py Normal file

@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
Convert parquet file to CSV with only HR field
"""
import pyarrow.parquet as pq
import pyarrow as pa
import pandas as pd
import sys
def convert_parquet_to_csv(parquet_file, output_file):
"""Convert parquet file to CSV extracting only HR data"""
try:
# Read the parquet file
print(f"Reading {parquet_file}...")
pf = pq.ParquetFile(parquet_file)
print("Schema:")
print(pf.schema)
# Try to read just specific columns
try:
# Read the table with specific columns
table = pf.read(columns=["hr_data"])
print("Successfully read hr_data column")
# Convert to pandas, handling the nested structure carefully
df = table.to_pandas()
print("Converted to pandas DataFrame")
except Exception as e:
print(f"Error reading with pandas conversion: {e}")
# Try alternative approach - read raw pyarrow table
table = pf.read()
print("Read raw table successfully")
# Get hr_data column directly from pyarrow
hr_data_column = table.column("hr_data")
print(f"HR data column type: {hr_data_column.type}")
# Convert the column to a list format
hr_data_values = []
for i in range(len(hr_data_column)):
chunk = hr_data_column.chunk(0)
list_array = chunk.slice(i, 1).to_pandas().iloc[0]
if list_array is not None and len(list_array) > 0:
hr_data_values.extend(list_array)
if hr_data_values:
# Create DataFrame with HR data
hr_df = pd.DataFrame({"HR": hr_data_values})
print(f"\nExtracted {len(hr_data_values)} HR values")
print("Sample HR values:")
print(hr_df.head(10))
# Save to CSV
hr_df.to_csv(output_file, index=False)
print(f"\nSaved HR data to {output_file}")
return True
else:
print("No HR data found")
return False
# If we got here, the pandas conversion worked
print("Columns available:")
print(df.columns.tolist())
print("\nData shape:", df.shape)
# Extract HR data - assuming it's in hr_data column
if "hr_data" in df.columns:
# Handle nested list structure
hr_values = []
for row_idx in range(len(df)):
hr_data = df["hr_data"].iloc[row_idx]
if hr_data is not None and len(hr_data) > 0:
hr_values.extend(hr_data)
if hr_values:
# Create a new DataFrame with HR data
hr_df = pd.DataFrame({"HR": hr_values})
print(f"\nExtracted {len(hr_values)} HR values")
print("Sample HR values:")
print(hr_df.head(10))
# Save to CSV
hr_df.to_csv(output_file, index=False)
print(f"\nSaved HR data to {output_file}")
return True
else:
print("No HR values found in the data")
return False
else:
print("Error: 'hr_data' column not found in the data")
print("Available columns:", df.columns.tolist())
return False
except Exception as e:
print(f"Error: {e}")
import traceback
traceback.print_exc()
return False
if __name__ == "__main__":
parquet_file = "history_20250610_165414.parquet"
output_file = "history_20250610_165414_HR.csv"
success = convert_parquet_to_csv(parquet_file, output_file)
if success:
print(f"\nConversion completed successfully!")
print(f"Input: {parquet_file}")
print(f"Output: {output_file}")
else:
print("Conversion failed!")
sys.exit(1)
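
Under the same assumption (an hr_data column holding lists of HR samples), the happy path condenses to a few lines:

import pyarrow.parquet as pq
import pandas as pd

table = pq.read_table("history_20250610_165414.parquet", columns=["hr_data"])
flat = [v for row in table.column("hr_data").to_pylist() if row for v in row]
pd.DataFrame({"HR": flat}).to_csv("history_20250610_165414_HR.csv", index=False)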

7
gen_client.sh Executable file

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
PROJECT_ROOT=$SCRIPT_DIR
APP_PROTO_DIR=$PROJECT_ROOT/components/app_proto/proto
OUTPUT_DIR=$SCRIPT_DIR/app/proto
uv run python -m grpc_tools.protoc --python_betterproto_out="$OUTPUT_DIR" -I "$APP_PROTO_DIR" "$APP_PROTO_DIR/hr_packet.proto"

89
history_20250610_165414_HR.csv Normal file

@@ -0,0 +1,89 @@
HR
120.0
120.0
120.0
121.0
121.0
121.0
122.0
124.0
124.0
124.0
125.0
125.0
125.0
125.0
124.0
124.0
125.0
126.0
127.0
129.0
130.0
131.0
132.0
134.0
135.0
137.0
138.0
140.0
141.0
143.0
144.0
144.0
145.0
146.0
147.0
147.0
147.0
145.0
144.0
143.0
142.0
140.0
139.0
138.0
138.0
138.0
138.0
138.0
138.0
137.0
135.0
134.0
132.0
130.0
129.0
128.0
127.0
127.0
127.0
127.0
127.0
128.0
128.0
129.0
129.0
129.0
129.0
128.0
127.0
127.0
125.0
124.0
124.0
123.0
123.0
123.0
123.0
124.0
124.0
125.0
125.0
126.0
127.0
127.0
128.0
128.0
128.0
129.0

474
main.py

@@ -11,6 +11,8 @@ from typing import (
TypedDict,
Any,
cast,
Dict,
Union,
)
from loguru import logger
@@ -18,32 +20,57 @@ import numpy as np
import plotly.graph_objects as go
import streamlit as st
import anyio
from anyio.abc import TaskGroup
from anyio import create_memory_object_stream
from anyio.abc import TaskGroup, UDPSocket
from anyio import create_memory_object_stream, create_udp_socket
from anyio.streams.memory import MemoryObjectSendStream, MemoryObjectReceiveStream
from aiomqtt import Client as MqttClient, Message as MqttMessage
from threading import Thread
from time import sleep
from pydantic import BaseModel, computed_field
from datetime import datetime
from datetime import datetime, timedelta
import struct
import awkward as ak
from awkward import Array as AwkwardArray, Record as AwkwardRecord
import orjson
from app.model import AlgoReport
from app.utils import Instant
from collections import deque
from dataclasses import dataclass
from aiomqtt import Client as MqttClient, Message as MqttMessage
from app.proto.hr_packet import HrPacket, HrOnlyPacket, HrPpgPacket, HrConfidence
import betterproto
MQTT_BROKER: Final[str] = "weihua-iot.cn"
MQTT_BROKER_PORT: Final[int] = 1883
MAX_LENGTH = 600
TOPIC: Final[str] = "/hr/region/1/band/#"
UDP_DEVICE_ID: Final[int] = 0xFF
NDArray = np.ndarray
T = TypeVar("T")
class DeviceHistory(TypedDict):
timescape: deque[datetime]
hr_data: deque[float]
# https://handmadesoftware.medium.com/streamlit-asyncio-and-mongodb-f85f77aea825
class AppState(TypedDict):
worker_thread: Thread
client: MqttClient
message_queue: MemoryObjectReceiveStream[MqttMessage]
message_queue: MemoryObjectReceiveStream[Tuple[datetime, bytes]]
mqtt_message_queue: MemoryObjectReceiveStream[Tuple[datetime, MqttMessage]]
task_group: TaskGroup
history: dict[str, AwkwardArray]
device_histories: Dict[Union[int, str], DeviceHistory] # device_id -> DeviceHistory
MQTT_BROKER: Final[str] = "192.168.2.189"
MQTT_BROKER_PORT: Final[int] = 1883
BUSY_POLLING_INTERVAL_S: Final[float] = 0.001
BATCH_MESSAGE_INTERVAL_S: Final[float] = 0.1
NORMAL_REFRESH_INTERVAL_S: Final[float] = 0.5
QUEUE_BUFFER_SIZE: Final[int] = 32
UDP_SERVER_HOST: Final[str] = "localhost"
UDP_SERVER_PORT: Final[int] = 50_000
MAX_LENGTH = 600
TOPIC: Final[str] = "GwData"
NDArray = np.ndarray
T = TypeVar("T")
@@ -55,181 +82,320 @@ def unwrap(value: Optional[T]) -> T:
return value
def parse_ble_hr_measurement(data: bytes) -> Optional[int]:
"""
Parse BLE Heart Rate Measurement characteristic data according to Bluetooth specification.
Args:
data: Raw bytes from the heart rate measurement characteristic
Returns:
Heart rate value in BPM, or None if parsing fails
Note:
```cpp
/**
* @brief Structure of the Heart Rate Measurement characteristic
*
* @see https://www.bluetooth.com/specifications/gss/
* @see section 3.116 Heart Rate Measurement of the document: GATT Specification Supplement.
*/
struct ble_hr_measurement_flag_t {
// LSB first
/*
* 0: uint8_t
* 1: uint16_t
*/
bool heart_rate_value_format : 1;
bool sensor_contact_detected : 1;
bool sensor_contact_supported : 1;
bool energy_expended_present : 1;
bool rr_interval_present : 1;
uint8_t reserved : 3;
};
```
"""
if len(data) < 2:
return None
# First byte contains flags
flags = data[0]
# Bit 0: Heart Rate Value Format (0 = uint8, 1 = uint16)
heart_rate_value_format = (flags & 0x01) != 0
try:
if heart_rate_value_format:
# 16-bit heart rate value (little endian)
if len(data) < 3:
return None
hr_value = int.from_bytes(data[1:3], byteorder="little")
else:
# 8-bit heart rate value
hr_value = data[1]
return hr_value
except (IndexError, ValueError):
return None
def hr_confidence_to_percentage(confidence: HrConfidence) -> float:
"""Convert HrConfidence enum to percentage value"""
if confidence == HrConfidence.ZERO:
return 0 # lower bound of [0,25)
elif confidence == HrConfidence.LOW:
return 37.5 # mid-point of [25,50)
elif confidence == HrConfidence.MEDIUM:
return 62.5 # mid-point of [50,75)
elif confidence == HrConfidence.HIGH:
return 100 # upper bound of [75,100]
else:
raise ValueError(f"Invalid HrConfidence: {confidence}")
def create_device_history() -> DeviceHistory:
"""Create a new device history structure"""
return {
"timescape": deque(maxlen=MAX_LENGTH),
"hr_data": deque(maxlen=MAX_LENGTH),
}
def get_device_name(device_id: Union[int, str]) -> str:
"""Get display name for device"""
if device_id == UDP_DEVICE_ID:
return "UDP Device"
return f"Device {device_id}"
@st.cache_resource
def resource(params: Any = None):
client: Optional[MqttClient] = None
tx, rx = create_memory_object_stream[MqttMessage]()
set_ev = anyio.Event()
tx, rx = create_memory_object_stream[Tuple[datetime, bytes]](
max_buffer_size=QUEUE_BUFFER_SIZE
)
mqtt_tx, mqtt_rx = create_memory_object_stream[Tuple[datetime, MqttMessage]](
max_buffer_size=QUEUE_BUFFER_SIZE
)
tg: Optional[TaskGroup] = None
async def main():
nonlocal tg
nonlocal client
tg = anyio.create_task_group()
async with tg:
client = MqttClient(MQTT_BROKER, port=MQTT_BROKER_PORT)
async with client:
async def udp_task():
async with await create_udp_socket(
local_host=UDP_SERVER_HOST, local_port=UDP_SERVER_PORT, reuse_port=True
) as udp:
logger.info(
"UDP server listening on {}:{}", UDP_SERVER_HOST, UDP_SERVER_PORT
)
async for packet, _ in udp:
timestamp = datetime.now()
await tx.send((timestamp, packet))
async def mqtt_task():
async with MqttClient(MQTT_BROKER, port=MQTT_BROKER_PORT) as client:
await client.subscribe(TOPIC)
logger.info(
"Subscribed {}:{} to topic {}", MQTT_BROKER, MQTT_BROKER_PORT, TOPIC
"Subscribed to MQTT broker {}:{} topic {}",
MQTT_BROKER,
MQTT_BROKER_PORT,
TOPIC,
)
# https://aiomqtt.bo3hm.com/subscribing-to-a-topic.html
async for message in client.messages:
await tx.send(message)
timestamp = datetime.now()
await mqtt_tx.send((timestamp, message))
tr = Thread(target=anyio.run, args=(main,))
async def combined_task():
nonlocal set_ev, tg
tg = anyio.create_task_group()
set_ev.set()
async with tg:
async with anyio.create_task_group() as inner_tg:
inner_tg.start_soon(udp_task)
inner_tg.start_soon(mqtt_task)
tr = Thread(target=anyio.run, args=(combined_task,))
tr.start()
sleep(0.1)
while not set_ev.is_set():
sleep(BUSY_POLLING_INTERVAL_S)
logger.info("UDP and MQTT tasks initialized in single thread")
state: AppState = {
"worker_thread": tr,
"client": unwrap(client),
"message_queue": rx,
"mqtt_message_queue": mqtt_rx,
"task_group": unwrap(tg),
"history": {},
"device_histories": {},
}
logger.info("Resource created")
return state
class GwMessage(TypedDict):
v: int
mid: int
time: int
ip: str
mac: str
devices: list[Any]
rssi: int
class DeviceMessage(BaseModel):
mac: str
"""
Hex string, capital letters, e.g. "D6AF1CA9C491"
"""
service: str
"""
Hex string, capital letters, e.g. "180D"
"""
characteristic: str
"""
Hex string, capital letters, e.g. "2A37"
"""
value: str
"""
Hex string, capital letters, e.g. "0056"
"""
rssi: int
@property
def value_bytes(self) -> bytes:
return bytes.fromhex(self.value)
def get_device_data(message: GwMessage) -> List[DeviceMessage]:
"""
devices
[[5,"D6AF1CA9C491","180D","2A37","0056",-58],[5,"A09E1AE4E710","180D","2A37","0055",-50]]
unknown, mac addr, service, characteristic, value (hex), rssi
"""
l: list[DeviceMessage] = []
for d in message["devices"]:
x, mac, service, characteristic, value, rssi = d
l.append(
DeviceMessage(
mac=mac,
service=service,
characteristic=characteristic,
value=value,
rssi=rssi,
)
)
return l
def payload_to_hr(payload: bytes) -> int:
"""
ignore the first byte, parse the rest as a big-endian integer
Bit 0 (Heart Rate Format)
0: Heart rate value is 8 bits
1: Heart rate value is 16 bits
Bit 3 (Energy Expended)
Indicates whether energy expended data is present
Bit 4 (RR Interval)
Indicates whether RR interval data is present
"""
flags = payload[0]
if flags & 0b00000001:
return int.from_bytes(payload[1:3], "big")
else:
return payload[1]
def main():
state = resource()
logger.info("Resource created")
history = state["history"]
def push_new_message(message: GwMessage):
dms = get_device_data(message)
now = datetime.now()
for dm in dms:
rec = AwkwardRecord(
{
"time": now,
"value": payload_to_hr(dm.value_bytes),
"rssi": dm.rssi,
}
)
if dm.mac not in history:
history[dm.mac] = AwkwardArray([rec])
else:
history[dm.mac] = ak.concatenate([history[dm.mac], [rec]])
if len(history[dm.mac]) > MAX_LENGTH:
history[dm.mac] = AwkwardArray(history[dm.mac][-MAX_LENGTH:])
device_histories = state["device_histories"]
def on_export():
now = datetime.now()
filename = f"export-{now.strftime('%Y-%m-%d-%H-%M-%S')}.parquet"
ak.to_parquet([history], filename)
logger.info("Export to {}", filename)
file_name = f"history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.parquet"
logger.info(f"Exporting to {file_name}")
# Export all device histories
export_data = {
device_id: ak.Record(dev_hist)
for device_id, dev_hist in device_histories.items()
}
rec = ak.Record(export_data)
ak.to_parquet(rec, file_name)
def on_clear():
history.clear()
logger.info("History cleared")
nonlocal device_histories
logger.info("Clearing history")
device_histories.clear()
# https://docs.streamlit.io/develop/api-reference/layout
st.title("HR Visualizer")
with st.container(border=True):
c1, c2 = st.columns(2)
with c1:
st.button(
"Export", help="Export the current data to a parquet file", on_click=on_export
"Export",
help="Export the current data to a parquet file",
on_click=on_export,
)
with c2:
st.button(
"Clear",
help="Clear the current data",
on_click=on_clear,
)
# Device selection
if device_histories:
selected_devices = st.multiselect(
"Select devices to display:",
options=list(device_histories.keys()),
default=list(device_histories.keys()),
format_func=get_device_name,
)
st.button("Clear", help="Clear the current data", on_click=on_clear)
pannel = st.empty()
while True:
try:
message = state["message_queue"].receive_nowait()
except anyio.WouldBlock:
continue
m: str
if isinstance(message.payload, str):
m = message.payload
elif isinstance(message.payload, bytes):
m = message.payload.decode("utf-8")
else:
logger.warning("Unknown message type: {}", type(message.payload))
selected_devices = []
# Process UDP messages (treat as device_id = 0)
try:
while True:
timestamp, packet = state["message_queue"].receive_nowait()
hr_value = parse_ble_hr_measurement(packet)
if hr_value is not None:
now = timestamp
if UDP_DEVICE_ID not in device_histories:
device_histories[UDP_DEVICE_ID] = create_device_history()
dev_hist = device_histories[UDP_DEVICE_ID]
dev_hist["timescape"].append(now)
dev_hist["hr_data"].append(float(hr_value))
logger.debug("UDP Device: HR={}", hr_value)
except anyio.WouldBlock:
pass
# Process MQTT messages
try:
while True:
timestamp, mqtt_message = state["mqtt_message_queue"].receive_nowait()
if mqtt_message.payload:
try:
payload_bytes = mqtt_message.payload
if isinstance(payload_bytes, str):
payload_bytes = payload_bytes.encode("utf-8")
elif not isinstance(payload_bytes, bytes):
continue
d = cast(GwMessage, orjson.loads(m))
push_new_message(d)
def to_scatter(key: str, dev_history: AwkwardArray):
x = ak.to_numpy(dev_history["time"])
y = ak.to_numpy(dev_history["value"])
return go.Scatter(x=x, y=y, mode="lines+markers", name=key)
hr_packet = HrPacket()
hr_packet.parse(payload_bytes)
scatters = [to_scatter(k, el) for k, el in history.items()]
fig = go.Figure(scatters)
pannel.plotly_chart(fig)
now = timestamp
device_id = None
hr_value = None
if hr_packet.hr_only_packet:
packet = hr_packet.hr_only_packet
device_id = packet.id
hr_value = packet.hr
elif hr_packet.hr_ppg_packet:
packet = hr_packet.hr_ppg_packet
device_id = packet.id
hr_value = packet.hr
if device_id is not None and hr_value is not None:
if device_id not in device_histories:
device_histories[device_id] = create_device_history()
dev_hist = device_histories[device_id]
dev_hist["timescape"].append(now)
dev_hist["hr_data"].append(float(hr_value))
logger.debug("Device {}: HR={}", device_id, hr_value)
except (ValueError, TypeError, IndexError, UnicodeDecodeError) as e:
logger.error("MQTT protobuf message parsing: {}", e)
except anyio.WouldBlock:
pass
# Update visualization - HR Graphs
if device_histories:
st.subheader("Heart Rate Data")
# Create plots for selected devices
traces = []
colors = [
"red",
"green",
"blue",
"orange",
"purple",
"brown",
"pink",
"gray",
"olive",
"cyan",
]
for i, device_id in enumerate(selected_devices):
if device_id in device_histories:
dev_hist = device_histories[device_id]
if dev_hist["hr_data"] and dev_hist["timescape"]:
color = colors[i % len(colors)]
traces.append(
go.Scatter(
x=list(dev_hist["timescape"]),
y=list(dev_hist["hr_data"]),
mode="lines+markers",
name=get_device_name(device_id),
line=dict(color=color),
marker=dict(size=4),
)
)
if traces:
fig = go.Figure(data=traces)
fig.update_layout(
title="Heart Rate Monitor",
xaxis_title="Time",
yaxis_title="Heart Rate (BPM)",
hovermode="x unified",
showlegend=True,
height=500,
)
st.plotly_chart(fig, use_container_width=True)
else:
st.info("No heart rate data available for selected devices")
else:
st.info("No devices connected yet. Waiting for data...")
sleep(NORMAL_REFRESH_INTERVAL_S)
st.rerun()
if __name__ == "__main__":
main()
# 1659A202
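
Worked examples for parse_ble_hr_measurement (byte values invented; flag bit 0 selects the 8-bit vs 16-bit little-endian format):

assert parse_ble_hr_measurement(bytes([0x00, 0x56])) == 86         # 8-bit HR
assert parse_ble_hr_measurement(bytes([0x01, 0x2C, 0x01])) == 300  # 16-bit LE HR
assert parse_ble_hr_measurement(b"\x00") is None                   # too short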

234
main_mqtt.py Normal file

@@ -0,0 +1,234 @@
from typing import (
Annotated,
AsyncGenerator,
Final,
Generator,
List,
Literal,
Optional,
Tuple,
TypeVar,
TypedDict,
Any,
cast,
)
from loguru import logger
import numpy as np
import plotly.graph_objects as go
import streamlit as st
import anyio
from anyio.abc import TaskGroup
from anyio import create_memory_object_stream
from anyio.streams.memory import MemoryObjectSendStream, MemoryObjectReceiveStream
from aiomqtt import Client as MqttClient, Message as MqttMessage
from threading import Thread
from time import sleep
from pydantic import BaseModel, computed_field
from datetime import datetime
import awkward as ak
from awkward import Array as AwkwardArray, Record as AwkwardRecord
import orjson  # used by orjson.loads below; missing from the original imports
# https://handmadesoftware.medium.com/streamlit-asyncio-and-mongodb-f85f77aea825
class AppState(TypedDict):
worker_thread: Thread
client: MqttClient
message_queue: MemoryObjectReceiveStream[MqttMessage]
task_group: TaskGroup
history: dict[str, AwkwardArray]
MQTT_BROKER: Final[str] = "192.168.2.189"
MQTT_BROKER_PORT: Final[int] = 1883
MAX_LENGTH = 600
TOPIC: Final[str] = "GwData"
NDArray = np.ndarray
T = TypeVar("T")
def unwrap(value: Optional[T]) -> T:
if value is None:
raise ValueError("Value is None")
return value
@st.cache_resource
def resource(params: Any = None):
client: Optional[MqttClient] = None
tx, rx = create_memory_object_stream[MqttMessage]()
tg: Optional[TaskGroup] = None
async def main():
nonlocal tg
nonlocal client
tg = anyio.create_task_group()
async with tg:
client = MqttClient(MQTT_BROKER, port=MQTT_BROKER_PORT)
async with client:
await client.subscribe(TOPIC)
logger.info(
"Subscribed {}:{} to topic {}", MQTT_BROKER, MQTT_BROKER_PORT, TOPIC
)
# https://aiomqtt.bo3hm.com/subscribing-to-a-topic.html
async for message in client.messages:
await tx.send(message)
tr = Thread(target=anyio.run, args=(main,))
tr.start()
sleep(0.1)
state: AppState = {
"worker_thread": tr,
"client": unwrap(client),
"message_queue": rx,
"task_group": unwrap(tg),
"history": {},
}
return state
class GwMessage(TypedDict):
v: int
mid: int
time: int
ip: str
mac: str
devices: list[Any]
rssi: int
class DeviceMessage(BaseModel):
mac: str
"""
Hex string, capital letters, e.g. "D6AF1CA9C491"
"""
service: str
"""
Hex string, capital letters, e.g. "180D"
"""
characteristic: str
"""
Hex string, capital letters, e.g. "2A37"
"""
value: str
"""
Hex string, capital letters, e.g. "0056"
"""
rssi: int
@property
def value_bytes(self) -> bytes:
return bytes.fromhex(self.value)
def get_device_data(message: GwMessage) -> List[DeviceMessage]:
"""
devices
[[5,"D6AF1CA9C491","180D","2A37","0056",-58],[5,"A09E1AE4E710","180D","2A37","0055",-50]]
unknown, mac addr, service, characteristic, value (hex), rssi
"""
l: list[DeviceMessage] = []
for d in message["devices"]:
x, mac, service, characteristic, value, rssi = d
l.append(
DeviceMessage(
mac=mac,
service=service,
characteristic=characteristic,
value=value,
rssi=rssi,
)
)
return l
def payload_to_hr(payload: bytes) -> int:
"""
The first byte holds the flags; bit 0 selects the width of the value that follows (little-endian per the GATT spec)
Bit 0 (Heart Rate Format)
0: Heart rate value is 8 bits
1: Heart rate value is 16 bits
Bit 3 (Energy Expended)
Indicates whether energy expended data is present
Bit 4 (RR Interval)
Indicates whether RR interval data is present
"""
flags = payload[0]
if flags & 0b00000001:
return int.from_bytes(payload[1:3], "little")
else:
return payload[1]
def main():
state = resource()
logger.info("Resource created")
history = state["history"]
def push_new_message(message: GwMessage):
dms = get_device_data(message)
now = datetime.now()
for dm in dms:
rec = AwkwardRecord(
{
"time": now,
"value": payload_to_hr(dm.value_bytes),
"rssi": dm.rssi,
}
)
if dm.mac not in history:
history[dm.mac] = AwkwardArray([rec])
else:
history[dm.mac] = ak.concatenate([history[dm.mac], [rec]])
if len(history[dm.mac]) > MAX_LENGTH:
history[dm.mac] = AwkwardArray(history[dm.mac][-MAX_LENGTH:])
def on_export():
now = datetime.now()
filename = f"export-{now.strftime('%Y-%m-%d-%H-%M-%S')}.parquet"
ak.to_parquet([history], filename)
logger.info("Export to {}", filename)
def on_clear():
history.clear()
logger.info("History cleared")
st.button(
"Export", help="Export the current data to a parquet file", on_click=on_export
)
st.button("Clear", help="Clear the current data", on_click=on_clear)
    panel = st.empty()
    while True:
        try:
            message = state["message_queue"].receive_nowait()
        except anyio.WouldBlock:
            # No message yet; sleep briefly instead of spinning at 100% CPU
            sleep(0.05)
            continue
m: str
if isinstance(message.payload, str):
m = message.payload
elif isinstance(message.payload, bytes):
m = message.payload.decode("utf-8")
else:
logger.warning("Unknown message type: {}", type(message.payload))
continue
d = cast(GwMessage, orjson.loads(m))
push_new_message(d)
def to_scatter(key: str, dev_history: AwkwardArray):
x = ak.to_numpy(dev_history["time"])
y = ak.to_numpy(dev_history["value"])
return go.Scatter(x=x, y=y, mode="lines+markers", name=key)
scatters = [to_scatter(k, el) for k, el in history.items()]
fig = go.Figure(scatters)
        panel.plotly_chart(fig)
if __name__ == "__main__":
main()
# 1659A202

24
pyproject.toml Normal file
View File

@ -0,0 +1,24 @@
[project]
name = "max_visualizer"
version = "0.1.0"
description = "python scripts for testing embedded system"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"aiomqtt>=2.4.0",
"anyio>=4.9.0",
"awkward>=2.8.3",
"betterproto[compiler]>=2.0.0b7",
"bleak>=0.22.3",
"grpcio-tools>=1.71.0",
"loguru>=0.7.3",
"matplotlib>=3.10.3",
"paho-mqtt>=2.1.0",
"plotly>=6.1.2",
"pydantic>=2.11.5",
"seaborn>=0.13.2",
"streamlit>=1.45.1",
]
[project.scripts]
main = "main:main"

9
requirements.txt Normal file
View File

@ -0,0 +1,9 @@
aiomqtt==2.3.0
anyio==4.6.2.post1
awkward==2.7.4
loguru==0.7.2
numpy==2.2.2
orjson==3.10.15
plotly==5.19.0
pydantic==2.10.6
streamlit==1.40.1

5
run.sh
View File

@ -1 +1,4 @@
python -m streamlit run main.py
#!/usr/bin/env bash
# python -m streamlit run main.py
python3.12 -m streamlit run main.py

1573
uv.lock generated Normal file

File diff suppressed because it is too large Load Diff

216
visualize_hr_data.py Normal file
View File

@ -0,0 +1,216 @@
#!/usr/bin/env python3
"""
Visualize HR data from two CSV files with max value alignment
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def load_and_visualize_hr_data():
"""Load and visualize HR data from both CSV files with max value alignment"""
# Load the first CSV file (with timestamps)
print("Loading 2025-06-10_16_26.CSV...")
df1 = pd.read_csv("2025-06-10_16_26.CSV")
print(f"Dataset 1 shape: {df1.shape}")
print(f"Dataset 1 columns: {df1.columns.tolist()}")
print(f"Dataset 1 HR range: {df1['hr'].min():.1f} - {df1['hr'].max():.1f}")
# Load the second CSV file (HR only)
print("\nLoading history_20250610_165414_HR.csv...")
df2 = pd.read_csv("history_20250610_165414_HR.csv")
print(f"Dataset 2 shape: {df2.shape}")
print(f"Dataset 2 columns: {df2.columns.tolist()}")
print(f"Dataset 2 HR range: {df2['HR'].min():.1f} - {df2['HR'].max():.1f}")
# Find the maximum values and their indices
max_hr1 = df1["hr"].max()
max_idx1 = df1["hr"].idxmax()
max_hr2 = df2["HR"].max()
max_idx2 = df2["HR"].idxmax()
print(f"\nDataset 1 max HR: {max_hr1:.1f} at index {max_idx1}")
print(f"Dataset 2 max HR: {max_hr2:.1f} at index {max_idx2}")
    # Create time indices for both datasets (assumes ~1 sample per second,
    # so the row index doubles as a time axis in seconds)
    df1["time_seconds"] = df1.index
    df2["time_seconds"] = df2.index
    # Align the series by shifting each so its max value lands at time 0
    df1["time_aligned"] = df1["time_seconds"] - max_idx1
    df2["time_aligned"] = df2["time_seconds"] - max_idx2
print(f"\nAfter alignment:")
print(
f"Dataset 1 time range: {df1['time_aligned'].min():.1f} to {df1['time_aligned'].max():.1f} seconds"
)
print(
f"Dataset 2 time range: {df2['time_aligned'].min():.1f} to {df2['time_aligned'].max():.1f} seconds"
)
# Create the visualization
plt.style.use("seaborn-v0_8")
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
fig.suptitle(
"Heart Rate Data Visualization (Max Values Aligned)",
fontsize=16,
fontweight="bold",
)
# Plot 1: Time series of both datasets (aligned)
axes[0, 0].plot(
df1["time_aligned"],
df1["hr"],
"o-",
alpha=0.7,
markersize=3,
label=f"Dataset 1 (max: {max_hr1:.1f})",
color="blue",
)
axes[0, 0].plot(
df2["time_aligned"],
df2["HR"],
"s-",
alpha=0.7,
markersize=3,
label=f"Dataset 2 (max: {max_hr2:.1f})",
color="red",
)
# Highlight the max values
axes[0, 0].axvline(
x=0, color="black", linestyle="--", alpha=0.5, label="Max values aligned"
)
axes[0, 0].scatter([0], [max_hr1], color="blue", s=100, zorder=5, marker="*")
axes[0, 0].scatter([0], [max_hr2], color="red", s=100, zorder=5, marker="*")
axes[0, 0].set_xlabel("Time (seconds, aligned to max)")
axes[0, 0].set_ylabel("Heart Rate (bpm)")
axes[0, 0].set_title("Heart Rate Over Time (Aligned at Max Values)")
axes[0, 0].legend()
axes[0, 0].grid(True, alpha=0.3)
# Plot 2: Distribution comparison
axes[0, 1].hist(
df1["hr"], bins=20, alpha=0.7, label="Dataset 1", color="blue", density=True
)
axes[0, 1].hist(
df2["HR"], bins=20, alpha=0.7, label="Dataset 2", color="red", density=True
)
axes[0, 1].axvline(
max_hr1, color="blue", linestyle="--", alpha=0.8, label=f"Max 1: {max_hr1:.1f}"
)
axes[0, 1].axvline(
max_hr2, color="red", linestyle="--", alpha=0.8, label=f"Max 2: {max_hr2:.1f}"
)
axes[0, 1].set_xlabel("Heart Rate (bpm)")
axes[0, 1].set_ylabel("Density")
axes[0, 1].set_title("Heart Rate Distribution")
axes[0, 1].legend()
axes[0, 1].grid(True, alpha=0.3)
# Plot 3: Box plot comparison
data_for_box = [df1["hr"], df2["HR"]]
labels = ["Dataset 1\n(2025-06-10_16_26)", "Dataset 2\n(history_20250610_165414)"]
bp = axes[1, 0].boxplot(data_for_box, tick_labels=labels, patch_artist=True)
bp["boxes"][0].set_facecolor("lightblue")
bp["boxes"][1].set_facecolor("lightcoral")
axes[1, 0].set_ylabel("Heart Rate (bpm)")
axes[1, 0].set_title("Heart Rate Distribution Comparison")
axes[1, 0].grid(True, alpha=0.3)
# Plot 4: Overlay plot around max values (zoomed in view)
# Show ±20 seconds around the aligned max values
window = 20
df1_window = df1[(df1["time_aligned"] >= -window) & (df1["time_aligned"] <= window)]
df2_window = df2[(df2["time_aligned"] >= -window) & (df2["time_aligned"] <= window)]
if len(df1_window) > 0:
axes[1, 1].plot(
df1_window["time_aligned"],
df1_window["hr"],
"o-",
alpha=0.8,
markersize=4,
label=f"Dataset 1",
color="blue",
linewidth=2,
)
if len(df2_window) > 0:
axes[1, 1].plot(
df2_window["time_aligned"],
df2_window["HR"],
"s-",
alpha=0.8,
markersize=4,
label=f"Dataset 2",
color="red",
linewidth=2,
)
axes[1, 1].axvline(
x=0, color="black", linestyle="--", alpha=0.5, label="Max alignment"
)
axes[1, 1].scatter(
[0], [max_hr1], color="blue", s=150, zorder=5, marker="*", edgecolor="black"
)
axes[1, 1].scatter(
[0], [max_hr2], color="red", s=150, zorder=5, marker="*", edgecolor="black"
)
axes[1, 1].set_xlabel("Time (seconds, aligned to max)")
axes[1, 1].set_ylabel("Heart Rate (bpm)")
axes[1, 1].set_title(f"Zoomed View Around Max Values (±{window}s)")
axes[1, 1].legend()
axes[1, 1].grid(True, alpha=0.3)
plt.tight_layout()
# Save the plot
output_file = "hr_data_visualization_aligned.png"
plt.savefig(output_file, dpi=300, bbox_inches="tight")
print(f"\nVisualization saved as: {output_file}")
# Show the plot
plt.show()
# Print detailed statistics
print("\n" + "=" * 60)
print("DETAILED STATISTICS (MAX VALUES ALIGNED)")
print("=" * 60)
print("\nDataset 1 (2025-06-10_16_26.CSV):")
print(f" Records: {len(df1)}")
print(f" Mean HR: {df1['hr'].mean():.1f} bpm")
print(f" Median HR: {df1['hr'].median():.1f} bpm")
print(f" Max HR: {max_hr1:.1f} bpm (at original index {max_idx1})")
print(f" Std Dev: {df1['hr'].std():.1f} bpm")
print(f" Range: {df1['hr'].min():.1f} - {df1['hr'].max():.1f} bpm")
print("\nDataset 2 (history_20250610_165414_HR.csv):")
print(f" Records: {len(df2)}")
print(f" Mean HR: {df2['HR'].mean():.1f} bpm")
print(f" Median HR: {df2['HR'].median():.1f} bpm")
print(f" Max HR: {max_hr2:.1f} bpm (at original index {max_idx2})")
print(f" Std Dev: {df2['HR'].std():.1f} bpm")
print(f" Range: {df2['HR'].min():.1f} - {df2['HR'].max():.1f} bpm")
print(f"\nAlignment Info:")
print(f" Max HR difference: {abs(max_hr1 - max_hr2):.1f} bpm")
print(
f" Time shift applied: Dataset 1 shifted by -{max_idx1}s, Dataset 2 shifted by -{max_idx2}s"
)
print("\n" + "=" * 60)
return df1, df2
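# Illustrative alternative (not called above): aligning on the single max
# sample is brittle if either trace has a spurious spike. A common option is
# to pick the lag that maximizes the cross-correlation of the two signals.
# A minimal sketch, assuming both series share the same sampling rate:
def best_lag_by_xcorr(a: np.ndarray, b: np.ndarray) -> int:
    """Return the shift (in samples) that best aligns b with a."""
    a0 = a - a.mean()
    b0 = b - b.mean()
    corr = np.correlate(a0, b0, mode="full")
    # mode="full" yields lags from -(len(b)-1) to len(a)-1; re-center so a
    # return value of 0 means the series are already aligned
    return int(np.argmax(corr) - (len(b0) - 1))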
if __name__ == "__main__":
print("Starting HR data visualization with max value alignment...")
df1, df2 = load_and_visualize_hr_data()
print("Visualization complete!")