.. DO NOT EDIT.
.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
.. "csi/HPE_tutorial.py"
.. LINE NUMBERS ARE GIVEN BELOW.

.. only:: html

    .. note::
        :class: sphx-glr-download-link-note

        :ref:`Go to the end <sphx_glr_download_csi_HPE_tutorial.py>`
        to download the full example code.

.. rst-class:: sphx-glr-example-title

.. _sphx_glr_csi_HPE_tutorial.py:

CSI human pose estimation tutorial
==============================================================

.. GENERATED FROM PYTHON SOURCE LINES 7-10

.. code-block:: Python

    # !pip install pysensing

.. GENERATED FROM PYTHON SOURCE LINES 11-13

In this tutorial, we will implement the CSI human pose estimation task.

.. GENERATED FROM PYTHON SOURCE LINES 13-23

.. code-block:: Python

    import sys
    sys.path.append('../..')
    import torch
    import pysensing.csi.dataset.get_dataloader as get_dataloader
    import pysensing.csi.model.get_model as get_model
    import pysensing.csi.inference.predict as predict
    import pysensing.csi.inference.embedding as embedding
    import pysensing.csi.inference.train as train
    import itertools

.. GENERATED FROM PYTHON SOURCE LINES 24-26

Load the data
-----------------------------------

.. GENERATED FROM PYTHON SOURCE LINES 26-41

.. code-block:: Python

    # MMFi is the first multi-modal, non-intrusive 4D human dataset with 27 daily or
    # rehabilitation action categories, leveraging LiDAR, mmWave radar, and WiFi
    # signals for device-free human sensing. MMFi consists of over 320k synchronized
    # frames of five modalities from 40 human subjects.
    # WiPose consists of 166,600 packets in .mat format. These packets contain pose
    # annotations and WiFi channel state information (CSI) for 12 different actions
    # performed by 12 volunteers: wave, walk, throw, run, push, pull, jump, crouch,
    # circle, sit down, stand up, and bend.

    train_loader, val_loader = get_dataloader.load_hpe_dataset(
        dataset_name='MMFi', protocol='protocol1', split_to_use='random_split',
        random_seed=0, random_ratio=0.8, batch_size=1, data_unit='frame')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Grab one batch so we can inspect the CSI input and the pose label.
    for i, data in enumerate(train_loader):
        csi = data['input_wifi-csi'].type(torch.FloatTensor).to(device)
        label = data['output'].to(device)
        break
    print('csi: ', csi)
    print('label: ', label)

.. rst-class:: sphx-glr-script-out

.. code-block:: none

    csi:  tensor([[[[0.4959, 0.3698, 0.4305,  ..., 0.5540, 0.3203, 0.4683],
              [0.5358, 0.3789, 0.4509,  ..., 0.5767, 0.3517, 0.4789],
              [0.5720, 0.4160, 0.4838,  ..., 0.6170, 0.3651, 0.5082],
              ...,
              [0.5376, 0.3919, 0.4655,  ..., 0.5882, 0.3707, 0.5624],
              [0.5396, 0.3919, 0.4464,  ..., 0.5600, 0.3339, 0.5320],
              [0.5026, 0.3661, 0.4382,  ..., 0.5543, 0.3112, 0.5081]],

             [[0.3293, 0.3377, 0.3254,  ..., 0.3707, 0.3036, 0.3367],
              [0.3364, 0.3417, 0.3309,  ..., 0.3815, 0.3167, 0.3211],
              [0.3141, 0.3141, 0.3115,  ..., 0.3842, 0.3018, 0.3595],
              ...,
              [0.7548, 0.7495, 0.7432,  ..., 0.7937, 0.7241, 0.8061],
              [0.7348, 0.7322, 0.7272,  ..., 0.7666, 0.7045, 0.7786],
              [0.7033, 0.7063, 0.7006,  ..., 0.7374, 0.6695, 0.7487]],

             [[0.3067, 0.0000, 0.5128,  ..., 0.4787, 0.3358, 0.2265],
              [0.3322, 0.0148, 0.5342,  ..., 0.5046, 0.3789, 0.2402],
              [0.3582, 0.0409, 0.5595,  ..., 0.5348, 0.3851, 0.2790],
              ...,
              [0.4229, 0.0840, 0.6161,  ..., 0.5881, 0.4484, 0.3661],
              [0.4183, 0.0692, 0.5989,  ..., 0.5565, 0.4307, 0.3482],
              [0.3881, 0.0507, 0.5857,  ..., 0.5323, 0.3885, 0.3010]]]],
           device='cuda:0')
    label:  tensor([[[-0.1949,  0.0232,  3.1783],
             [-0.3006,  0.0347,  3.1546],
             [-0.2652,  0.4270,  3.1546],
             [-0.2454,  0.8219,  3.1567],
             [-0.0892,  0.0118,  3.2019],
             [-0.0895,  0.4030,  3.1671],
             [-0.0898,  0.7999,  3.1667],
             [-0.1860, -0.2286,  3.1815],
             [-0.1770, -0.4805,  3.1848],
             [-0.2033, -0.6012,  3.1786],
             [-0.2065, -0.6756,  3.2086],
             [-0.0321, -0.4293,  3.1972],
             [-0.0321, -0.2042,  3.3059],
             [-0.0321,  0.0455,  3.3032],
             [-0.2957, -0.4108,  3.1786],
             [-0.3557, -0.1720,  3.1322],
             [-0.3557,  0.0397,  3.1786]]], device='cuda:0')
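The comments above also describe the WiPose dataset. Below is a minimal,
hedged sketch of loading it with the same helper; the ``dataset_name``
string and the reuse of the MMFi split arguments are assumptions, so check
the pysensing documentation for the options WiPose actually supports.

.. code-block:: Python

    # Hedged sketch (not executed in this tutorial): point the same helper
    # at WiPose. The protocol/split keyword values below are carried over
    # from the MMFi call above and may not all apply to WiPose.
    wipose_train_loader, wipose_val_loader = get_dataloader.load_hpe_dataset(
        dataset_name='WiPose', protocol='protocol1', split_to_use='random_split',
        random_seed=0, random_ratio=0.8, batch_size=1, data_unit='frame')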
.. GENERATED FROM PYTHON SOURCE LINES 42-45

Load the model
-----------------------------------
For the MMFi dataset, the model zoo contains WPNet and WPFormer.

.. GENERATED FROM PYTHON SOURCE LINES 45-49

.. code-block:: Python

    model = get_model.load_hpe_model('MMFi', 'WPNet')
    print(model)

.. rst-class:: sphx-glr-script-out

.. code-block:: none

    WPNet(
      (encoder_conv1): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (encoder_bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (encoder_relu): ReLU(inplace=True)
      (encoder_maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
      (encoder_layer1): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (1): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (encoder_layer2): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (downsample): Sequential(
            (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
        (1): BasicBlock(
          (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (3): BasicBlock(
          (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (encoder_layer3): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (downsample): Sequential(
            (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
        (1): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (3): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (4): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (5): BasicBlock(
          (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (encoder_layer4): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (downsample): Sequential(
            (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          )
        )
        (1): BasicBlock(
          (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (2): BasicBlock(
          (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (decode): Sequential(
        (0): Conv2d(512, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): Tanh()
        (3): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), bias=False)
      )
      (m): AvgPool2d(kernel_size=(1, 4), stride=(1, 4), padding=0)
      (bn1): BatchNorm2d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (rl): ReLU(inplace=True)
    )
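As noted above, the MMFi model zoo also provides WPFormer. A minimal
sketch, assuming it is loaded through the same ``load_hpe_model``
interface demonstrated for WPNet:

.. code-block:: Python

    # Hedged sketch: load the alternative WPFormer model. Assumes
    # load_hpe_model accepts 'WPFormer' the same way it accepts 'WPNet'.
    model_wpformer = get_model.load_hpe_model('MMFi', 'WPFormer')

    # Standard PyTorch sanity check of model size.
    print('WPFormer parameters:', sum(p.numel() for p in model_wpformer.parameters()))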
.. GENERATED FROM PYTHON SOURCE LINES 50-52

Model train
------------------------

.. GENERATED FROM PYTHON SOURCE LINES 52-59

.. code-block:: Python

    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    epoch_num = 1

    # Train on a small subset (the first 10 batches) so the example runs quickly.
    train_loader_small = list(itertools.islice(train_loader, 10))
    train.hpe_train(train_loader_small, model, epoch_num, optimizer, criterion, device)

.. rst-class:: sphx-glr-script-out

.. code-block:: none

    Epoch:1, Loss:2.923334193
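For a real experiment you would train on the full loader for more than one
epoch. A short sketch reusing the ``hpe_train`` call demonstrated above;
the epoch count here is an arbitrary illustration, not a recommended value.

.. code-block:: Python

    # Sketch: full training run with the same hpe_train signature as above.
    # 10 epochs is an arbitrary choice for illustration.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    train.hpe_train(train_loader, model, 10, optimizer, criterion, device)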
.. GENERATED FROM PYTHON SOURCE LINES 60-62

Model inference
------------------------

.. GENERATED FROM PYTHON SOURCE LINES 62-68

.. code-block:: Python

    model = get_model.load_pretrain(model, 'MMFi', 'WPNet', device=device)
    output = predict.hpe_predict(csi, 'MMFi', model, device).to(device)
    print('output: ', output)

.. rst-class:: sphx-glr-script-out

.. code-block:: none

    Downloading model:https://pysensing.oss-ap-southeast-1.aliyuncs.com/pretrain/csi/HPE/MMFi_WPNet.pth
    output:  tensor([[[ 5.0992e-02,  6.4543e-03,  3.2104e+00],
             [-9.3468e-02, -3.5907e-03,  3.1605e+00],
             [-1.1224e-01,  4.0202e-01,  3.1648e+00],
             [-1.0948e-01,  7.5562e-01,  3.2063e+00],
             [ 1.4737e-01, -1.7280e-03,  3.2240e+00],
             [ 1.8207e-01,  3.8094e-01,  3.1877e+00],
             [ 2.1005e-01,  8.1671e-01,  3.1757e+00],
             [ 7.4897e-02, -2.2392e-01,  3.1735e+00],
             [ 3.6461e-03, -5.1878e-01,  3.1128e+00],
             [-1.9062e-02, -6.1288e-01,  3.0499e+00],
             [ 3.5897e-03, -6.8750e-01,  3.1184e+00],
             [ 1.5305e-01, -4.9971e-01,  3.1634e+00],
             [ 2.1812e-01, -3.5373e-01,  3.1899e+00],
             [ 1.6082e-01, -2.1695e-01,  3.1361e+00],
             [-1.3082e-01, -4.9330e-01,  3.1443e+00],
             [-1.2734e-01, -3.6787e-01,  3.1627e+00],
             [-1.2026e-01, -1.9146e-01,  3.0761e+00]]], device='cuda:0')

.. GENERATED FROM PYTHON SOURCE LINES 69-71

Evaluate the loss
------------------------

.. GENERATED FROM PYTHON SOURCE LINES 71-76

.. code-block:: Python

    criterion = torch.nn.MSELoss().to(device)
    loss = criterion(output, label)
    print(loss)

.. rst-class:: sphx-glr-script-out

.. code-block:: none

    tensor(0.0217, device='cuda:0')
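MSE over the raw coordinates is the training criterion; human pose
estimation results are also commonly reported as Mean Per Joint Position
Error (MPJPE), the average Euclidean distance between predicted and
ground-truth joints. The helper below is an illustrative sketch in plain
PyTorch, not a pysensing API.

.. code-block:: Python

    # Illustrative MPJPE helper (not part of pysensing).
    def mpjpe(pred, gt):
        # pred, gt: (batch, num_joints, 3) tensors of 3D joint coordinates.
        # The norm over the last axis gives per-joint errors; average them.
        return torch.linalg.norm(pred - gt, dim=-1).mean()

    print('MPJPE: ', mpjpe(output, label))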
.. GENERATED FROM PYTHON SOURCE LINES 77-79

Generate embedding
------------------------

.. GENERATED FROM PYTHON SOURCE LINES 79-84

.. code-block:: Python

    csi_embedding = embedding.hpe_csi_embedding(csi, 'MMFi', model, device)
    print('csi_embedding: ', csi_embedding)

.. rst-class:: sphx-glr-script-out

.. code-block:: none

    csi_embedding:  tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.7207,  0.6279,  0.0000,  0.0000],
              ...,
              [ 0.7979,  1.4015,  2.1838,  1.5713],
              [ 3.3422,  7.1199,  8.0342,  5.6333],
              [ 2.3271,  6.4291,  7.5174,  6.5268]],

             [[ 5.1117,  5.4911,  3.6401,  2.0275],
              [ 1.3373,  0.9270,  2.0598,  1.3111],
              [ 0.3016,  0.0000,  0.1857,  0.0430],
              ...,
              [ 0.8771,  1.5256,  1.5693,  0.7322],
              [ 3.1440,  5.5298,  6.2467,  3.3496],
              [ 1.3018,  2.8783,  3.8427,  2.4360]],

             [[ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.3303,  0.1366,  0.1804,  0.0597],
              ...,
              [ 0.2301,  0.4045,  0.2115,  0.0000],
              [ 3.0892,  5.3866,  8.1058,  5.6260],
              [ 4.3345,  7.9370, 11.1127,  8.1078]],

             ...,

             [[ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0418,  0.0000,  0.0625],
              [ 0.0234,  0.0000,  0.0000,  0.0000],
              ...,
              [ 0.5418,  0.8255,  0.9208,  0.5228],
              [ 3.7494,  6.6417,  7.3521,  4.5970],
              [ 6.0892,  9.4266, 10.7914,  7.5804]],

             [[ 1.1545,  1.2559,  1.2325,  0.9917],
              [ 0.7374,  0.5083,  1.2636,  2.3046],
              [ 2.1151,  2.7292,  2.0691,  2.1187],
              ...,
              [ 0.3616,  0.0277,  0.6527,  1.0221],
              [ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000]],

             [[ 0.1146,  0.2527,  0.1733,  0.0000],
              [ 0.1598,  0.3464,  0.2772,  0.0000],
              [ 0.5230,  0.4251,  0.2954,  0.4056],
              ...,
              [ 0.4348,  0.7111,  0.8116,  0.6061],
              [ 0.0000,  0.0000,  0.0000,  0.0000],
              [ 0.0000,  0.0000,  0.0000,  0.0000]]]], device='cuda:0',
           grad_fn=<ReluBackward0>)

.. GENERATED FROM PYTHON SOURCE LINES 85-86

And that's it. We're done with our CSI human pose estimation tutorial. Thanks for reading.

.. rst-class:: sphx-glr-timing

   **Total running time of the script:** (1 minutes 10.555 seconds)

.. _sphx_glr_download_csi_HPE_tutorial.py:

.. only:: html

  .. container:: sphx-glr-footer sphx-glr-footer-example

    .. container:: sphx-glr-download sphx-glr-download-jupyter

      :download:`Download Jupyter notebook: HPE_tutorial.ipynb <HPE_tutorial.ipynb>`

    .. container:: sphx-glr-download sphx-glr-download-python

      :download:`Download Python source code: HPE_tutorial.py <HPE_tutorial.py>`

.. only:: html

 .. rst-class:: sphx-glr-signature

    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_