Tutorial for UWB Localization

import torch
import torch.nn as nn
import os

from pysensing.uwb.datasets.get_dataloader import *
from pysensing.uwb.models.get_model import *
from pysensing.uwb.training.localization import *
from pysensing.uwb.inference.predict import *
from pysensing.uwb.inference.embedding import *

Download Data from Cloud Storage

Open the following link in your browser to download the localization dataset:

[Download Pedestrian_Tracking Dataset](https://pysensing.oss-ap-southeast-1.aliyuncs.com/data/uwb/Pedestrian_Tracking.zip)

Unzip the downloaded file and move it to your data folder. For localization, the data folder should look like this:

|---data
|------|---localization
|------|------|---Pedestrian_Tracking
|------|------|------|---processed_data
|------|------|------|------|---AnchorPos.mat
|------|------|------|------|---Bg_CIR_VAR.mat
|------|------|------|------|---Dyn_CIR_VAR.mat
|------|------|------|---raw_data
......
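Before loading anything, it can help to confirm the files landed where the loader expects them. A minimal sketch (the paths below simply mirror the processed_data layout shown above; adjust root to your own data folder):

import os

root = './data'
expected = [
    'localization/Pedestrian_Tracking/processed_data/AnchorPos.mat',
    'localization/Pedestrian_Tracking/processed_data/Bg_CIR_VAR.mat',
    'localization/Pedestrian_Tracking/processed_data/Dyn_CIR_VAR.mat',
]
for rel in expected:
    path = os.path.join(root, rel)
    # Flag anything missing now, before the dataloader fails less legibly
    print('ok     ' if os.path.isfile(path) else 'MISSING', path)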

Load the data

Pedestrian tracking dataset:

Human Tracking Dataset - UWB size: n x 1 x 500 x 2

Dataset name choices are:

- 'pedestrian_tracking_mod1_CIR'
- 'pedestrian_tracking_mod2_CIR'
- 'pedestrian_tracking_mod3_CIR'
- 'pedestrian_tracking_mod1_Var'
- 'pedestrian_tracking_mod2_Var'
- 'pedestrian_tracking_mod3_Var'

root = './data'
# Load train/val/test dataloaders for the mod1 CIR variant of the dataset
train_loader, val_loader, test_loader = load_localization_dataset(root, 'pedestrian_tracking_mod1_CIR')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Peek at one batch to check the tensor shapes
for data in train_loader:
    x, y = data
    print(x.size())
    print(y.size())
    break
Loading pedestrian tracking CIR dataset in mod 1...
torch.Size([64, 1, 500, 2])
torch.Size([64, 1])
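Each batch is therefore 64 samples of shape 1 x 500 x 2, with a single regression target per sample. To eyeball what one CIR sample looks like, a quick matplotlib sketch (assuming, per the shape above, 500 time taps across 2 channels; matplotlib is not a pysensing dependency):

import matplotlib.pyplot as plt

sample = x[0, 0]  # one sample: 500 CIR taps x 2 channels
plt.figure(figsize=(8, 3))
plt.plot(sample[:, 0].numpy(), label='channel 0')
plt.plot(sample[:, 1].numpy(), label='channel 1')
plt.xlabel('CIR tap index')
plt.ylabel('amplitude')
plt.legend()
plt.tight_layout()
plt.show()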

Load the model

Model zoo: ResNet

model = load_localization_model(dataset_name='human_tracking', model_name='resnet')
print(model)
human_tracking_resnet(
  (convblock1): Sequential(
    (0): Conv2d(1, 8, kernel_size=(10, 1), stride=(1, 1), padding=(4, 0))
    (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (maxpool1): MaxPool2d(kernel_size=(10, 1), stride=(5, 1), padding=0, dilation=1, ceil_mode=False)
  (convblock2): Sequential(
    (0): Conv2d(8, 16, kernel_size=(4, 2), stride=(1, 1), padding=(1, 0))
    (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (maxpool2): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  (block1): Block(
    (conv1): Conv2d(16, 32, kernel_size=(4, 1), stride=(2, 1), padding=(1, 0), bias=False)
    (batch_norm1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (conv2): Conv2d(32, 32, kernel_size=(4, 1), stride=(1, 1), padding=(1, 0), bias=False)
    (batch_norm2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
    (res): Sequential(
      (0): Conv2d(16, 32, kernel_size=(1, 1), stride=(2, 1), bias=False)
      (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (block2): Block(
    (conv1): Conv2d(32, 64, kernel_size=(4, 1), stride=(2, 1), padding=(1, 0), bias=False)
    (batch_norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (conv2): Conv2d(64, 64, kernel_size=(4, 1), stride=(1, 1), padding=(1, 0), bias=False)
    (batch_norm2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
    (res): Sequential(
      (0): Conv2d(32, 64, kernel_size=(1, 1), stride=(2, 1), bias=False)
      (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (block3): Block(
    (conv1): Conv2d(64, 128, kernel_size=(4, 1), stride=(2, 1), padding=(1, 0), bias=False)
    (batch_norm1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (conv2): Conv2d(128, 128, kernel_size=(4, 1), stride=(1, 1), padding=(1, 0), bias=False)
    (batch_norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
    (res): Sequential(
      (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 1), bias=False)
      (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (maxpool3): MaxPool2d(kernel_size=(7, 1), stride=(7, 1), padding=0, dilation=1, ceil_mode=False)
  (fc): Sequential(
    (0): Linear(in_features=128, out_features=10, bias=True)
    (1): Linear(in_features=10, out_features=1, bias=True)
  )
)
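A forward pass on random input of the dataset's shape is a cheap wiring check before training. This sketch is not part of the pysensing API, just plain PyTorch:

model = model.to(device).eval()
with torch.no_grad():
    dummy = torch.randn(4, 1, 500, 2, device=device)  # fake batch of 4 CIR samples
    out = model(dummy)
print(out.shape)  # expect torch.Size([4, 1]): one regression output per sample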

Model training

# Localization is a regression task; localization_training sets up its own
# optimizer and MAE loss internally, so no criterion needs to be defined here.

localization_training(
    root = root,
    dataset_name='pedestrian_tracking_mod1_CIR',
    model_name='resnet',
    num_epochs=1,
    learning_rate=0.001,
    save_weights=True,
)
Loading pedestrian tracking CIR dataset in mod 1...
Epoch:1,MAE Loss:26.150909321
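Under the hood this is ordinary regression: the logged value is a mean absolute error. For reference, a minimal hand-rolled loop that approximates what localization_training does (the optimizer choice here, Adam, is an assumption; the helper's internals may differ):

import torch.optim as optim

model = model.to(device).train()
criterion = nn.L1Loss()  # MAE, matching the loss logged above
optimizer = optim.Adam(model.parameters(), lr=0.001)

for epoch in range(1):
    running, n = 0.0, 0
    for x_batch, y_batch in train_loader:
        x_batch, y_batch = x_batch.to(device), y_batch.float().to(device)
        optimizer.zero_grad()
        loss = criterion(model(x_batch), y_batch)
        loss.backward()
        optimizer.step()
        running += loss.item() * x_batch.size(0)
        n += x_batch.size(0)
    print(f'Epoch:{epoch + 1}, MAE Loss:{running / n:.6f}')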

Model inference

localization_predictor = predictor(
    task='localization',
    dataset_name='human_tracking',
    model_name='resnet',
    pt_weights='./human_tracking_weights.pth'
)
# Fetch one batch from the test set
for data in test_loader:
    x, y = data
    break
outputs = localization_predictor.predict(x)
print("output shape:", outputs.shape)
Pretrained weights loaded.
output shape: torch.Size([256, 1])
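Since the test batch above also carries ground-truth targets, the batch-level MAE falls out in one line (a sanity-check sketch, not a full test-set evaluation):

# outputs and y are both [256, 1]; detach/cpu in case predict ran on GPU
mae = torch.mean(torch.abs(outputs.detach().cpu() - y.float().cpu()))
print('test-batch MAE:', mae.item())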

Generate embedding

  • Note that the model_name argument refers to the model architecture in load_localization_model, but in load_pretrain_weights it names both the architecture and the pretraining dataset (e.g. 'CIR_model' below).

model = load_localization_model(dataset_name='human_tracking', model_name='resnet')
model = load_pretrain_weights(model, dataset_name='human_tracking', model_name='CIR_model', device=device)
uwb_embedding = localization_uwb_embedding(x, model, device)
print('uwb_embedding shape: ', uwb_embedding.shape)
uwb_embedding shape:  torch.Size([256, 128])
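The 128-dimensional embeddings can feed any downstream model. As one illustrative (non-pysensing) use, pairwise cosine similarity between samples in the batch:

import torch.nn.functional as F

emb = F.normalize(uwb_embedding.detach().cpu(), dim=1)  # unit-norm embeddings
sim = emb @ emb.T  # [256, 256] pairwise cosine similarity
print('similarity of samples 0 and 1:', sim[0, 1].item())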

Total running time of the script: (0 minutes 28.170 seconds)
