mmpose/projects/uniformer/configs/td-hm_uniformer-b-8xb32-210...

_base_ = ['mmpose::_base_/default_runtime.py']
custom_imports = dict(imports='projects.uniformer.models')
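# importing projects.uniformer.models at config-parse time registers the
# custom UniFormer backbone with MMPose's registry, so `type='UniFormer'`
# below can be resolved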
# runtime
train_cfg = dict(max_epochs=210, val_interval=10)
# let DDP handle parameters that receive no gradients when the pretrained
# backbone is used
find_unused_parameters = True
# optimizer
optim_wrapper = dict(optimizer=dict(
    type='Adam',
    lr=5e-4,
))
# learning policy
param_scheduler = [
    dict(
        type='LinearLR', begin=0, end=500, start_factor=0.001,
        by_epoch=False),  # warm-up
    dict(
        type='MultiStepLR',
        begin=0,
        end=210,
        milestones=[170, 200],
        gamma=0.1,
        by_epoch=True)
]
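# the warm-up ramps the lr linearly from 5e-4 * 0.001 = 5e-7 to 5e-4 over the
# first 500 iterations; afterwards the lr is multiplied by 0.1 at epochs 170
# and 200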
# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=256)
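# only applied when training is launched with the `--auto-scale-lr` flag:
# MMEngine then scales the lr linearly, lr = 5e-4 * total_batch_size / 256,
# so the intended 8 GPUs x 32 samples per GPU leave the lr unchanged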
# hooks
default_hooks = dict(checkpoint=dict(save_best='coco/AP', rule='greater'))
# codec settings
codec = dict(
    type='MSRAHeatmap', input_size=(320, 448), heatmap_size=(80, 112), sigma=3)
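# MSRAHeatmap encodes each keypoint as a 2D Gaussian with sigma=3 px on the
# 80x112 heatmap, i.e. the 320x448 input downsampled by a factor of 4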
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        type='UniFormer',
        embed_dims=[64, 128, 320, 512],
        depths=[5, 8, 20, 7],
        head_dim=64,
        drop_path_rate=0.55,
        use_checkpoint=False,  # whether to use torch.utils.checkpoint
        use_window=False,  # whether to use window MHRA
        use_hybrid=False,  # whether to use hybrid MHRA
        init_cfg=dict(
            # Set the path to the pretrained backbone here
            type='Pretrained',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
            'uniformer/uniformer_base_in1k.pth'  # noqa
        )),
    head=dict(
        type='HeatmapHead',
        in_channels=512,
        out_channels=17,
        final_layer=dict(kernel_size=1),
        loss=dict(type='KeypointMSELoss', use_target_weight=True),
        decoder=codec),
    test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
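# flip_test averages the heatmaps predicted for the original and the
# horizontally flipped image; shift_heatmap shifts the flipped heatmap by one
# pixel to compensate for the flip-induced misalignment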
# base dataset settings
dataset_type = 'CocoDataset'
data_mode = 'topdown'
data_root = 'data/coco/'
# pipelines
train_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(type='RandomBBoxTransform'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
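# the augmentations (flip, half-body crop, bbox jitter) act on the bbox
# center/scale before TopdownAffine crops each instance to the 320x448 input
# and GenerateTarget renders the heatmap labels with the codec above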
val_pipeline = [
    dict(type='LoadImage'),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]
# data loaders
train_dataloader = dict(
    batch_size=32,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/person_keypoints_train2017.json',
        data_prefix=dict(img='train2017/'),
        pipeline=train_pipeline,
    ))
val_dataloader = dict(
    batch_size=256,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='annotations/person_keypoints_val2017.json',
        bbox_file='data/coco/person_detection_results/'
        'COCO_val2017_detections_AP_H_56_person.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=val_pipeline,
    ))
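# evaluation uses pre-computed person detections (human AP 56 on val2017,
# per the file name) instead of ground-truth boxes, following the standard
# top-down evaluation protocol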
test_dataloader = val_dataloader
# evaluators
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/person_keypoints_val2017.json')
test_evaluator = val_evaluator
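# CocoMetric computes OKS-based AP/AR; the checkpoint hook above tracks
# 'coco/AP' from this metric to keep the best model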