diff --git a/projects/TPVFormer/README.md b/projects/TPVFormer/README.md
index 9a0681bf88..fcc06ec679 100644
--- a/projects/TPVFormer/README.md
+++ b/projects/TPVFormer/README.md
@@ -24,7 +24,7 @@ We implement TPVFormer and provide the results and checkpoints on nuScenes datas
 
 In MMDetection3D's root directory, run the following command to train the model:
 
-1. Downloads the [pretrained backbone weights](<>) to checkpoints/
+1. Downloads the [pretrained backbone weights](https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tpvformer/tpvformer_8xb1-2x_nus-seg/tpvformer_pretrained_fcos3d_r101_dcn.pth) to checkpoints/
 
 2. For example, to train TPVFormer on 8 GPUs, please use
 
diff --git a/projects/TPVFormer/configs/tpvformer_8xb1-2x_nus-seg.py b/projects/TPVFormer/configs/tpvformer_8xb1-2x_nus-seg.py
index 7861c6f13e..70021d8ff5 100644
--- a/projects/TPVFormer/configs/tpvformer_8xb1-2x_nus-seg.py
+++ b/projects/TPVFormer/configs/tpvformer_8xb1-2x_nus-seg.py
@@ -258,7 +258,7 @@
         stage_with_dcn=(False, False, True, True),
         init_cfg=dict(
             type='Pretrained',
-            checkpoint='checkpoints/tpvformer_r101_dcn_fcos3d_pretrain.pth',
+            checkpoint='checkpoints/tpvformer_pretrained_fcos3d_r101_dcn.pth',
             prefix='backbone.')),
     neck=dict(
         type='mmdet.FPN',
@@ -270,7 +270,7 @@
         relu_before_extra_convs=True,
         init_cfg=dict(
             type='Pretrained',
-            checkpoint='checkpoints/tpvformer_r101_dcn_fcos3d_pretrain.pth',
+            checkpoint='checkpoints/tpvformer_pretrained_fcos3d_r101_dcn.pth',
             prefix='neck.')),
     encoder=dict(
         type='TPVFormerEncoder',
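
For context, a minimal usage sketch of the two README steps this patch touches, assuming MMDetection3D's standard `tools/dist_train.sh` launcher; the actual command under item 2 lies outside the hunk, so the training invocation below is an assumption rather than the README's verbatim text:

```bash
# Step 1 (from the patch): download the renamed pretrained backbone weights into
# checkpoints/, matching the path the updated config now points at.
mkdir -p checkpoints
wget -P checkpoints/ \
    https://download.openmmlab.com/mmdetection3d/v1.1.0_models/tpvformer/tpvformer_8xb1-2x_nus-seg/tpvformer_pretrained_fcos3d_r101_dcn.pth

# Step 2 (assumed, based on MMDetection3D conventions): train TPVFormer on 8 GPUs
# with the distributed launcher, run from the MMDetection3D root directory.
bash tools/dist_train.sh projects/TPVFormer/configs/tpvformer_8xb1-2x_nus-seg.py 8
```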