Hongyang Li
  1. TAPTRv3: Spatial and Temporal Context Foster Robust Tracking of Any Point in Long Video
    Jinyuan Qu*, Hongyang Li*, Shilong Liu, Tianhe Ren, Zhaoyang Zeng, Lei Zhang
    arXiv, 2024
    @article{Qu2024taptrv3,
                title={{TAPTRv3: Spatial and Temporal Context Foster Robust Tracking of Any Point in Long Video}},
                author={Qu, Jinyuan and Li, Hongyang and Liu, Shilong and Ren, Tianhe and Zeng, Zhaoyang and Zhang, Lei},
                journal={arXiv preprint},
                year={2024}
              }
        
  2. TAPTRv2: Attention-based Position Update Improves Tracking Any Point
    Hongyang Li, Hao Zhang, Shilong Liu, Zhaoyang Zeng, Feng Li, Tianhe Ren, Bohan Li, Lei Zhang
    NeurIPS, 2024
    @article{li2024taptrv2,
              title={TAPTRv2: Attention-based Position Update Improves Tracking Any Point},
              author={Li, Hongyang and Zhang, Hao and Liu, Shilong and Zeng, Zhaoyang and Li, Feng and Ren, Tianhe and Li, Bohan and Zhang, Lei},
              journal={arXiv preprint arXiv:2407.16291},
              year={2024}
            }
      
  3. TAPTR: Tracking Any Point with Transformers as Detection
    Hongyang Li, Hao Zhang, Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Lei Zhang
    ECCV, 2024
    @article{li2024taptr,
            title={TAPTR: Tracking Any Point with Transformers as Detection},
            author={Li, Hongyang and Zhang, Hao and Liu, Shilong and Zeng, Zhaoyang and Ren, Tianhe and Li, Feng and Zhang, Lei},
            journal={arXiv preprint arXiv:2403.13042},
            year={2024}
          }
    
  4. LLaVA-Grounding: Grounded Visual Chat with Large Multimodal Models
    Hao Zhang*, Hongyang Li*, Feng Li, Tianhe Ren, Xueyan Zou, Shilong Liu, Shijia Huang, Jianfeng Gao, Lei Zhang, Chunyuan Li, Jianwei Yang
    ECCV, 2024
    @article{zhang2023llava,
          title={{LLaVA-Grounding}: Grounded visual chat with large multimodal models},
          author={Zhang, Hao and Li, Hongyang and Li, Feng and Ren, Tianhe and Zou, Xueyan and Liu, Shilong and Huang, Shijia and Gao, Jianfeng and Zhang, Lei and Li, Chunyuan and others},
          journal={arXiv preprint arXiv:2312.02949},
          year={2023}
        }
    
  5. Visual In-Context Prompting
    Feng Li, Qing Jiang, Hao Zhang, Tianhe Ren, Shilong Liu, Xueyan Zou, Huaizhe Xu, Hongyang Li, Chunyuan Li, Jianwei Yang, Lei Zhang, Jianfeng Gao
    CVPR, 2024
    @inproceedings{li2024visual,
          title={Visual in-context prompting},
          author={Li, Feng and Jiang, Qing and Zhang, Hao and Ren, Tianhe and Liu, Shilong and Zou, Xueyan and Xu, Huaizhe and Li, Hongyang and Yang, Jianwei and Li, Chunyuan and others},
          booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
          pages={12861--12871},
          year={2024}
        }
    
  6. Grounded SAM: Assembling Open-World Models for Diverse Visual Tasks
    Tianhe Ren, Shilong Liu, Ailing Zeng, Jing Lin, Kunchang Li, He Cao, Jiayu Chen, Xinyu Huang, Yukang Chen, Feng Yan, Zhaoyang Zeng, Hao Zhang, Feng Li, Jie Yang, Hongyang Li, Qing Jiang, Lei Zhang
    arXiv, 2024
    @article{ren2024grounded,
          title={{Grounded SAM}: Assembling open-world models for diverse visual tasks},
          author={Ren, Tianhe and Liu, Shilong and Zeng, Ailing and Lin, Jing and Li, Kunchang and Cao, He and Chen, Jiayu and Huang, Xinyu and Chen, Yukang and Yan, Feng and others},
          journal={arXiv preprint arXiv:2401.14159},
          year={2024}
        }
    
  7. A Strong and Reproducible Object Detector with Only Public Datasets
    Tianhe Ren, Jianwei Yang, Shilong Liu, Ailing Zeng, Feng Li, Hao Zhang, Hongyang Li, Zhaoyang Zeng, Lei Zhang
    arXiv, 2023
    @article{ren2023strong,
          title={A strong and reproducible object detector with only public datasets},
          author={Ren, Tianhe and Yang, Jianwei and Liu, Shilong and Zeng, Ailing and Li, Feng and Zhang, Hao and Li, Hongyang and Zeng, Zhaoyang and Zhang, Lei},
          journal={arXiv preprint arXiv:2304.13027},
          year={2023}
        }
    
  8. DFA3D: 3D Deformable Attention For 2D-to-3D Feature Lifting
    Hongyang Li*, Hao Zhang*, Zhaoyang Zeng, Shilong Liu, Feng Li, Tianhe Ren, Lei Zhang
    ICCV, 2023
    @inproceedings{li2023dfa3d,
          title={DFA3D: 3D Deformable Attention For 2D-to-3D Feature Lifting},
          author={Li, Hongyang and Zhang, Hao and Zeng, Zhaoyang and Liu, Shilong and Li, Feng and Ren, Tianhe and Zhang, Lei},
          booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
          pages={6684--6693},
          year={2023}
        }
    
  9. Detection Transformer with Stable Matching
    Shilong Liu, Tianhe Ren, Jiayu Chen, Zhaoyang Zeng, Hao Zhang, Feng Li, Hongyang Li, Jun Huang, Hang Su, Jun Zhu, Lei Zhang
    ICCV, 2023
    @inproceedings{liu2023detection,
          title={Detection transformer with stable matching},
          author={Liu, Shilong and Ren, Tianhe and Chen, Jiayu and Zeng, Zhaoyang and Zhang, Hao and Li, Feng and Li, Hongyang and Huang, Jun and Su, Hang and Zhu, Jun and others},
          booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
          pages={6491--6500},
          year={2023}
        }
    
  10. DA-BEV: Depth Aware BEV Transformer for 3D Object Detection
    Hao Zhang*, Hongyang Li*, Xingyu Liao, Feng Li, Shilong Liu, Lionel M Ni, Lei Zhang
    arXiv, 2023
    @article{zhang2023bev,
          title={{DA-BEV}: Depth aware {BEV} transformer for {3D} object detection},
          author={Zhang, Hao and Li, Hongyang and Liao, Xingyu and Li, Feng and Liu, Shilong and Ni, Lionel M and Zhang, Lei},
          journal={arXiv preprint},
          year={2023}
        }
    
  11. Lite DETR: An Interleaved Multi-Scale Encoder for Efficient DETR
    Feng Li, Ailing Zeng, Shilong Liu, Hao Zhang, Hongyang Li, Lei Zhang, Lionel M. Ni
    CVPR, 2023
    @inproceedings{li2023lite,
          title={Lite {DETR}: An interleaved multi-scale encoder for efficient {DETR}},
          author={Li, Feng and Zeng, Ailing and Liu, Shilong and Zhang, Hao and Li, Hongyang and Zhang, Lei and Ni, Lionel M},
          booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
          pages={18558--18567},
          year={2023}
        }
    
  12. detrex: Benchmarking Detection Transformers
    Tianhe Ren, Shilong Liu, Feng Li, Hao Zhang, Ailing Zeng, Jie Yang, Xingyu Liao, Ding Jia, Hongyang Li, He Cao, Jianan Wang, Zhaoyang Zeng, Xianbiao Qi, Yuhui Yuan, Jianwei Yang, Lei Zhang
    arXiv, 2023
    @article{ren2023detrex,
          title={detrex: Benchmarking detection transformers},
          author={Ren, Tianhe and Liu, Shilong and Li, Feng and Zhang, Hao and Zeng, Ailing and Yang, Jie and Liao, Xingyu and Jia, Ding and Li, Hongyang and Cao, He and others},
          journal={arXiv preprint arXiv:2306.07265},
          year={2023}
        }
    
  13. DCL-Net: Deep Correspondence Learning Network for 6D Pose Estimation
    Hongyang Li*, Jiehong Lin*, Kui Jia
    ECCV, 2022
    @inproceedings{li2022dcl,
          title={DCL-Net: Deep Correspondence Learning Network for 6D Pose Estimation},
          author={Li, Hongyang and Lin, Jiehong and Jia, Kui},
          booktitle={European Conference on Computer Vision},
          pages={369--385},
          year={2022},
          organization={Springer}
        }
    
  14. Sparse Steerable Convolutions: An Efficient Learning of SE(3)-Equivariant Features for Estimation and Tracking of Object Poses in 3D Space
    Jiehong Lin*, Hongyang Li*, Ke Chen, Jiangbo Lu, Kui Jia
    NeurIPS, 2021
    @article{lin2021sparse,
          title={Sparse steerable convolutions: An efficient learning of {SE(3)}-equivariant features for estimation and tracking of object poses in {3D} space},
          author={Lin, Jiehong and Li, Hongyang and Chen, Ke and Lu, Jiangbo and Jia, Kui},
          journal={Advances in Neural Information Processing Systems},
          volume={34},
          pages={16779--16790},
          year={2021}
        }