Published by onesixx on

OpenMMLab::mmPose::Animal Pose



Official GitHub: (URL missing — lost in scraping; see https://github.com/open-mmlab/mmpose)

$ conda create -n openmmlab python=3.8 
$ conda activate openmmlab

$ conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia

# install dependencies: (use cu111 because colab has CUDA 11.1)
# pip install torch==1.10.0+cu111 torchvision==0.11.0+cu111 -f

$ pip install -U openmim
# install mmcv-full thus we could use CUDA operators
$ mim install mmcv
#%pip install mmcv-full -f

# clone mmpose repo
$ rm -rf mmpose
$ git clone
$ cd mmpose

# install mmpose dependencies
$ pip install -r requirements.txt
# install mmpose in develop mode
# "-v" means verbose, or more output
# "-e" means installing a project in editable mode,
# so local modifications you make to the code take effect immediately, without reinstalling
$ pip install -e .

# install mmdet for inference demo
$ pip install mmdet

Inference Demo

tutorial :

import cv2
import os
import matplotlib.pyplot as plt

from mmpose.apis import inference_top_down_pose_model
from mmpose.apis import init_pose_model
from mmpose.apis import vis_pose_result
from mmpose.apis import process_mmdet_results

from mmdet.apis import inference_detector, init_detector

# os.getcwd()  # '/home/oschung_skcc/git/mmpose/sixx'
# NOTE(review): the checkpoint download URLs were lost when this post was
# scraped — fill in the real URLs before running.
pose_checkpoint_url = ""
det_checkpoint_url =  ""

# Download the checkpoints into ./checkfiles (run once):
# !wget -P checkfiles ""
# !wget -P checkfiles ""
# NOTE(review): both config paths below end at a directory; the actual
# config filenames were also lost in scraping — restore them before running.
pose_config = 'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/'
pose_checkpoint = os.path.join('checkfiles', os.path.basename(pose_checkpoint_url))
det_config = 'demo/mmdetection_cfg/'
det_checkpoint = os.path.join('checkfiles', os.path.basename(det_checkpoint_url))

# Initialize the models.
pose_model = init_pose_model(pose_config, pose_checkpoint)   # top-down pose model
det_model  = init_detector(det_config, det_checkpoint)       # person detector

### inference
test_img = 'tests/data/coco/000000196141.jpg'
# Top-down pose estimation needs person boxes, so run the detector first.
mmdet_results        = inference_detector(det_model, test_img)
# Extract person (COCO category id 1) bboxes from the detection results.
mmdet_results_person = process_mmdet_results(mmdet_results, cat_id=1)
# Estimate keypoints for every detected person box above the threshold.
pose_results, returned_outputs = inference_top_down_pose_model(
    pose_model, test_img, mmdet_results_person,
    bbox_thr=0.3, format='xyxy')

# Draw the pose estimation results onto the image (returns a BGR ndarray).
vis_result = vis_pose_result(
    pose_model, test_img, pose_results)
# Halve the image size for display.
vis_result = cv2.resize(vis_result, dsize=None, fx=0.5, fy=0.5)

### way1)
# Display the image using matplotlib's imshow (OpenCV is BGR, so convert to RGB).
plt.imshow(cv2.cvtColor(vis_result, cv2.COLOR_BGR2RGB))

### way2)
# Save to a temporary file and display it inline via IPython.
from IPython.display import Image, display
import tempfile
with tempfile.TemporaryDirectory() as tmpdir:
    file_name = os.path.join(tmpdir, 'pose_results.png')
    cv2.imwrite(file_name, vis_result)
    # display() must happen inside the with-block: the temp directory
    # (and the PNG in it) is deleted as soon as the block exits.
    display(Image(file_name))
Categories: vision


Blog Owner

Notify of

Inline Feedbacks
View all comments
Would love your thoughts, please comment.x