# Root of the git workspace on the author's machine (machine-specific).
ROOT_DIR = '/home/oschung_skcc/git'
import os
import os.path as osp
# Directory containing this script, e.g. .../mmdetection/my/kitty_tiny.
WORK_DIR = os.path.dirname(os.path.realpath(__file__)) #'mymm/kitty_tiny'
# /home/oschung_skcc/git/mmdetection/my/kitty_tiny
# $ python sixx_train.py configs/faster_rcnn_r50_fpn_1x_tidy.py
# Imported for side effects only: presumably registers the custom
# middle-format dataset in mmdet's DATASETS registry — TODO confirm.
import tools.sixx_middle_dataset
# -----------------------------------------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import argparse
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
# -----------------------------------------------------------------------------
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
    """Build and parse the command-line arguments for detector training.

    Returns:
        argparse.Namespace: parsed arguments; ``cfg_options`` is populated
        from the deprecated ``--options`` flag when only the latter is given.

    Raises:
        ValueError: if both ``--options`` and ``--cfg-options`` are supplied.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')

    # The three GPU flags are mutually exclusive; --gpu-id is the one that
    # is not deprecated.
    gpu_group = parser.add_mutually_exclusive_group()
    gpu_group.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    gpu_group.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    gpu_group.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed training)')

    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    args = parser.parse_args()

    # Propagate the local rank to the environment for torch.distributed
    # launchers that read it from there.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))

    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both '
            'specified, --options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options

    return args
def main():
    """Train a detector from a config file (variant of mmdet tools/train.py).

    Builds the config, optional distributed environment, logger, dataset(s)
    and model, then hands everything to ``train_detector``.
    """
    args = parse_args()

    # --- cfg ================================================================
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.auto_resume = args.auto_resume

    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))

    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    env_info_dict = collect_env()
    # FIX: the original line was garbled into a backslash-newline inside the
    # string literal, making the join separator '' — '\n' is the intent.
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    # dash_line = '-' * 60 + '\n'
    # logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    # logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    seed = init_random_seed(args.seed)
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)

    # --- datasets ===========================================================
    # Build the training dataset; when the workflow also contains a val
    # phase, build a val dataset that reuses the train pipeline.
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__ + get_git_hash()[:7],
            CLASSES=datasets[0].CLASSES)

    # --- model ==============================================================
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES

    train_detector(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)


if __name__ == '__main__':
    main()
## 4. sixx_inference.py — standalone inference script
# Root of the git workspace (machine-specific; kept for parity with training).
ROOT_DIR = '/home/oschung_skcc/git'
import os
import os.path as osp
# from re import I # Regular expression operations, IgnoreCase
# Directory containing this script; the dataset lives in ./data below it.
WORK_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = osp.join(WORK_DIR, 'data')
# KITTI-style layout: images under image_2/, labels under label_2/.
IMG_PREFIX = 'image_2'
ANN_PREFIX = 'label_2'
import cv2
from matplotlib import pyplot as plt
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
import numpy as np
def get_detected_img(model, imgPath, score_threshold=0.3, is_print=True,
                     labels_to_names=None):
    """Run inference on one image and draw the detections onto it.

    Args:
        model: initialized mmdet detector (from ``init_detector``).
        imgPath (str): path of the image to read with OpenCV (BGR).
        score_threshold (float): keep detections whose score exceeds this.
        is_print (bool): if True, print each drawn caption.
        labels_to_names (dict | None): class-index -> class-name mapping.
            Defaults to the module-level ``labels_to_names_seq`` for
            backward compatibility.

    Returns:
        numpy.ndarray: the image with boxes and captions drawn in place.
    """
    if labels_to_names is None:
        labels_to_names = labels_to_names_seq

    img_array = cv2.imread(imgPath)
    bbox_color = (0, 255, 0)   # green boxes
    text_color = (0, 0, 255)   # red text ((0, 0, 255) is red in BGR order,
                               # not blue as the original comment said)

    # `results` is one (N, 5) array per class: x1, y1, x2, y2, score.
    results = inference_detector(model, img_array)
    for class_id, result in enumerate(results):
        if len(result) == 0:
            continue
        result_filtered = result[np.where(result[:, 4] > score_threshold)]
        for det in result_filtered:
            # Top-left / bottom-right corners of the detection box.
            left, top = int(det[0]), int(det[1])
            right, bottom = int(det[2]), int(det[3])
            cv2.rectangle(img_array, (left, top), (right, bottom),
                          color=bbox_color, thickness=2)
            # Class caption drawn just above the box.
            caption = f"{labels_to_names[class_id]}: {det[4]}"
            cv2.putText(img_array, caption, (left, top - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.37, text_color, 1)
            if is_print:
                print(caption)
    return img_array
# Config and checkpoint produced by the training script.
config_file = osp.join(WORK_DIR, 'configs/faster_rcnn_r50_fpn_1x_tidy.py')
checkpoint_file = osp.join(WORK_DIR, 'tutorial_exps/latest.pth')
model = init_detector(config_file, checkpoint_file)
# Sample image to run inference on.
imgPath = osp.join(DATA_DIR, IMG_PREFIX,'000068.jpeg')
# Class order must match the order the detector was trained with.
CLASSES = ('Car', 'Truck', 'Pedestrian', 'Cyclist')
labels_to_names_seq = {i:k for i, k in enumerate(CLASSES)}
# Draw detections above the 0.3 score threshold and display the result.
draw_img = get_detected_img(model, imgPath, score_threshold=0.3, is_print=True)
plt.figure(figsize=(4,4))#(15,10))
plt.imshow(draw_img)
## Main.py
main.py를 만들기 위해 참고할 inference 코드. main.py의 handler 작성을 위해 inference demo 확인: `init_detector`, `inference_detector`.
2. 로컬에서 DL 모델을 실행하기 위한 소스코드를 Nuclio 플랫폼에 적용
  2-1. 모델을 메모리에 로딩 (`init_context(context)` 함수를 사용하여) — https://nuclio.io/docs/latest/concepts/best-practices-and-common-pitfalls/#use-init_context-instead-of-global-variable-declarations-or-function-calls
  2-2. 아래 프로세스를 위해 … (Read more…)