Since most of the author's work is done on Ubuntu, this guide mainly targets Ubuntu.
1. Set up the conda environment: install Anaconda from the official website.
Open a terminal, then create and activate a new conda environment:
conda create --name labelme python=3.6
conda activate labelme

2. Install labelme:

# conda install -c conda-forge pyside2
# conda install pyqt
# pip install pyqt5  # pyqt5 can be installed via pip on python3
pip install labelme
# or you can install everything by conda command
# conda install labelme -c conda-forge

3. Use labelme
It works much like the lasso tool in Photoshop: trace a polygon around each object and assign it a label. Each annotated image is saved as a JSON file.
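For reference, a labelme JSON file looks roughly like the abridged sketch below (the field values are hypothetical); the conversion script in step 4 only relies on the shapes, imagePath and imageData fields:

{
  "shapes": [
    {
      "label": "cat",
      "points": [[102.0, 85.5], [310.2, 90.0], [305.0, 240.7]],
      "shape_type": "polygon"
    }
  ],
  "imagePath": "cat_001.jpg",
  "imageData": "…base64-encoded copy of the image; may be null if saving image data is disabled…"
}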
4. Batch-convert JSON files with json_to_dataset (this step is done on Windows)
Likewise, install conda first, and install PyCharm as well.
Create a new .py file named json_to_dataset, set the conda environment as its interpreter, and give it the following contents:
import argparse
import json
import os
import os.path as osp
import warnings

import PIL.Image
import yaml
from labelme import utils
import base64


def main():
    warnings.warn("This script is aimed to demonstrate how to convert the\n"
                  "JSON file to a single image dataset, and not to handle\n"
                  "multiple JSON files to generate a real-use dataset.")
    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    parser.add_argument('-o', '--out', default=None)
    args = parser.parse_args()

    json_file = args.json_file
    if args.out is None:
        out_dir = osp.basename(json_file).replace('.', '_')
        out_dir = osp.join(osp.dirname(json_file), out_dir)
    else:
        out_dir = args.out
    if not osp.exists(out_dir):
        os.mkdir(out_dir)

    # iterate over every JSON file in the given folder
    count = os.listdir(json_file)
    for i in range(0, len(count)):
        path = os.path.join(json_file, count[i])
        if os.path.isfile(path):
            data = json.load(open(path))

            # use the embedded image data if present, otherwise read the image from disk
            if data['imageData']:
                imageData = data['imageData']
            else:
                imagePath = os.path.join(os.path.dirname(path), data['imagePath'])
                with open(imagePath, 'rb') as f:
                    imageData = f.read()
                    imageData = base64.b64encode(imageData).decode('utf-8')
            img = utils.img_b64_to_arr(imageData)

            # build a mapping from label name to integer class value (background = 0)
            label_name_to_value = {'_background_': 0}
            for shape in data['shapes']:
                label_name = shape['label']
                if label_name in label_name_to_value:
                    label_value = label_name_to_value[label_name]
                else:
                    label_value = len(label_name_to_value)
                    label_name_to_value[label_name] = label_value

            # label_values must be dense
            label_values, label_names = [], []
            for ln, lv in sorted(label_name_to_value.items(), key=lambda x: x[1]):
                label_values.append(lv)
                label_names.append(ln)
            assert label_values == list(range(len(label_values)))

            lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

            captions = ['{}: {}'.format(lv, ln)
                        for ln, lv in label_name_to_value.items()]
            lbl_viz = utils.draw_label(lbl, img, captions)

            # one output folder per JSON file, e.g. xxx.json -> xxx_json
            out_dir = osp.basename(count[i]).replace('.', '_')
            out_dir = osp.join(osp.dirname(count[i]), out_dir)
            if not osp.exists(out_dir):
                os.mkdir(out_dir)

            PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
            # PIL.Image.fromarray(lbl).save(osp.join(out_dir, 'label.png'))
            utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
            PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

            with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
                for lbl_name in label_names:
                    f.write(lbl_name + '\n')

            warnings.warn('info.yaml is being replaced by label_names.txt')
            info = dict(label_names=label_names)
            with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
                yaml.safe_dump(info, f, default_flow_style=False)

            print('Saved to: %s' % out_dir)


if __name__ == '__main__':
    main()
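If you prefer not to use PyCharm, the same script can be run from an activated Anaconda Prompt; the folder path below is only a placeholder for wherever labelme wrote its JSON files. (Note that the script relies on the older labelme 3.x utils API, e.g. utils.draw_label, which has been removed from more recent labelme releases.)

python json_to_dataset.py D:\labelme\json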
Click the json_to_dataset entry in the top-right corner and choose Edit Configurations.
In the Parameters field, enter the path of the folder holding the JSON files produced by labelme.
Now press Run, and the dataset will be generated under the path of json_to_dataset.py (one folder per JSON file).
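Each generated folder contains img.png, label.png, label_viz.png, label_names.txt and info.yaml. A minimal sanity-check sketch, assuming the hypothetical folder name below, for verifying that the saved label map matches the recorded class names:

import numpy as np
import PIL.Image

out_dir = 'cat_001_json'  # one of the folders produced by json_to_dataset.py (name is hypothetical)
# label.png is a palette PNG written by utils.lblsave: each pixel value is a class index
lbl = np.asarray(PIL.Image.open(out_dir + '/label.png'))
with open(out_dir + '/label_names.txt') as f:
    names = f.read().splitlines()
print('label values present:', np.unique(lbl))
print('class index -> name:', list(enumerate(names)))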
To be continued.