From 98f2ba8591dfe75fa491d2bd1a34241a7611276e Mon Sep 17 00:00:00 2001 From: joncrall Date: Wed, 8 Apr 2020 14:06:39 -0400 Subject: [PATCH 1/7] Start branch for 0.5.6 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2aa56e..b8460e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -459,3 +459,5 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Added * Early and undocumented commits + +## Version 0.5.6 - Unreleased -- GitLab From aa9a31acde03d1fbb2dc75292d77eb145102e198 Mon Sep 17 00:00:00 2001 From: joncrall Date: Wed, 8 Apr 2020 16:22:47 -0400 Subject: [PATCH 2/7] wip --- super_setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/super_setup.py b/super_setup.py index 21c4f55..1bda283 100755 --- a/super_setup.py +++ b/super_setup.py @@ -702,7 +702,7 @@ def make_netharn_registry(): remotes={'public': 'git@gitlab.kitware.com:computer-vision/kwcoco.git'}, ), CommonRepo( - name='kwplot', branch='dev/0.4.4', remote='public', + name='kwplot', branch='dev/0.4.6', remote='public', remotes={'public': 'git@gitlab.kitware.com:computer-vision/kwplot.git'}, ), @@ -729,7 +729,7 @@ def make_netharn_registry(): # netharn - training harness CommonRepo( - name='netharn', branch='dev/0.5.5', remote='public', + name='netharn', branch='dev/0.5.6', remote='public', remotes={'public': 'git@gitlab.kitware.com:computer-vision/netharn.git'}, ), ] -- GitLab From 090b8153b0c50d823c3e4663e3cf32cbc8d7051d Mon Sep 17 00:00:00 2001 From: joncrall Date: Fri, 10 Apr 2020 12:12:02 -0400 Subject: [PATCH 3/7] wip --- dev/manage_snapshots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev/manage_snapshots.py b/dev/manage_snapshots.py index 50264be..d880f6e 100755 --- a/dev/manage_snapshots.py +++ b/dev/manage_snapshots.py @@ -96,7 +96,7 @@ def _devcheck_remove_dead_runs(workdir, dry=True, dead_num_snap_thresh=10, group_ = copy.deepcopy(group) for i in group_: i['dpath'] = '...' + i['dpath'][-20:] - i.pop('last_modified') + # i.pop('last_modified') i['MB'] = i['size'] * 1e-3 print(ub.repr2(group_, nl=1)) -- GitLab From d41c8c952f4a55428fd3c23a4e78c5ce6dd95cde Mon Sep 17 00:00:00 2001 From: joncrall Date: Fri, 10 Apr 2020 15:58:22 -0400 Subject: [PATCH 4/7] wip --- dev/manage_snapshots.py | 287 +++++++++++++++++++++++++++++----------- 1 file changed, 213 insertions(+), 74 deletions(-) diff --git a/dev/manage_snapshots.py b/dev/manage_snapshots.py index d880f6e..44bef23 100755 --- a/dev/manage_snapshots.py +++ b/dev/manage_snapshots.py @@ -15,6 +15,71 @@ import parse import ubelt as ub +def byte_str(num, unit='auto', precision=2): + """ + Automatically chooses relevant unit (KB, MB, or GB) for displaying some + number of bytes. + + Args: + num (int): number of bytes + unit (str): which unit to use, can be auto, B, KB, MB, GB, TB, PB, EB, + ZB, or YB. 
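+        precision (int): number of decimal places to use in the result
+            (default: 2). This parameter exists in the signature but was
+            previously undocumented.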
+ + References: + https://en.wikipedia.org/wiki/Orders_of_magnitude_(data) + + Returns: + str: string representing the number of bytes with appropriate units + + Example: + >>> num_list = [1, 100, 1024, 1048576, 1073741824, 1099511627776] + >>> result = ub.repr2(list(map(byte_str, num_list)), nl=0) + >>> print(result) + ['0.00 KB', '0.10 KB', '1.00 KB', '1.00 MB', '1.00 GB', '1.00 TB'] + """ + abs_num = abs(num) + if unit == 'auto': + if abs_num < 2.0 ** 10: + unit = 'KB' + elif abs_num < 2.0 ** 20: + unit = 'KB' + elif abs_num < 2.0 ** 30: + unit = 'MB' + elif abs_num < 2.0 ** 40: + unit = 'GB' + elif abs_num < 2.0 ** 50: + unit = 'TB' + elif abs_num < 2.0 ** 60: + unit = 'PB' + elif abs_num < 2.0 ** 70: + unit = 'EB' + elif abs_num < 2.0 ** 80: + unit = 'ZB' + else: + unit = 'YB' + if unit.lower().startswith('b'): + num_unit = num + elif unit.lower().startswith('k'): + num_unit = num / (2.0 ** 10) + elif unit.lower().startswith('m'): + num_unit = num / (2.0 ** 20) + elif unit.lower().startswith('g'): + num_unit = num / (2.0 ** 30) + elif unit.lower().startswith('t'): + num_unit = num / (2.0 ** 40) + elif unit.lower().startswith('p'): + num_unit = num / (2.0 ** 50) + elif unit.lower().startswith('e'): + num_unit = num / (2.0 ** 60) + elif unit.lower().startswith('z'): + num_unit = num / (2.0 ** 70) + elif unit.lower().startswith('y'): + num_unit = num / (2.0 ** 80) + else: + raise ValueError('unknown num={!r} unit={!r}'.format(num, unit)) + return ub.repr2(num_unit, precision=precision) + ' ' + unit + + def is_symlink_broken(path): """ Check is a path is a broken symlink. @@ -64,40 +129,93 @@ def get_file_info(fpath): return info +def session_info(dpath): + """ + Stats about a training session + """ + info = {} + snap_dpath = join(dpath, 'torch_snapshots') + snapshots = os.listdir(snap_dpath) if exists(snap_dpath) else [] + dpath = realpath(dpath) + + if True: + # Determine if we are pointed to by a nice directory or not + nice = basename(dirname(dpath)) + info['nice'] = nice + fitdir = dirname(dirname(dirname(dpath))) + nice_dpath = join(fitdir, 'nice', nice) + try: + target = realpath(ub.util_links._readlink(nice_dpath)) + except Exception: + target = None + info['linked'] = (target == dpath) + + info['dpath'] = dpath + info['num_snapshots'] = len(snapshots) + info['size'] = float(ub.cmd('du -s ' + dpath)['out'].split('\t')[0]) + if len(snapshots) > 0: + contents = [join(dpath, c) for c in os.listdir(dpath)] + timestamps = [get_file_info(c)['last_modified'] for c in contents] + unixtime = max(timestamps) + dt = datetime.datetime.fromtimestamp(unixtime) + info['last_modified'] = dt + return info + + def _devcheck_remove_dead_runs(workdir, dry=True, dead_num_snap_thresh=10, safe_num_days=7): """ - TODO: - Look for directories in runs that have no / very few snapshots - and no eval metrics that have a very old modified time and - put them into a list as candidates for deletion - + Look for directories in runs that have no / very few snapshots and no eval + metrics that have a very old modified time and put them into a list as + candidates for deletion. + + Ignore: + import sys, ubelt + sys.path.append(ubelt.expandpath('~/code/netharn/dev')) + from manage_snapshots import * # NOQA + from manage_snapshots import _devcheck_remove_dead_runs, _devcheck_manage_snapshots + workdir = '.' 
+ import xdev + globals().update(xdev.get_func_kwargs(_devcheck_remove_dead_runs)) """ import ubelt as ub - # workdir = ub.expandpath('~/work/foobar') - + import copy print('Checking for dead / dangling sessions in your runs dir') # Find if any run directory is empty run_dpath = join(workdir, 'fit', 'runs') training_dpaths = list(glob.glob(join(run_dpath, '*/*'))) - infos = [] + all_sessions = [] for dpath in training_dpaths: - info = session_info(dpath) - infos.append(info) + session = session_info(dpath) + all_sessions.append(session) - nice_groups = ub.group_items(infos, lambda x: x['nice']) + now = datetime.datetime.now() + long_time_ago = now - datetime.timedelta(days=safe_num_days) + + for session in all_sessions: + if session['num_snapshots'] == 0: + session['decision'] = 'bad' + elif session['num_snapshots'] < dead_num_snap_thresh: + dt = session['last_modified'] + if dt < long_time_ago: + session['decision'] = 'iffy' + else: + session['decision'] = 'good' + else: + session['decision'] = 'good' + + nice_groups = ub.group_items(all_sessions, lambda x: x['nice']) for nice, group in nice_groups.items(): print(' --- {} --- '.format(nice)) group = sorted(group, key=lambda x: x['size']) - import copy group_ = copy.deepcopy(group) - for i in group_: - i['dpath'] = '...' + i['dpath'][-20:] - # i.pop('last_modified') - i['MB'] = i['size'] * 1e-3 + for item in group_: + item['dpath'] = '...' + item['dpath'][-20:] + item.pop('last_modified', None) + item['size'] = byte_str(item['size']) print(ub.repr2(group_, nl=1)) # Partion your "nice" sessions into broken and live symlinks. @@ -115,78 +233,97 @@ def _devcheck_remove_dead_runs(workdir, dry=True, dead_num_snap_thresh=10, if len(os.listdir(dpath)) == 0: empty_dpaths.append(dpath) - bad_dpaths = [] - iffy_dpaths = [] - good_dpaths = [] + decision_groups = ub.group_items(all_sessions, lambda x: x['decision']) - now = datetime.datetime.now() - long_time_ago = now - datetime.timedelta(days=safe_num_days) - - for info in infos: - if info['num_snapshots'] == 0: - bad_dpaths.append(info['dpath']) - elif info['num_snapshots'] < dead_num_snap_thresh: - dt = info['last_modified'] - if dt < long_time_ago: - iffy_dpaths.append(info['dpath']) - else: - good_dpaths.append(info['dpath']) - else: - good_dpaths.append(info['dpath']) + print('Empty dpaths: {:>4}'.format(len(empty_dpaths))) + print('Broken links: {:>4}'.format(len(broken_links))) + for key in decision_groups.keys(): + group = decision_groups[key] + size = byte_str(sum([s['size'] for s in group])) + print('{:>4} sessions: {:>4}, size={}'.format(key.capitalize(), len(group), size)) if dry: - print('Would leave {} good dpaths'.format(len(good_dpaths))) - print('NOT DELETING {} iffy dpaths'.format(len(iffy_dpaths))) - print('Would delete {} bad dpaths'.format(len(bad_dpaths))) - print('Would delete {} broken links'.format(len(broken_links))) - print('Would delete {} empty dpaths'.format(len(empty_dpaths))) + print('DRY RUN. NOT DELETING ANYTHING') else: - print('Leaving {} good dpaths'.format(len(good_dpaths))) - print('NOT DELETING {} iffy dpaths'.format(len(iffy_dpaths))) - print('Deleting delete {} bad dpaths'.format(len(bad_dpaths))) - print('Deleting delete {} broken links'.format(len(broken_links))) - print('Deleting delete {} empty dpaths'.format(len(empty_dpaths))) - # for p in iffy_dpaths: + print('LIVE RUN. 
DELETING bad, empty, and broken.')
+        print('NOT DELETING iffy and good sessions')
+
+    # for p in iffy_sessions:
     #     ub.delete(p)
-    for p in bad_dpaths:
-        ub.delete(p)
+    for info in decision_groups.get('bad', []):
+        ub.delete(info['dpath'])
     for p in empty_dpaths:
         ub.delete(p)
     for p in broken_links:
         os.unlink(p)
 
 
-def session_info(dpath):
+class Session(ub.NiceRepr):
     """
-    Stats about a training session
+    UNFINISHED:
+    NEW: object to maintain info / manipulate a specific training directory
     """
-    info = {}
-    snap_dpath = join(dpath, 'torch_snapshots')
-    snapshots = os.listdir(snap_dpath) if exists(snap_dpath) else []
-    dpath = realpath(dpath)
+    def __init__(session, dpath):
+        session.dpath = dpath
+        session.info = session_info(session.dpath)
 
-    if True:
-        # Determine if we are pointed to by a nice directory or not
-        nice = basename(dirname(dpath))
-        info['nice'] = nice
-        fitdir = dirname(dirname(dirname(dpath)))
-        nice_dpath = join(fitdir, 'nice', nice)
-        try:
-            target = realpath(ub.util_links._readlink(nice_dpath))
-        except Exception:
-            target = None
-        info['linked'] = (target == dpath)
+    def __nice__(session):
+        return repr(session.info)
 
-    info['dpath'] = dpath
-    info['num_snapshots'] = len(snapshots)
-    info['size'] = float(ub.cmd('du -s ' + dpath)['out'].split('\t')[0])
-    if len(snapshots) > 0:
-        contents = [join(dpath, c) for c in os.listdir(dpath)]
-        timestamps = [get_file_info(c)['last_modified'] for c in contents]
-        unixtime = max(timestamps)
-        dt = datetime.datetime.fromtimestamp(unixtime)
-        info['last_modified'] = dt
-    return info
+
+def _devcheck_manage_monitor(workdir, dry=True):
+    # Get all the images in the monitor directories
+    # (this is a convention and not something netharn does by default)
+    run_dpath = join(workdir, 'fit', 'runs')
+    training_dpaths = list(glob.glob(join(run_dpath, '*/*')))
+
+    all_sessions = []
+    for dpath in training_dpaths:
+        session = Session(dpath)
+        all_sessions.append(session)
+    # UNFINISHED
+
+    all_files = []
+    factor = 100  # keep roughly 1 in `factor` images per batch directory
+
+    def _choose_action(file_infos):
+        # Deterministically shuffle, then mark a small sample to keep and
+        # the rest for deletion
+        import kwarray
+        file_infos = kwarray.shuffle(file_infos, rng=0)
+        n_keep = (len(file_infos) // factor) + 1
+
+        for info in file_infos[:n_keep]:
+            info['action'] = 'keep'
+        for info in file_infos[n_keep:]:
+            info['action'] = 'delete'
+
+    for session in all_sessions:
+        dpath = join(session.dpath, 'monitor', 'train', 'batch')
+        fpaths = list(glob.glob(join(dpath, '*.jpg')))
+        file_infos = [{'size': os.stat(p).st_size, 'fpath': p}
+                      for p in fpaths]
+        _choose_action(file_infos)
+        all_files.extend(file_infos)
+
+        dpath = join(session.dpath, 'monitor', 'vali', 'batch')
+        fpaths = list(glob.glob(join(dpath, '*.jpg')))
+        file_infos = [{'size': os.stat(p).st_size, 'fpath': p}
+                      for p in fpaths]
+        _choose_action(file_infos)
+        all_files.extend(file_infos)
+
+    grouped_actions = ub.group_items(all_files, lambda x: x['action'])
+
+    for key, group in grouped_actions.items():
+        size = byte_str(sum([s['size'] for s in group]))
+        print('{:>4} images: {:>4}, size={}'.format(key.capitalize(), len(group), size))
+
+    if dry:
+        print('Dry run')
+    else:
+        delete = grouped_actions.get('delete', [])
+        delete_fpaths = [item['fpath'] for item in delete]
+        for p in delete_fpaths:
+            ub.delete(p)
 
 
 def _devcheck_manage_snapshots(workdir, recent=5, factor=10, dry=True):
@@ -324,6 +461,8 @@ if __name__ == '__main__':
 
     CommandLine:
        python ~/code/netharn/dev/manage_snapshots.py
+        find . 
-iname "explit_checkpoints" -d + python ~/code/netharn/dev/manage_snapshots.py --mode=snapshots --workdir=~/work/voc_yolo2/ python ~/code/netharn/dev/manage_snapshots.py --mode=runs --workdir=~/work/voc_yolo2/ -- GitLab From 196423d1b22023702df7c8826bd0ea93368c2b64 Mon Sep 17 00:00:00 2001 From: joncrall Date: Mon, 13 Apr 2020 14:28:38 -0400 Subject: [PATCH 5/7] Improve grabdata VOC --- netharn/data/grab_voc.py | 79 +++++++++++++++++++++++++++-------- netharn/data/voc.py | 3 ++ netharn/models/yolo2/yolo2.py | 7 ++-- requirements/optional.txt | 3 +- 4 files changed, 70 insertions(+), 22 deletions(-) diff --git a/netharn/data/grab_voc.py b/netharn/data/grab_voc.py index f88742c..578f3a1 100644 --- a/netharn/data/grab_voc.py +++ b/netharn/data/grab_voc.py @@ -5,14 +5,14 @@ from os.path import dirname from os.path import relpath -def convert_voc_to_coco(): +def convert_voc_to_coco(dpath=None): # TODO: convert segmentation information classes = [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] - devkit_dpath = ensure_voc_data() + devkit_dpath = ensure_voc_data(dpath=dpath) root = out_dpath = dirname(devkit_dpath) dsets = [] @@ -40,22 +40,22 @@ def convert_voc_to_coco(): for img in dset.imgs.values(): img['file_name'] = relpath(img['file_name'], root) - import ndsampler - t1 = ndsampler.CocoDataset(join(out_dpath, 'voc-train-2007.mscoco.json')) - t2 = ndsampler.CocoDataset(join(out_dpath, 'voc-train-2012.mscoco.json')) + import kwcoco + t1 = kwcoco.CocoDataset(join(out_dpath, 'voc-train-2007.mscoco.json')) + t2 = kwcoco.CocoDataset(join(out_dpath, 'voc-train-2012.mscoco.json')) - v1 = ndsampler.CocoDataset(join(out_dpath, 'voc-val-2007.mscoco.json')) - v2 = ndsampler.CocoDataset(join(out_dpath, 'voc-val-2012.mscoco.json')) + v1 = kwcoco.CocoDataset(join(out_dpath, 'voc-val-2007.mscoco.json')) + v2 = kwcoco.CocoDataset(join(out_dpath, 'voc-val-2012.mscoco.json')) - t = ndsampler.CocoDataset.union(t1, t2) + t = kwcoco.CocoDataset.union(t1, t2) t.tag = 'voc-train' t.fpath = join(root, t.tag + '.mscoco.json') - v = ndsampler.CocoDataset.union(v1, v2) + v = kwcoco.CocoDataset.union(v1, v2) v.tag = 'voc-val' v.fpath = join(root, v.tag + '.mscoco.json') - tv = ndsampler.CocoDataset.union(t1, t2, v1, v2) + tv = kwcoco.CocoDataset.union(t1, t2, v1, v2) tv.tag = 'voc-trainval' tv.fpath = join(root, tv.tag + '.mscoco.json') @@ -84,9 +84,9 @@ def _convert_voc_split(devkit_dpath, classes, split, year, root): split, year = 'train', 2012 split, year = 'train', 2007 """ - import ndsampler + import kwcoco import xml.etree.ElementTree as ET - dset = ndsampler.CocoDataset(tag='voc-{}-{}'.format(split, year)) + dset = kwcoco.CocoDataset(tag='voc-{}-{}'.format(split, year)) for catname in classes: dset.add_category(catname) @@ -188,7 +188,7 @@ def _read_split_paths(devkit_dpath, split, year): def ensure_voc_data(dpath=None, force=False, years=[2007, 2012]): """ - Download the Pascal VOC 2007 data if it does not already exist. + Download the Pascal VOC data if it does not already exist. 
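+
+    Args:
+        dpath (str): download directory. Defaults to "~/data/VOC".
+        force (bool): if True, re-extract the tarballs even if the data
+            already seems to exist.
+        years (List[int]): which VOC challenge years to download
+            (2007 and/or 2012).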
 
     Example:
         >>> # xdoctest: +REQUIRES(--download)
         >>> devkit_dpath = ensure_voc_data()
         >>> # xdoc: +IGNORE_WANT
         >>> print('devkit_dpath = {!r}'.format(devkit_dpath))
     """
     if dpath is None:
         dpath = ub.expandpath('~/data/VOC')
     devkit_dpath = join(dpath, 'VOCdevkit')
     # if force or not exists(devkit_dpath):
     ub.ensuredir(dpath)
 
+    def extract_tarfile(fpath, dpath='.'):
+        # Extract with the tarfile module instead of the old shell-out:
+        # ub.cmd('tar xvf "{}" -C "{}"'.format(fpath, dpath), verbout=1)
+        import tarfile
+        tar = tarfile.open(fpath)
+        try:
+            tar.extractall(dpath)
+        finally:
+            tar.close()
+
     fpath1 = ub.grabdata('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', dpath=dpath)
     if force or not exists(join(dpath, 'VOCdevkit', 'VOCcode')):
-        ub.cmd('tar xvf "{}" -C "{}"'.format(fpath1, dpath), verbout=1)
+        extract_tarfile(fpath1, dpath)
 
     if 2007 in years:
         # VOC 2007 train+validation data
         fpath2 = ub.grabdata('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', dpath=dpath)
         if force or not exists(join(dpath, 'VOCdevkit', 'VOC2007', 'ImageSets', 'Main', 'bird_trainval.txt')):
-            ub.cmd('tar xvf "{}" -C "{}"'.format(fpath2, dpath), verbout=1)
+            extract_tarfile(fpath2, dpath)
 
         # VOC 2007 test data
         fpath3 = ub.grabdata('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', dpath=dpath)
         if force or not exists(join(dpath, 'VOCdevkit', 'VOC2007', 'ImageSets', 'Main', 'bird_test.txt')):
-            ub.cmd('tar xvf "{}" -C "{}"'.format(fpath3, dpath), verbout=1)
+            extract_tarfile(fpath3, dpath)
 
     if 2012 in years:
         # VOC 2012 train+validation data
         fpath4 = ub.grabdata('https://pjreddie.com/media/files/VOCtrainval_11-May-2012.tar', dpath=dpath)
         if force or not exists(join(dpath, 'VOCdevkit', 'VOC2012', 'ImageSets', 'Main', 'bird_trainval.txt')):
-            ub.cmd('tar xvf "{}" -C "{}"'.format(fpath4, dpath), verbout=1)
+            extract_tarfile(fpath4, dpath)
     return devkit_dpath
 
 
+def ensure_voc_coco(dpath=None):
+    """
+    Download the Pascal VOC data and convert it to coco, if it does not
+    already exist.
+
+    Args:
+        dpath (str): download directory. Defaults to "~/data/VOC".
+
+    Returns:
+        Dict[str, str]: mapping from dataset tags to coco file paths.
+            The original datasets have keys prefixed with underscores.
+            The standard split keys are trainval, train, vali, and test.
+    """
+    if dpath is None:
+        dpath = ub.expandpath('~/data/VOC')
+
+    paths = {
+        '_train-2007': join(dpath, 'voc-train-2007.mscoco.json'),
+        '_train-2012': join(dpath, 'voc-train-2012.mscoco.json'),
+        '_val-2007': join(dpath, 'voc-val-2007.mscoco.json'),
+        '_val-2012': join(dpath, 'voc-val-2012.mscoco.json'),
+        'trainval': join(dpath, 'voc-trainval.mscoco.json'),
+        'train': join(dpath, 'voc-train.mscoco.json'),
+        'vali': join(dpath, 'voc-val.mscoco.json'),
+        'test': join(dpath, 'voc-test-2007.mscoco.json'),
+    }
+    if not all(map(exists, paths.values())):
+        ensure_voc_data(dpath=dpath)
+        convert_voc_to_coco(dpath=dpath)
+
+    return paths
+
+
 def main():
-    convert_voc_to_coco()
+    paths = ensure_voc_coco()
+    print('paths = {}'.format(ub.repr2(paths, nl=1)))
 
 
 if __name__ == '__main__':
diff --git a/netharn/data/voc.py b/netharn/data/voc.py
index 1400802..2e227c7 100644
--- a/netharn/data/voc.py
+++ b/netharn/data/voc.py
@@ -2,6 +2,9 @@
 Simple dataset for loading the VOC 2007 object detection dataset without extra
 bells and whistles. Simply loads the images, boxes, and class labels and
 resizes images to a standard size.
+
+THIS WILL BE DEPRECATED IN THE FUTURE. WE WILL USE THE COCO FORMAT AS A COMMON
+DATA FORMAT FOR DETECTION PROBLEMS.
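+
+See netharn/data/grab_voc.py (ensure_voc_coco) for helpers that download VOC
+and convert it to the coco format.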
""" from os.path import exists from os.path import join diff --git a/netharn/models/yolo2/yolo2.py b/netharn/models/yolo2/yolo2.py index d1b7e6b..4e434f4 100644 --- a/netharn/models/yolo2/yolo2.py +++ b/netharn/models/yolo2/yolo2.py @@ -258,6 +258,7 @@ class Yolo2(layers.AnalyticModule): >>> output = self(inputs) >>> batch_dets = self.coder.decode_batch(output) >>> dets = batch_dets[0] + >>> print('dets.boxes = {!r}'.format(dets.boxes)) >>> # xdoc: +REQUIRES(--show) >>> import kwplot >>> kwplot.autompl() # xdoc: +SKIP @@ -449,12 +450,13 @@ class YoloCoder(object): >>> info = dev_demodata() >>> self, output = ub.take(info, ['coder', 'outputs']) >>> batch_dets = self.decode_batch(output) - >>> dets = batch_dets[0] + >>> dets = batch_dets[0].sort().scale(info['orig_sizes'][0]) + >>> print('dets.boxes = {!r}'.format(dets.boxes)) >>> # xdoctest: +REQUIRES(--show) >>> import kwplot >>> kwplot.figure(fnum=1, doclf=True) >>> kwplot.imshow(info['rgb255'], colorspace='rgb') - >>> dets.scale(info['orig_sizes'][0]).draw() + >>> dets.draw() >>> kwplot.show_if_requested() """ import kwimage @@ -512,7 +514,6 @@ class YoloCoder(object): # Compute class_score if len(self.classes) > 1: cls_scores = torch.nn.functional.softmax(class_energy, dim=2) - cls_max, cls_max_idx = torch.max(cls_scores, 2, keepdim=True) cls_max.mul_(score) else: diff --git a/requirements/optional.txt b/requirements/optional.txt index 0d5296d..2a8035b 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -15,7 +15,8 @@ tensorboard_logger >= 0.1.0 tensorboard >= 1.8.0 sympy >= 1.3 -ndsampler >= 0.5.0 +ndsampler >= 0.5.7 +kwcoco >= 0.1.0 # pyqt5>= 5.11.2;python_version>'2.7' # -- GitLab From f7c318ed0a5c948d18cc2a28c3c507df0b5bda47 Mon Sep 17 00:00:00 2001 From: joncrall Date: Mon, 13 Apr 2020 19:03:13 -0400 Subject: [PATCH 6/7] wip --- README.rst | 42 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 944cd48..1172769 100644 --- a/README.rst +++ b/README.rst @@ -200,8 +200,46 @@ instructions, but for now they are the same. ./run_developer_setup.sh -Example: -======== +Documentation +============= + +Netharn's documentation is currently sparse. I typically do most of my +documenting in the code itself using docstrings. In the future much of this +will likely be consolidated in a read-the-docs style documentation page, but +for now you'll need to look at the code to read the docs. + +The main concept provided by netharn is the "FitHarn", which has a decent +module level docstring, and a lot of good class / method level docstrings: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/fit_harn.py + +The examples folder has better docstrings with task-level documentation: + +The simplest is the mnist example: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/examples/mnist.py + +The CIFAR example builds on the mnist example: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/examples/cifar.py + +I'd recommend going through those two examples, as they have the best documentation. + +The segmentation example: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/examples/segmentation.py + +and object detection example: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/examples/object_detection.py + +have less documentation, but provide more real-world style examples of how netharn is used. 
+ +There is an applied segmentation example that is specific to the CAMVID dataset: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/examples/sseg_camvid.py + +And there is an applied VOC detection example: +https://gitlab.kitware.com/computer-vision/netharn/-/blob/master/netharn/examples/yolo_voc.py + +This README also contains a toy example. + +Toy Example: +============ This following example is the doctest in ``netharn/fit_harn.py``. It demonstrates how to use NetHarn to train a model to solve a toy problem. -- GitLab From f0a17a9141a1e17561e295070764754e9df319f9 Mon Sep 17 00:00:00 2001 From: joncrall Date: Tue, 14 Apr 2020 09:36:33 -0400 Subject: [PATCH 7/7] wip --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8460e6..bf5f8ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,9 @@ This changelog follows the specifications detailed in: [Keep a Changelog](https: This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), although we have not yet reached a `1.0.0` release. +## Version 0.5.6 - Unreleased + + ## Version 0.5.5 ### Added @@ -459,5 +462,3 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Added * Early and undocumented commits - -## Version 0.5.6 - Unreleased -- GitLab