diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b0fdbb520a0df3584cd38e9dfa6628a51ca233fb..b1192fbf013307cc5ed9d77e6b8f7d75f1997b25 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,11 +6,22 @@ This file should need minimal modification. Template for this files is from - ~/misc/templates/PYPKG/.gitlab-ci.yml + ~/code/xcookie/.gitlab-ci.yml + + Templates used in: + + ~/code/kwplot/.gitlab-ci.yml + ~/code/kwimage/.gitlab-ci.yml + ~/code/kwarray/.gitlab-ci.yml + ~/code/kwcoco/.gitlab-ci.yml + + ~/code/ndsampler/.gitlab-ci.yml + Enable the opencv-hack if needed, and turn on/off the desired versions of Python. + stages: - build - test @@ -31,7 +42,15 @@ stages: variables: # Change pip's cache directory to be inside the project directory since we can # only cache local items. - PIP_CACHE_DIR: "$CI_PROJECT_DIR/mb_work/cache_pip" + PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip" + + except: + # Don't run the pipeline for new tags + - tags + + cache: + paths: + - .cache/pip .build_template: &build_template @@ -46,53 +65,98 @@ stages: - python -V # Print out python version for debugging script: - - python setup.py bdist_wheel --universal - - cache: - paths: - - .cache/pip + #- python setup.py bdist_wheel --universal + - python setup.py bdist_wheel artifacts: paths: - dist/*.whl -.test_full_template: &test_full_template +.common_test_template: &common_test_template # Tags define which runners will accept which jobs <<: - *common_template + variables: + # Change pip's cache directory to be inside the project directory since we can + # only cache local items. + PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip" + stage: test - before_script: - - python -V # Print out python version for debugging - - export PYVER=$(python -c "import sys; print('{}{}'.format(*sys.version_info[0:2]))") - - pip install --progress-bar off virtualenv - - virtualenv venv$PYVER - - source venv$PYVER/bin/activate - - pip install --progress-bar off pip -U - - pip install --progress-bar off pip setuptools -U - - python -V # Print out python version for debugging - - pip install --progress-bar off -r requirements.txt - - pip install . 
- script: - ./run_tests.py - cache: - paths: - - .cache/pip - - venv/ - # Coverage is a regex that will parse the coverage from the test stdout coverage: '/TOTAL.+ ([0-9]{1,3}%)/' + #except: + # refs: + # - release + # - tags + # changes: + # - README.rst + # - CHANGELOG.md -.__gpg_heredoc__: &__gpg_heredoc__ - - | - NOTE: THESE INSTRUCTION ARE BEING MOVED AND UPDATED IN THE DEV FOLDER +# Define anchors to be used in "before_script" parts +._setup_virtualenv_template: &_setup_virtualenv_template |- + python -V # Print out python version for debugging + export PYVER=$(python -c "import sys; print('{}{}'.format(*sys.version_info[0:2]))") + pip install virtualenv + virtualenv venv$PYVER + source venv$PYVER/bin/activate + pip install pip -U + pip install pip setuptools -U + python -V # Print out python version for debugging + + +.test_minimal_strict_template: &test_minimal_strict_template + # Tags define which runners will accept which jobs + <<: + - *common_test_template + + before_script: + - *_setup_virtualenv_template + # - pip install .[tests-strict] # xcookie: +COMMENT_IF(cv2) + - pip install .[tests-strict,headless-strict] # xcookie: +UNCOMMENT_IF(cv2) + + +.test_full_strict_template: &test_full_strict_template + # Tags define which runners will accept which jobs + <<: + - *common_test_template + + before_script: + - *_setup_virtualenv_template + # - pip install .[all-strict] # xcookie: +COMMENT_IF(cv2) + - pip install .[all-strict,headless-strict] # xcookie: +UNCOMMENT_IF(cv2) + #- pip install GDAL==3.3.3 --find-links https://girder.github.io/large_image_wheels -U # xcookie: +UNCOMMENT_IF(gdal) + +.test_minimal_loose_template: &test_minimal_loose_template + # Tags define which runners will accept which jobs + <<: + - *common_test_template + + before_script: + - *_setup_virtualenv_template + # - pip install .[tests] # xcookie: +COMMENT_IF(cv2) + - pip install .[tests,headless] # xcookie: +UNCOMMENT_IF(cv2) + + +.test_full_loose_template: &test_full_loose_template + # Tags define which runners will accept which jobs + <<: + - *common_test_template + + before_script: + - *_setup_virtualenv_template + # - pip install .[all] # xcookie: +COMMENT_IF(cv2) + - pip install .[all,headless] # xcookie: +UNCOMMENT_IF(cv2) + #- pip install GDAL>=3.3.3 --find-links https://girder.github.io/large_image_wheels -U # xcookie: +UNCOMMENT_IF(gdal) + .gpgsign_template: &gpgsign_template @@ -103,25 +167,27 @@ stages: gpgsign script: - - export GPG_EXECUTABLE=gpg - - export GPG_KEYID=$(cat dev/public_gpg_key) - - echo "GPG_KEYID = $GPG_KEYID" - - $GPG_EXECUTABLE --version - - openssl version - - $GPG_EXECUTABLE --list-keys - - $GPG_EXECUTABLE --list-keys - # Decrypt and import GPG Keys / trust - # note the variable pointed to by VARNAME_CI_SECRET is a protected variables only available on master and release branch - - source dev/secrets_configuration.sh - - CI_SECRET=${!VARNAME_CI_SECRET} - - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | $GPG_EXECUTABLE --import - - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | $GPG_EXECUTABLE --import-ownertrust - - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | $GPG_EXECUTABLE --import - - $GPG_EXECUTABLE --list-keys || echo "first one fails for some reason" - - $GPG_EXECUTABLE --list-keys - # The publish script only builds wheels and does gpg signing if 
TAG_AND_UPLOAD is False - - pip install requests[security] twine - - MB_PYTHON_TAG=$MB_PYTHON_TAG DO_GPG=True GPG_KEYID=$GPG_KEYID TWINE_PASSWORD=$TWINE_PASSWORD TWINE_USERNAME=$TWINE_USERNAME GPG_EXECUTABLE=$GPG_EXECUTABLE DEPLOY_BRANCH=release DO_TAG=False DO_UPLOAD=False ./publish.sh + - | + export GNUPGHOME=$(mktemp -d) + export GPG_EXECUTABLE=gpg + export GPG_KEYID=$(cat dev/public_gpg_key) + echo "GPG_KEYID = $GPG_KEYID" + source dev/secrets_configuration.sh + # note the variable pointed to by VARNAME_CI_SECRET is a protected variables only available on main and release branch + CI_SECRET=${!VARNAME_CI_SECRET} + $GPG_EXECUTABLE --version + openssl version + $GPG_EXECUTABLE --list-keys + $GPG_EXECUTABLE --list-keys + # Decrypt and import GPG Keys / trust + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | $GPG_EXECUTABLE --import + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | $GPG_EXECUTABLE --import-ownertrust + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | $GPG_EXECUTABLE --import + $GPG_EXECUTABLE --list-keys || echo "first one fails for some reason" + $GPG_EXECUTABLE --list-keys + # The publish script only builds wheels and does gpg signing if DO_UPLOAD is no + pip install requests[security] twine + DO_GPG=True GPG_KEYID=$GPG_KEYID TWINE_PASSWORD=$TWINE_PASSWORD TWINE_USERNAME=$TWINE_USERNAME GPG_EXECUTABLE=$GPG_EXECUTABLE DEPLOY_BRANCH=release DO_TAG=False DO_UPLOAD=False ./publish.sh artifacts: paths: @@ -132,10 +198,10 @@ stages: only: refs: # Gitlab will only expose protected variables on protected branches - # (which I've set to be main, master, and release), so only run this stage + # (which I've set to be main and release), so only run this stage # there. 
- - master - main + - master - release @@ -147,35 +213,36 @@ stages: deploy script: - - export GPG_EXECUTABLE=gpg - - export GPG_KEYID=$(cat dev/public_gpg_key) - - echo "GPG_KEYID = $GPG_KEYID" - - $GPG_EXECUTABLE --version - - openssl version - - $GPG_EXECUTABLE --list-keys - - $GPG_EXECUTABLE --list-keys - # note the variable pointed to by VARNAME_CI_SECRET is a protected variables only available on master and release branch - - source dev/secrets_configuration.sh - - CI_SECRET=${!VARNAME_CI_SECRET} - - PUSH_TOKEN=${!VARNAME_PUSH_TOKEN} - - TWINE_PASSWORD=${!VARNAME_TWINE_PASSWORD} - - TWINE_USERNAME=${!VARNAME_TWINE_USERNAME} - - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | $GPG_EXECUTABLE --import - - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | $GPG_EXECUTABLE --import-ownertrust - - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | $GPG_EXECUTABLE --import - - $GPG_EXECUTABLE --list-keys || echo "first one fails for some reason" - - $GPG_EXECUTABLE --list-keys - # Install twine - - pip install pyopenssl ndg-httpsclient pyasn1 -U - - pip install requests[security] twine - # Execute the publish script for real this time - - TWINE_REPOSITORY_URL="https://upload.pypi.org/legacy/" - - TWINE_REPOSITORY_URL=$TWINE_REPOSITORY_URL MB_PYTHON_TAG=$MB_PYTHON_TAG DO_GPG=True GPG_KEYID=$GPG_KEYID TWINE_PASSWORD=$TWINE_PASSWORD TWINE_USERNAME=$TWINE_USERNAME GPG_EXECUTABLE=$GPG_EXECUTABLE CURRENT_BRANCH=release DEPLOY_BRANCH=release DO_TAG=True DO_UPLOAD=True ./publish.sh || echo "upload already exists" + - | + export GNUPGHOME=$(mktemp -d) + export GPG_EXECUTABLE=gpg + export GPG_KEYID=$(cat dev/public_gpg_key) + echo "GPG_KEYID = $GPG_KEYID" + # VARNAME_CI_SECRET points to a protected variable only available on main and release branch + source dev/secrets_configuration.sh + CI_SECRET=${!VARNAME_CI_SECRET} + PUSH_TOKEN=${!VARNAME_PUSH_TOKEN} + TWINE_PASSWORD=${!VARNAME_TWINE_PASSWORD} + TWINE_USERNAME=${!VARNAME_TWINE_USERNAME} + $GPG_EXECUTABLE --version + openssl version + $GPG_EXECUTABLE --list-keys + $GPG_EXECUTABLE --list-keys + # Decrypt and import GPG Keys / trust + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | $GPG_EXECUTABLE --import + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | $GPG_EXECUTABLE --import-ownertrust + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | $GPG_EXECUTABLE --import + $GPG_EXECUTABLE --list-keys || echo "first one fails for some reason" + $GPG_EXECUTABLE --list-keys + # Install twine + pip install six pyopenssl ndg-httpsclient pyasn1 -U + pip install requests[security] twine + # Execute the publish script for real this time + TWINE_REPOSITORY_URL=https://upload.pypi.org/legacy/ DO_GPG=True GPG_KEYID=$GPG_KEYID TWINE_PASSWORD=$TWINE_PASSWORD TWINE_USERNAME=$TWINE_USERNAME GPG_EXECUTABLE=$GPG_EXECUTABLE CURRENT_BRANCH=release DEPLOY_BRANCH=release DO_TAG=True DO_UPLOAD=True ./publish.sh || echo "upload already exists" + # Have the server git-tag the release and push the tags + VERSION=$(python -c "import setup; print(setup.VERSION)") # do sed twice to handle the case of https clone with and without a read token - | - # Have the server git-tag 
the release and push the tags - export VERSION=$(python -c "import setup; print(setup.VERSION)") - # do sed twice to handle the case of https clone with and without a read token URL_HOST=$(git remote get-url origin | sed -e 's|https\?://.*@||g' | sed -e 's|https\?://||g' | sed -e 's|git@||g' | sed -e 's|:|/|g') echo "URL_HOST = $URL_HOST" # A git config user name and email is required. Set if needed. @@ -195,19 +262,124 @@ stages: git push --tags "https://git-push-token:${PUSH_TOKEN}@${URL_HOST}" fi - only: refs: - release -.build_install_test: &build_install_test - - pip install -r requirements.txt -U +# Aliases for the images that run the tests +.image_python3_10: &image_python3_10 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.10 + #python:3.10 +.image_python39: &image_python39 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.9 + #python:3.9 +.image_python38: &image_python38 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.8 + #python:3.8 +.image_python37: &image_python37 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.7 + #python:3.7 +.image_python36: &image_python36 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.6 + #python:3.6 +.image_python35: &image_python35 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.5 + #python:3.5 +.image_python27: &image_python27 + gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:2.7 + #python:2.7 + ### JOBS ### # Define the actual jobs + +# --------------- +# Python 3.10 Jobs + + +build/cp3_10-cp3_10-linux: + <<: + - *build_template + image: + *image_python3_10 + +test_full_loose/cp3_10-cp3_10-linux: + <<: + - *test_full_loose_template + image: + *image_python3_10 + needs: + - build/cp3_10-cp3_10-linux + +test_minimal_loose/cp3_10-cp3_10-linux: + <<: + - *test_minimal_loose_template + image: + *image_python3_10 + needs: + - build/cp3_10-cp3_10-linux + +test_full_strict/cp3_10-cp3_10-linux: + <<: + - *test_full_strict_template + image: + *image_python3_10 + needs: + - build/cp3_10-cp3_10-linux + +test_minimal_strict/cp3_10-cp3_10-linux: + <<: + - *test_minimal_strict_template + image: + *image_python3_10 + needs: + - build/cp3_10-cp3_10-linux + +# --------------- +# Python 3.9 Jobs + + +build/cp39-cp39-linux: + <<: + - *build_template + image: + *image_python39 + +test_full_loose/cp39-cp39-linux: + <<: + - *test_full_loose_template + image: + *image_python39 + needs: + - build/cp39-cp39-linux + +test_minimal_loose/cp39-cp39-linux: + <<: + - *test_minimal_loose_template + image: + *image_python39 + needs: + - build/cp39-cp39-linux + +test_full_strict/cp39-cp39-linux: + <<: + - *test_full_strict_template + image: + *image_python39 + needs: + - build/cp39-cp39-linux + +test_minimal_strict/cp39-cp39-linux: + <<: + - *test_minimal_strict_template + image: + *image_python39 + needs: + - build/cp39-cp39-linux + # --------------- # Python 3.8 Jobs @@ -215,69 +387,275 @@ build/cp38-cp38-linux: <<: - *build_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.8 + *image_python38 + +test_full_loose/cp38-cp38-linux: + <<: + - *test_full_loose_template + image: + *image_python38 + needs: + - build/cp38-cp38-linux + +test_minimal_loose/cp38-cp38-linux: + <<: + - *test_minimal_loose_template + image: + *image_python38 + needs: + - build/cp38-cp38-linux -test_full/cp38-cp38-linux: +test_full_strict/cp38-cp38-linux: <<: - - *test_full_template + - *test_full_strict_template image: - 
gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.8 + *image_python38 + needs: + - build/cp38-cp38-linux +test_minimal_strict/cp38-cp38-linux: + <<: + - *test_minimal_strict_template + image: + *image_python38 + needs: + - build/cp38-cp38-linux # for universal builds we only need to gpg sign once gpgsign/cp38-cp38-linux: <<: - *gpgsign_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.8 + *image_python38 deploy/cp38-cp38-linux: <<: - *deploy_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.8 - + *image_python38 + # --------------- # Python 3.7 Jobs -build/cp37-cp37m-linux: + +build/cp37-cp37-linux: <<: - *build_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.7 + *image_python37 + +test_full_loose/cp37-cp37-linux: + <<: + - *test_full_loose_template + image: + *image_python37 + needs: + - build/cp37-cp37-linux + +test_minimal_loose/cp37-cp37-linux: + <<: + - *test_minimal_loose_template + image: + *image_python37 + needs: + - build/cp37-cp37-linux -test_full/cp37-cp37m-linux: +test_full_strict/cp37-cp37-linux: <<: - - *test_full_template + - *test_full_strict_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.7 + *image_python37 + needs: + - build/cp37-cp37-linux + +test_minimal_strict/cp37-cp37-linux: + <<: + - *test_minimal_strict_template + image: + *image_python37 + needs: + - build/cp37-cp37-linux # --------------- # Python 3.6 Jobs + build/cp36-cp36m-linux: <<: - *build_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.6 + *image_python36 + +test_full_loose/cp36-cp36m-linux: + <<: + - *test_full_loose_template + image: + *image_python36 + needs: + - build/cp36-cp36m-linux + +test_minimal_loose/cp36-cp36m-linux: + <<: + - *test_minimal_loose_template + image: + *image_python36 + needs: + - build/cp36-cp36m-linux -test_full/cp36-cp36m-linux: +test_full_strict/cp36-cp36m-linux: <<: - - *test_full_template + - *test_full_strict_template image: - gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.6 + *image_python36 + needs: + - build/cp36-cp36m-linux -#gpgsign/cp36-cp36m-linux: +test_minimal_strict/cp36-cp36m-linux: + <<: + - *test_minimal_strict_template + image: + *image_python36 + needs: + - build/cp36-cp36m-linux + + + +# --------------- +# Python 3.5 Jobs + + +#build/cp35-cp35m-linux: +# <<: +# - *build_template +# image: +# *image_python35 + +#test_full_loose/cp35-cp35m-linux: +# <<: +# - *test_full_loose_template +# image: +# *image_python35 +# needs: +# - build/cp35-cp35m-linux + +#test_minimal_loose/cp35-cp35m-linux: +# <<: +# - *test_minimal_loose_template +# image: +# *image_python35 +# needs: +# - build/cp35-cp35m-linux + +#test_full_strict/cp35-cp35m-linux: +# <<: +# - *test_full_strict_template +# image: +# *image_python35 +# needs: +# - build/cp35-cp35m-linux + +#test_minimal_strict/cp35-cp35m-linux: # <<: -# - *gpgsign_template +# - *test_minimal_strict_template # image: -# gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.6 +# *image_python35 +# needs: +# - build/cp35-cp35m-linux -#deploy/cp36-cp36m-linux: + + +# --------------- +# Python 2.7 Jobs + + +#build/cp27-cp27m-linux: +# <<: +# - *build_template +# image: +# *image_python27 + +#test_full_loose/cp27-cp27m-linux: +# <<: +# - *test_full_loose_template +# image: +# *image_python27 +# needs: +# - build/cp27-cp27m-linux + +#test_minimal_loose/cp27-cp27m-linux: +# <<: +# - *test_minimal_loose_template +# image: +# *image_python27 +# 
needs: +# - build/cp27-cp27m-linux + +#test_full_strict/cp27-cp27m-linux: +# <<: +# - *test_full_strict_template +# image: +# *image_python27 +# needs: +# - build/cp27-cp27m-linux + +#test_minimal_strict/cp27-cp27m-linux: # <<: -# - *deploy_template +# - *test_minimal_strict_template # image: -# gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.6 +# *image_python27 +# needs: +# - build/cp27-cp27m-linux + + +.__local_docker_heredoc__: + - | + # Commands to help developers debug pipelines on their local machine + # Grab the base docker image, (forwarding your ssh credentials), clone + # the watch repo, create the environment, and run the tests. + #docker login gitlab.kitware.com:4567 + #IMAGE_NAME=gitlab.kitware.com:4567/computer-vision/ci-docker/gl-python:3.8 + + # Use whatever image is defined for Python39 in this file and start a docker session + IMAGE_NAME=$(cat .gitlab-ci.yml | yq -r '.".image_python3_10"') + IMAGE_NAME=$(cat .gitlab-ci.yml | yq -r '.".image_python37"') + docker run -v $PWD:/io:ro -v $HOME/.cache/pip:/pip_cache -it $IMAGE_NAME bash + + # Will need to chmod things afterwords + export PIP_CACHE_DIR=/pip_cache + echo $PIP_CACHE_DIR + chmod -R o+rw $PIP_CACHE_DIR + chmod -R o+rw $PIP_CACHE_DIR + chmod -R g+rw $PIP_CACHE_DIR + USER=$(whoami) + chown -R $USER $PIP_CACHE_DIR + cd $HOME + git clone /io ./repo + + cd $HOME/repo + + # Make a virtualenv + export PYVER=$(python -c "import sys; print('{}{}'.format(*sys.version_info[0:2]))") + pip install virtualenv + virtualenv venv$PYVER + source venv$PYVER/bin/activate + #pip install pip -U + #pip install pip setuptools -U + + # FULL STRICT VARIANT + pip install -e .[all-strict,headless-strict] + ./run_tests.py + + # FULL LOOSE VARIANT + pip install -e .[all,headless] + ./run_tests.py + + # MINIMAL STRICT VARIANT + pip install -e .[runtime-strict,tests-strict] + ./run_tests.py + + # MINIMAL LOOSE VARIANT + pip install -e .[tests] + ./run_tests.py diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000000000000000000000000000000000000..e6a5fd4721cc3af97786e68233977dc6d97f030e --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,33 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details +# +# See Also: +# https://readthedocs.org/dashboard/netharn/advanced/ + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Build documentation with MkDocs +#mkdocs: +# configuration: mkdocs.yml + +# Optionally build your docs in additional formats such as PDF and ePub +formats: all + +# Optionally set the version of Python and requirements required to build your docs +python: + version: 3.7 + install: + - requirements: requirements/docs.txt + - method: pip + path: . + #extra_requirements: + # - docs + +#conda: +# environment: environment.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 24d55eb803086790d5734d803a2f4ddd88fbad30..68c14980e4e1a522b7f5fa128bf9b8f995d2f83f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,13 @@ This changelog follows the specifications detailed in: [Keep a Changelog](https: This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), although we have not yet reached a `1.0.0` release. -## Version 0.5.18 - Unreleased +## Version 0.5.19 - Unreleased + +### Added +* Ability to ignore the first N epochs when choosing the best model via the `ignore_first` config. 
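The `ignore_first` behavior noted in the changelog is simpler than it may sound: when ranking epochs to pick the best checkpoint, the first N (typically noisy) epochs are excluded before taking the minimum. A hedged sketch of the idea follows — `pick_best_epoch` is a hypothetical helper for illustration only, not netharn's actual API:

```python
def pick_best_epoch(epoch_losses, ignore_first=0):
    """Return the epoch with the lowest validation loss, skipping the first N epochs."""
    candidates = {
        epoch: loss for epoch, loss in epoch_losses.items() if epoch >= ignore_first
    }
    return min(candidates, key=candidates.get)

# Epoch 0 has a spuriously low loss; with ignore_first=2 it is no longer chosen.
losses = {0: 0.01, 1: 0.90, 2: 0.40, 3: 0.35}
assert pick_best_epoch(losses, ignore_first=2) == 3
```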
+ + +## Version 0.5.18 - Released 2022-03-19 ## Version 0.5.17 - Released 2021-10-05 diff --git a/dev/check_pypi_versions.py b/dev/check_pypi_versions.py index 3e5163113dfdcdbb64c9e8daff751e9ab489429b..bdccd848b9a59f3014f2925753918c6015aa4b11 100644 --- a/dev/check_pypi_versions.py +++ b/dev/check_pypi_versions.py @@ -5,8 +5,11 @@ pip install version-query pip install yolk3k """ # import yolk -from distutils.version import LooseVersion import ubelt as ub +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion def query_module_pypi_info(modname, verbose=0): diff --git a/dev/ci_public_gpg_key.pgp.enc b/dev/ci_public_gpg_key.pgp.enc index 65133cd97a00f49690243ab79203f9bec1ea07c3..e338fa207554dad8cf7aa74de623f92e028f7b1e 100644 --- a/dev/ci_public_gpg_key.pgp.enc +++ b/dev/ci_public_gpg_key.pgp.enc @@ -1,35 +1,35 @@ -U2FsdGVkX19tg+kVPd4XdgZNv8cK1h8ZK8wLCoDWUA0iV9h9VcZOsZhS2DXs+B42 -ubhcX5wyzCbO0KVSjtO/s9slrmIsxj8FozvdZylqT/gOCEsgMx6Ek96whfTitkew -+Uy9yKvJhqAdc9K+5rfYSh+cq6037MWU3mBs7jhOiF17ixQKcunRkLV0zjpyTV4/ -cMt/zVmOqaZcxUQaEXG2iIpoPB0l0pbkBTxhclIHoV8PA+erdvTdsIV5vZKB2zN7 -Nw32ijgqxS/JGkUWO7UemReh8hVkvIwpK1sibyXme44RYb69nLgyXRhT/kThewBL -JPutNHD6swEQGx/xLMTA50JY3XudjWStg8WZpyzO7dCqaUxl5q1GDKxhzwBfvV2Q -zifn/uTI972Ml87A0MXw5V7Pb1LUrd+tneiYmjCHNEMUvSV41XUt/khNev9ecADC -ES2KQYP+52mZF/y1uKZqHKIaVahK9EesmYQO8vgdesKIwVZRNNyCRvqHbteXYg2h -KZ/WkiAsB++GqAlkvrEJcwONesGJCYcSPIaGkelAyuYeFRvs0vFonR1/7EB9Y7W7 -gK92DZ0DVS+VmFXdQf0uOZ1oqREh3FL5lr9ihibV0GvSZ73MqQt6IHD4IBlaYQ2h -GOmAVjB2y9RZeGlEYh+r7ketb+tmkT7MlOB2nj0K6EZfhRvfM55RfUPWUg297pY6 -hLrQw4UBnmsP8wFdrb7Sb/IVx8+0RM1x6Egep6DphkQr0lm0DcXxi330Pj2mhZyx -sE1S5JqKiHBTaf9sZtGT2dP0A/DLvHY/9LHGbGP4HgmBNv8Qo0CotoLTgMvlf9OD -DCG4tvU3EjIM3vW9bmvEYhH/G0FpNkrM163xDAQKrlD6oioakxZtYxCU9mkxouSZ -hL1Pi7m56PFvSocoghqUqo68HwEsvIN9fCjxdOdBosl18+yPPnzmDPCiW3UvDLOj -gBSJ3z60Vu2nNQe7NQAn5xTpYae7gobN88dk6OX1xoXwXWUgmIg/sIPZ3Z6Y7gMY -eeeV0J52ob7chPNR+6iG9bpV5Lp9vhCbmbDLp0zKjZXx9j9JxoBtWFmOC6Esa7De -rthn3gGxtLJisXMZsUvfVjBecBQK6Qg1ahN38oU2bfYd9KYdf3fEdgclX3UufEwG -0PXIZmn9wKB7orW0rNEU6mon1J83IXpgPNintuthX1ZvYiiBmtK/oc00bRSTUp68 -XBzGUzmPEUwx7EzigYMRsX9KwZTfZWOE8JrOH2dGeY1tLKrJSdIRbrt6fJHqmUDS -G3V+ix1B6bnLw/xXeozK1GhFrANDrK4KP0NlTpkwS+9hkyNpJzeeXPn6oVmxEyfZ -1f/lUrGndvTHgFM5S3a7pBuqhYpt3VO1yxiMlggS0Q9akiFfWI/UAbRLuJrsmvx6 -n/0lOgldcIJ420rXcbmG2LfcLmtlNz754LfoTC5AYiMxQ261VIOOsbnc64lzt6al -5fBI/nl5B34sB5s0DiXdxtjTH/RkaGEAFjpZ97WwIwgZFFtH8BGLvtC6TauEdsVl -mq61wbMJNZCOAU+UpvtzbdX6Ol2wSRTI35AbPvRteFvenZKnHcfwbVyOS+GJkLuw -poypqidC2cHROhz35M53wd+1HDWSjl6Uk2wM8HO85WVM5JDX87SMyDTFIc0PZNxS -Mc9lPCMozOSukrXANp3mcUiHdSXQJ6+TftYHzUZNpFYu1wZ39ByOvIU6tHBALPdD -Ba958nk5/N+sT2YcnI50nPiIz3aOHtd0vagtqELYNg0kze1fpQYHAfn3eu7ISq6Q -S+PIL052Bm8xy9S2z/zooLbckJSRe40bO07is7NVjVO9ykq4ybV8AXtq0Ht3GrJL -J7FRnl8wOtPXp8rnps9WpH3WpHx1L9kopCUdW5U1n5FyrxibbReNShrbu+g6NBR/ -F0qZJe8Y1/jmWczLsfLgIGqNUiQe1j5JY4kQC1IAUPn4qxbRJ5x7EiN7s2qjL+p4 -A20Jzx8cmMALER4DHUYz4tjO8qg3zIih3jH3vrZ8ypm0vxsatBtljsva2KraJdRw -nohLyrcwYP6uCLKJKPB6PKX1en9KhicFjOoCs46NPkoZ3E7UhSg6lRRwfM/xNk/P -4I7+R/h9Y5ceIy6BfxTyRrLJxeCPHNGENFo/Bc7ckT65RtjkxekN1FC2fmAxksMF -ijp1t5BnWA2nrX+/rwYhzQ== +U2FsdGVkX195/LDODgm0n4ZK+F/wOH5xAa9XARpCbPD3Xi0NSk0ReJYvHVYru+sy +AgPg4WUiiC2YcOxZLFjev5rDKxiHAWye8bs99JnvAOCPSqaBul4A5jrJwi6dm5VB +imgmGPdK/chWrUAp6+80obMyuiwIBsPbgjuAliE2LvYfevraikRkB4oYIykMrysh +s0blX8J3fQfUKk3myT3KwVs9hpLRcTGUbyiph8rFsj2d2/hI4BojmmTepFtXds2X +4Xw8tk2fpcMBpoR+zVLW+bNR2z9jI1H+Iv6yg2/J6KDbJL3mpK8kuh/BMA3OcBxn 
+NqK5dYpiXaBf3B8PNsk5K9GlXDmcxfbb+YVa4xAWI3FHDLoOWPWKbfKRZRxYBHXJ +bzP1lkGUMIb0oXVW/k4SPnmZydrQg6lqrWjMSEAwxdyN70vobVLZ3a9s+61cpdWW +rLwuWJIoQo1Pd1JZw1Y8BOlUSIKImX1YGtz0d207OyrM2SMTH2Z6DTrnGhnu2EAJ +OQ44+zkgRXxlb43dK4CRd+SVqR+/Z92HI6yks1LDEPP6LfKDi/0AbHNUiJcUMFjR +dM3q0G+NKVIsRJCAU/bPmWB9FV6cxr1V56BQKAO9TSn+67Uv2d80vw6E91XgDMDM +J+XyBwiA6B2BgRzVbIKssBPX1nrzDyVT3WKXNKSubJDjwe7iDhPLwTwGc66DQ/gn +K75pYQ6vQbnIgNZipcl/ZQxCXSVtuCtSc+qdj0GXIOk4IUDO0qE+7TR99FUPLT5L +Jv+lJ1TUrf+OmvRF59lTdnCx863UYRNajmSWwOdkglJu/CACmv7VfI7884vta7o4 +Yab2wQe2WhFUmKIGRc18usPh7LmC2k5lsmzRnC0xlacfyOdOyJOXn28VGSaEW1J9 +q31UG9dID1Q8eo2KesVdyyzgDbXUqIMznc9hEpsiGKcUB+YZtoLGPvkQbBIkq2Yw +yVJeBH1IJwfZ91TVDOElA9RHMvfpnyx6TFvlWJ+cuvNxrX2DpCovMDy3opJzv2vN +GJdfVPG+na0NjaD5oHoPDaQDCBDzsd3keh8wJKaatiaRtsfmcYrznZrsdzacQkaN +YmTntMN9Xg35ASQ2VqCYKrU9NKqeHhM+8Sxe4a8/onEZKSbTvHkJnlfzs9oeS8Zk +kNJ+vos0SY6FCtoTgywPSmzDq5G8ijPuaZnRcqcrecHYIBz4S5M1YTbEskVzJY7D +IXiL0LAVj+ftMaRuTronJzCXkK43Zc1wRBI+4D7CylVDE4ZZ3iGp/587Is5UOeGd +GEWM0OeMWalXWUu3wXFyCU/P+tRKUOiV2etrogYfJQ0jjD2aCbxCXXTqbeTpW7UM +o8QUj9vJW3b8M2fDa7nRBYwgz0150GVnOO1kgtUHR7HYghg3lGN1uAJkjT+xBpxr +mY0Lp/nZhs732rHYq347sEz7FEAuZrRdeoR1vU4BEagiOVQdYN2ByHn/6frpUphM +odSNcPowi28N1lmVpEkSX69l2d2K4lKI1ki1ofTvMEf8awmU352OcHVb3/ld/pGI +/uqMFwVannZUPEUByh9iSJV7eZA21YO2bQGRgXoQ4SCkG+2sAYyXlOdUesstN0hH +Uzx3kTSk8XGBhVpQAm2ldp9qnojZbr69vVHLmr+nxtT9g0Qspn73uD7cGjKPrIMu +gzDZMSouPfIKzJy23OJmzTIqqLtEgH3U5BbYx0IknaNG7/dAV/3ik6mjmUVFGBRv +rAPnog6MNW4nvmmS449D3rCjoT8mPHMm3mVMDetaJbOMDP7KMWHIVR/FfWUeQHDX +YkbFxaaGL4kpcQqNMSnb9jitAHbecprYpnYVtS49EG8tE/TktuHpaukkPFPKgiwZ +OhJLfUpOqIwa8CAjoR4lIxBuzYdnz5f9Og+c0nWMW6DVFP6yRz1gPf/RveTbuo4o +liLWfjVBqZaXm+YrE99q2lReLRcLEaqiA5ulyvr84v+2PeP12hUeus8J5OVmVQrw +1j5xTjVTW0Of0ve7JjjQwwZ80V8GIusRBjd9o9cRb5G2q6pt95N8g6+jsUHRqx4U +c7LuMEYT8eTo7/31omk0THS4CP4qMU7yFX251cEtUOjzhlNphiex+nOLbWHgyEqv +ZW51NS+7MwrOlmOg866RRIPL5r9h7i9yjEBi43bwRK38BnJSgsHpgK+pLxA/CE5x +ZMRsU1Vc9e87ncuck3J8rA== diff --git a/dev/ci_secret_gpg_subkeys.pgp.enc b/dev/ci_secret_gpg_subkeys.pgp.enc index 8176a94e1efb762ff75358028ae6f442c2b7df00..4acec40faec3668ea4ee3112f219a2082f3281f4 100644 --- a/dev/ci_secret_gpg_subkeys.pgp.enc +++ b/dev/ci_secret_gpg_subkeys.pgp.enc @@ -1,27 +1,27 @@ -U2FsdGVkX19xlphlX9RYlbnWqkWThd/1EzoFeOx93VPLXYl1VWenvzMIR0nuxWUR -I8S8HYU5ef+JHI77HBjjoKiWjR6I+9bttYNpsdYWDp2biIcIsvwR0edf9xyYqN17 -lp5316SsXfqhQCF60FLDQu2JLNla6HoDOEmZG4nzI86PSzXMYwpxwFESih4TUbeQ -QbYr6XKY1L/WxjRzt/ZtG3PNhCNDm7bYGcm0JS0Xa4p+cbUb6qgiHyunU+mEewSE -e/RXgbOeXM6PKuI8dUDNQ1XDg62OJ3y3ewj1S6ZYKt3cP1S6MWuRQ2B8JUpfLwz4 -uMZJPrbQIMm9UnzkVYCX4tQ/6b19tJrhJxeYAvu9CtLeKDfl7YE1vFx2TBlzDGgl -UXbasfWRcQaBdBoarZ1G1sMFrvcGFhS3dAX2laattlLeEt8pBe/5WDMnDK4EaSR7 -fJcdWbzvH9jzM9Ww0N2sEH66uwnI4On2hj7rKGMy7pJuzHyjov2PTIV/A17rvboS -uJ6diZJXz2HW2f082e3OQQFtb/23JhxDPEulRLfDnijmWio+qlHYIcilHeO61fo8 -01Y52kGTDrTL3zKybY09J/vvj140sY7HDJuMqOIc7zYDI0G3/2rEtjGPjkKgukp/ -8GPmPWHvPgb0UcYxRhqU/DXJWlOocVtGFDkQJ1YDKK7uwYvSOisZ8WoTStR8ZjIk -ka9MLsRYmJ2vogEi1KgUxKxyWGoBKUeO76VnOOycX9c+H0RJ2Z1x5DAJA6+NsdDI -slbNvioVz6ccRdk9fYLQQ66dkmmIs9m2fhXcKDjK/JTr3/H19fdtRbnxztWVe6oW -CardLNESGHfNn/7YowW4yuhYzz9l1kyw9jPcW7vzz36tEvNba145Jv/fFuU6P2ew -S4dTuuW7jMD/WwkG42mgNAFuFFmSNcjrNB24sPukGhQmMi6FkIMCfXwUQCi1mFT3 -ioiP+DmWwq0dzB7zwD+mvgv8CdrsC8KPghH+yD9PGoaozEcvHOTEt1sFu0Pdv4CV -1OG+ulGrWPpXJQEXchkg6PifTzvN/2yrrssPOpgnzr3rp8jbrdS3YE+qWL1dXQeg -Xz76tyzGj2EjMsl0OHzDiuJx72mqmQB28ZjyHBtIhOKdRRetbv9pCl9EgXP+JDkM -3/wexneKoJi2CyuI6ggQ56RFRXywax7g/5OsUoHzie5QrTKPX8pEhoSn4UZNyNYc -c2gcyYBZBJaAo5bAWU7NPtkva8mGy7eRLdLKu+g1k+xYakMjxnKgK9nEg1NRhnka 
-LKmoYGOK7F4IR6FfQvJpB5Gcq+qmk1oMMuNjDAAugvwpVnTgmn+isbMnPR6vyxX6 -WrGciFOC50z69mT6IxQlyOFwmS+jpWwobgLFg7i+jdCSnC2OUVzxTGVkepq5Nfp/ -Epo/PrOKljJLO4T2OsMphEnnCbgFVOMfiKFP2q5gjGzIFOB6B5wbBm6Bm0sfWF/e -dobUzTlo9yGji2kaKashiI1UzyCx1gMUBt+DferEXis9rDSIX4cKFbNvLHWBdIZR -IsHfKo5LUUNfEXGtW9RVrlf9FxzFq7PB32zWMrcAPB1WWPaXdBqA3AChi1QvpqQD -CEAR1hdEpvYXY+vXtYsw5k7nVafqv5OvZlxue4o9TYs+cj/AgZenAb2bBOSHboMb -NueCK3KLTlhSfr+fYJoQdkzujVAk6AaEOX6OvCXRw8OwtAXdJBWCM3pDJoH06DpT +U2FsdGVkX192wtoDMNw97I2z1ltAliLgkHEly1jCJaBuhPqySwrFKmk0At+X7aNW +LGygUWA0Dz7WAKRq1Fe7i4XRy9G9vHTKp9GJC+UV7bXpZhm7Qakx70Car3V2Xt7M +B3Q65TFiJuF/M1JNmYOj8APLv8un/euUsjPEwXtn7V9+O0izySpPbr/RmiFTu6L5 +28LLMiA0m/4ZDIqpFrAht0F/KWrUbn9TmdO+VPBAxXD6ETbuR7dO72vOXVELZr6m +IIq3rNKN3RuVFUQId7KcbDQdeKUmlXI/oeROND1PCq9n04vIhNJ5V+iPFa3uyYc/ +f4uzMdojv40aoRLsmS2YPdK4StkMn2q/yOlnw2KIC7ic+V5hFx3PCTvQPhHikyaZ +y1C8XNpXl8NdOrn1lQlh4yTNIxgD05V8R8eytdj9xD1KruEh6e7yFfLQJcH5j5Qv +EYeYdj153+RSZdkPwzE3mCdX8i1Zfl2sil3iGia4m262n8/+lbHQtFK2c8oj5S33 +UlYxpgT6JVuEYue0Jr/IEgJDBGq1r2+OMlSRbfYdJXYgxUJGAm2fBT261/1dc1YE +sEtuo/CWlinQS3vcsOhUpLYknlDNmeSx/mHdYJ9M/aOKdm4CMHqWi8WM6m7fz+CP +kvFw7SvAz8tER166sO1CN5T1eQ3U/Bo6BVU/9GEibGkNIq1MB5IHlPZxwrnbv5Ij +FeyLmhRgEaKfHXzDSi13DfJ5IqO/n/VFL1Cgng3/qMw6nvtOQv0+Xi8wXbhURGE6 +2RqkpasmCi/Ro0qTUZimH5qH2GbVHebbLo7Amn7E51CX9eHSRZMo9tW5kVXjMG+g +Va/ckpAZXrH6bnCXlHLVD0K//TXtycyIy9g8pqYecJR6PF3ziQQROhc2URQmJXlw +F4cUcRj3MemrLk8LSO6yntgd1K510x3jKI0T+gOZKcY2ucNHUL7WW+QGbS4EW+wr +9HJhkCgNKGqLMKxbJOvpWoQ9SNt1vR51boMmPbC7f+FG00Evg/sCJr3Nm1Y5VrY+ +2I9XkNnHpI4c3jAT1yU4hBpzGXTiPW5tOq+sY8bP24D4O/vdQJiaSmfiX95jW4uR +xh4QQzfi8bz5o6HJWKTxrcRjpihWUgIzjBPoVvjXUQG68MfumgmJcVALuIU7nhay +EMNNDR07Rqeeixoe2zKnvZJkkzjNDDqK3nQ3s9S+SAkiJiy/m+Yq2Z872e3nTop7 +HkIR4dt7NBCLiabVTQcTfSkjmobcA7edEJRjabRDV5lduxXFE9yM7v4ULeMhY3uT +NTEailyttfUcGuLoFQ/1DMQoPKD4Rs72mLOn5hu+nuFyoitooUZXh30dSk0OASdR +jB3el2JK20nk2TLgGy8P+xcmHlTedF3/R3UikNNTHtOyztPvSiCWTWiC6pptajSa +pFQ6GUbryR6PcFz4VMT94rxCw74ujB6Ti6uSyHexoyAnZecOkWwlvv0VatomUPLm +Lo8FUvIddaDCWdOr8ARgghSBLKnz/viLaKquKsgOO77VtRzBgeflTZXlC9Icj78X +QUlXb6pOy1eaEN8HsAY72iPX514IUvszNwsFcB1vtMhrEgwIbBoDoClBeePzo5sl +UZeIZ034QAMWJfoid132wZTZbs/x3kXIlV9uqpiBopeC5oRGA3U/kuMB8z+yGau3 +G6fslHDIItnnJ70wyfyxV/tSae4ysbFjeEZIcg1KeCjYXrXmgD7MYCmVzIPJu2eV diff --git a/dev/ggr_matching.py b/dev/ggr_matching.py index 4f149e8309b367418da4d2c66856611335b53f7c..7c066e39ecb44bbfa594a688abe788bf26dba4ad 100644 --- a/dev/ggr_matching.py +++ b/dev/ggr_matching.py @@ -334,7 +334,7 @@ class AnnotCocoDataset(torch.utils.data.Dataset, ub.NiceRepr): def __init__(self, sampler, workdir=None, augment=False, dim=416): print('make AnnotCocoDataset') - cacher = ub.Cacher('aid_pccs_v2', cfgstr=sampler.dset.tag, verbose=True) + cacher = ub.Cacher('aid_pccs_v2', depends=sampler.dset.tag, verbose=True) aid_pccs = cacher.tryload() if aid_pccs is None: aid_pccs = extract_ggr_pccs(sampler.dset) diff --git a/dev/gpg_owner_trust.enc b/dev/gpg_owner_trust.enc index 26ef0a090737ff4aa741c2d503e5d4c150681333..452aa9475a992c78e601f62c8fad8181ec7aa4b4 100644 --- a/dev/gpg_owner_trust.enc +++ b/dev/gpg_owner_trust.enc @@ -1,10 +1,10 @@ -U2FsdGVkX19EqYN6ZLKcFRAOa3KyIA5fxeQwYimK0H/5TmJi0UOFtiZv1v5/SpoJ -0woGOe0nk+Fa3b8r88rYv2kh+pFCfYXODh0bl+m3A3FZML7EOzQ9j440oNjM+yQe -JX3G9pEMxdPCJpHruo/snR+4PwDCsA9BwbgbY1KHTDAKDr7gcpKxe5KgLIchLTFX -nnE7bWshZt2ty6I4oqr1Hzi+g/8NvpBB4W7uXoGysoBR0WzY4xtDbuGKF6VOl/TO -3Kbnuiy1wdRA80MlIx7vbYMoLUP0op/ZG7Tb9ZBcEmzMwPypU6qaMGW/Ymy2BRxK -wQHkJ+S1RuutV8NpRGr2op5Drwk14TC73Jtn8gBJJ2Vk0O3aZLJGkfbBWlKsocZL 
-Rk4lgrzhesgRHFRzB9QNn8jJ+wLEiMxyKnnTax/3/n/EqDM1NdStxKIGFd0ZzT1J -Zd/abSTrrXE7rgSZiz5oNoGQKauD+lAHYNkHInbVuZHeVneaYB9IiGrNk/t33/Op -gVRy++TadqxK162XoU/TiiFKpF68Uvaw4pnn9y3bdnQtbD0QuMfHQrDz/mcnt5A+ -6LLGK4zRRabZssfhhE8ISw== +U2FsdGVkX1+fix/zD7WLjEq5xzyU5A4FKBnPmyfHSd5mpLszwFKm7ElieCFvknUf +TYx1vg7q+bZmFCIT+NsMpeVzOqof84dPFUGeYmRHoWO1RX/4hC86yI+yWQzKdPaD +aY5ZiR8xf3ZVP2UTFC5xOpcI54YHcVh30FsVtn6edXA/iXv2q+rnDC/5W229TEAj +huhBn+Km3JuhFXzlQjSUcfwHrFhb8LxM5T4ARhthGtG6UQs6gyAcAgFl2ucHdek7 +Ti4HeQEUmIjgdXA9GJP6H4GHYhQZO3Pk7i0m9YXnmjpJLd3uxAmcJ0nh4BJTTLBS +v715I1tHRK+SChdZP1IWXjRRq2WFP758YVYdhSiozFBtPOcFkmtOzYJwxwbvcIFM +CgXkbJzwgOue2Rb/x0380UT/X4NlmQh6tGTVqhZy4JKKoXdOqDUem2JxohZ8fS4o +zYaBTSVooOmsAKnxXTdCm0u3bpt0kETDWtzqrzmJuRizq9ptJ5xdiky2DEQSXEC1 +tkYwx0jE033OYCJK2QRAr8AGi1jcsc95gFvxquqQHyByPNkNgIjqwk0p0W8Cgzf8 +TomviPy9/TVhEM5ttvshZw== diff --git a/dev/imagenet.py b/dev/imagenet.py index 8c7177865fd1c21a8b9e37f77202c1d28fcc51ea..890318971ec3c4a02cfcd420a790bcf7015d0c40 100644 --- a/dev/imagenet.py +++ b/dev/imagenet.py @@ -246,7 +246,7 @@ def setup_harn(cmdline=True, **kwargs): config.load(cmdline=cmdline) nh.configure_hacks(config) # fix opencv bugs - cacher = ub.Cacher('tiny-imagenet', cfgstr='v4', verbose=3) + cacher = ub.Cacher('tiny-imagenet', depends='v4', verbose=3) data = cacher.tryload() if data is None: data = grab_tiny_imagenet_as_coco() diff --git a/dev/setup_secrets.sh b/dev/setup_secrets.sh index 56c347212bfaebf2c17d9cb99b656ac306218743..5394075892845a6b5fe320c5089be2feffc86048 100644 --- a/dev/setup_secrets.sh +++ b/dev/setup_secrets.sh @@ -1,14 +1,26 @@ +#!/bin/bash __doc__=' ============================ SETUP CI SECRET INSTRUCTIONS ============================ +TODO: These instructions are currently pieced together from old disparate +instances, and are not yet fully organized. + The original template file should be: -~/misc/templates/PYPKG/dev/setup_secrets.sh +~/code/xcookie/dev/setup_secrets.sh Development script for updating secrets when they rotate +The intent of this script is to help setup secrets for whichever of the +following CI platforms is used: + +../.github/workflows/tests.yml +../.gitlab-ci.yml +../.circleci/config.yml + + ========================= GITHUB ACTION INSTRUCTIONS ========================= @@ -27,7 +39,7 @@ GITLAB ACTION INSTRUCTIONS ```bash cat .setup_secrets.sh | \ sed "s|utils||g" | \ - sed "s|PYPKG||g" | \ + sed "s|xcookie||g" | \ sed "s|travis-ci-Erotemic||g" | \ sed "s|CI_SECRET||g" | \ sed "s|GITLAB_ORG_PUSH_TOKEN||g" | \ @@ -36,12 +48,12 @@ GITLAB ACTION INSTRUCTIONS ``` * Make sure you add Runners to your project - https://gitlab.org.com/utils/PYPKG/-/settings/ci_cd + https://gitlab.org.com/utils/xcookie/-/settings/ci_cd in Runners-> Shared Runners and Runners-> Available specific runners * Ensure that you are auto-cancel redundant pipelines. - Navigate to https://gitlab.kitware.com/utils/PYPKGS/-/settings/ci_cd and ensure "Auto-cancel redundant pipelines" is checked. + Navigate to https://gitlab.kitware.com/utils/xcookie/-/settings/ci_cd and ensure "Auto-cancel redundant pipelines" is checked. More details are here https://docs.gitlab.com/ee/ci/pipelines/settings.html#auto-cancel-redundant-pipelines @@ -85,6 +97,15 @@ GITLAB ACTION INSTRUCTIONS # and masked option. Also make sure you have master and release # branches protected. 
# https://gitlab.kitware.com/computer-vision/kwcoco/-/settings/repository#js-protected-branches-settings + + +============================ +Relevant CI Secret Locations +============================ + +https://github.com/pyutils/line_profiler/settings/secrets/actions + +https://app.circleci.com/settings/project/github/pyutils/line_profiler/environment-variables?return-to=https%3A%2F%2Fapp.circleci.com%2Fpipelines%2Fgithub%2Fpyutils%2Fline_profiler ' setup_package_environs(){ @@ -94,7 +115,12 @@ setup_package_environs(){ non-secret variables are written to disk and loaded by the script, such that the specific repo only needs to modify that configuration file. " + echo "Choose an organization specific setting or make your own. This needs to be generalized more" +} + +### FIXME: Should be configurable for general use +setup_package_environs_gitlab_kitware(){ echo ' export VARNAME_CI_SECRET="CI_KITWARE_SECRET" export VARNAME_TWINE_USERNAME="TWINE_USERNAME" @@ -105,7 +131,9 @@ setup_package_environs(){ export GPG_IDENTIFIER="=Erotemic-CI " ' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh git add dev/secrets_configuration.sh +} +setup_package_environs_github_erotemic(){ echo ' export VARNAME_CI_SECRET="EROTEMIC_CI_SECRET" export VARNAME_TWINE_USERNAME="TWINE_USERNAME" @@ -115,7 +143,9 @@ setup_package_environs(){ export GPG_IDENTIFIER="=Erotemic-CI " ' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh git add dev/secrets_configuration.sh +} +setup_package_environs_github_pyutils(){ echo ' export VARNAME_CI_SECRET="PYUTILS_CI_SECRET" export GPG_IDENTIFIER="=PyUtils-CI " @@ -130,20 +160,194 @@ setup_package_environs(){ #' | python -c "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()).strip(chr(10)))" > dev/secrets_configuration.sh } +upload_github_secrets(){ + load_secrets + unset GITHUB_TOKEN + #printf "%s" "$GITHUB_TOKEN" | gh auth login --hostname Github.com --with-token + gh auth login + source dev/secrets_configuration.sh + gh secret set "$VARNAME_CI_SECRET" -b"${!VARNAME_CI_SECRET}" + gh secret set "$VARNAME_TWINE_USERNAME" -b"${!VARNAME_TWINE_USERNAME}" + gh secret set "$VARNAME_TWINE_PASSWORD" -b"${!VARNAME_TWINE_PASSWORD}" + gh secret set "$VARNAME_TEST_TWINE_PASSWORD" -b"${!VARNAME_TEST_TWINE_PASSWORD}" + gh secret set "$VARNAME_TEST_TWINE_USERNAME" -b"${!VARNAME_TEST_TWINE_USERNAME}" +} + + +upload_gitlab_group_secrets(){ + __doc__=" + Use the gitlab API to modify group-level secrets + " + # In Repo Directory + load_secrets + REMOTE=origin + GROUP_NAME=$(git remote get-url $REMOTE | cut -d ":" -f 2 | cut -d "/" -f 1) + HOST=https://$(git remote get-url $REMOTE | cut -d "/" -f 1 | cut -d "@" -f 2 | cut -d ":" -f 1) + echo " + * GROUP_NAME = $GROUP_NAME + * HOST = $HOST + " + PRIVATE_GITLAB_TOKEN=$(git_token_for "$HOST") + if [[ "$PRIVATE_GITLAB_TOKEN" == "ERROR" ]]; then + echo "Failed to load authentication key" + return 1 + fi + + TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" + GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". 
| map(select(.path==\"$GROUP_NAME\")) | .[0].id") + echo "GROUP_ID = $GROUP_ID" + + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" + cat "$TMP_DIR/group_info" | jq + + # Get group-level secret variables + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables" > "$TMP_DIR/group_vars" + cat "$TMP_DIR/group_vars" | jq '.[] | .key' + + if [[ "$?" != "0" ]]; then + echo "Failed to access group level variables. Probably a permission issue" + fi + + source dev/secrets_configuration.sh + SECRET_VARNAME_ARR=(VARNAME_CI_SECRET VARNAME_TWINE_USERNAME VARNAME_TWINE_PASSWORD VARNAME_TEST_TWINE_PASSWORD VARNAME_TEST_TWINE_USERNAME VARNAME_PUSH_TOKEN) + for SECRET_VARNAME_PTR in "${SECRET_VARNAME_ARR[@]}"; do + SECRET_VARNAME=${!SECRET_VARNAME_PTR} + echo "" + echo " ---- " + LOCAL_VALUE=${!SECRET_VARNAME} + REMOTE_VALUE=$(cat "$TMP_DIR/group_vars" | jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") + + # Print current local and remote value of a variable + echo "SECRET_VARNAME_PTR = $SECRET_VARNAME_PTR" + echo "SECRET_VARNAME = $SECRET_VARNAME" + echo "(local) $SECRET_VARNAME = $LOCAL_VALUE" + echo "(remote) $SECRET_VARNAME = $REMOTE_VALUE" + + #curl --request GET --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables/SECRET_VARNAME" | jq -r .message + if [[ "$REMOTE_VALUE" == "" ]]; then + # New variable + echo "Remove variable does not exist, posting" + curl --request POST --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables" \ + --form "key=${SECRET_VARNAME}" \ + --form "value=${LOCAL_VALUE}" \ + --form "protected=true" \ + --form "masked=true" \ + --form "environment_scope=*" \ + --form "variable_type=env_var" + elif [[ "$REMOTE_VALUE" != "$LOCAL_VALUE" ]]; then + echo "Remove variable does not agree, putting" + # Update variable value + curl --request PUT --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables/$SECRET_VARNAME" \ + --form "value=${LOCAL_VALUE}" + else + echo "Remote value agrees with local" + fi + done + rm "$TMP_DIR/group_vars" +} + +upload_gitlab_repo_secrets(){ + __doc__=" + Use the gitlab API to modify group-level secrets + " + # In Repo Directory + load_secrets + REMOTE=origin + GROUP_NAME=$(git remote get-url $REMOTE | cut -d ":" -f 2 | cut -d "/" -f 1) + PROJECT_NAME=$(git remote get-url $REMOTE | cut -d ":" -f 2 | cut -d "/" -f 2 | cut -d "." -f 1) + HOST=https://$(git remote get-url $REMOTE | cut -d "/" -f 1 | cut -d "@" -f 2 | cut -d ":" -f 1) + echo " + * GROUP_NAME = $GROUP_NAME + * PROJECT_NAME = $PROJECT_NAME + * HOST = $HOST + " + PRIVATE_GITLAB_TOKEN=$(git_token_for "$HOST") + if [[ "$PRIVATE_GITLAB_TOKEN" == "ERROR" ]]; then + echo "Failed to load authentication key" + return 1 + fi + + TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" + GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". 
| map(select(.path==\"$GROUP_NAME\")) | .[0].id") + echo "GROUP_ID = $GROUP_ID" + + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" + cat "$TMP_DIR/group_info" | jq + + PROJECT_ID=$(cat "$TMP_DIR/group_info" | jq ".projects | map(select(.path==\"$PROJECT_NAME\")) | .[0].id") + echo "PROJECT_ID = $PROJECT_ID" + + # Get group-level secret variables + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables" > "$TMP_DIR/project_vars" + cat "$TMP_DIR/project_vars" | jq '.[] | .key' + if [[ "$?" != "0" ]]; then + echo "Failed to access project level variables. Probably a permission issue" + fi + + LIVE_MODE=1 + source dev/secrets_configuration.sh + SECRET_VARNAME_ARR=(VARNAME_CI_SECRET VARNAME_TWINE_USERNAME VARNAME_TWINE_PASSWORD VARNAME_TEST_TWINE_PASSWORD VARNAME_TEST_TWINE_USERNAME VARNAME_PUSH_TOKEN) + for SECRET_VARNAME_PTR in "${SECRET_VARNAME_ARR[@]}"; do + SECRET_VARNAME=${!SECRET_VARNAME_PTR} + echo "" + echo " ---- " + LOCAL_VALUE=${!SECRET_VARNAME} + REMOTE_VALUE=$(cat "$TMP_DIR/project_vars" | jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") + + # Print current local and remote value of a variable + echo "SECRET_VARNAME_PTR = $SECRET_VARNAME_PTR" + echo "SECRET_VARNAME = $SECRET_VARNAME" + echo "(local) $SECRET_VARNAME = $LOCAL_VALUE" + echo "(remote) $SECRET_VARNAME = $REMOTE_VALUE" + + #curl --request GET --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables/SECRET_VARNAME" | jq -r .message + if [[ "$REMOTE_VALUE" == "" ]]; then + # New variable + echo "Remove variable does not exist, posting" + if [[ "$LIVE_MODE" == "1" ]]; then + curl --request POST --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables" \ + --form "key=${SECRET_VARNAME}" \ + --form "value=${LOCAL_VALUE}" \ + --form "protected=true" \ + --form "masked=true" \ + --form "environment_scope=*" \ + --form "variable_type=env_var" + else + echo "dry run, not posting" + fi + elif [[ "$REMOTE_VALUE" != "$LOCAL_VALUE" ]]; then + echo "Remove variable does not agree, putting" + # Update variable value + if [[ "$LIVE_MODE" == "1" ]]; then + curl --request PUT --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables/$SECRET_VARNAME" \ + --form "value=${LOCAL_VALUE}" + else + echo "dry run, not putting" + fi + else + echo "Remote value agrees with local" + fi + done + rm "$TMP_DIR/project_vars" +} export_encrypted_code_signing_keys(){ - setup_package_environs + # You will need to rerun this whenever the signkeys expire and are renewed - cd $REPO_DPATH # Load or generate secrets load_secrets + source dev/secrets_configuration.sh + CI_SECRET="${!VARNAME_CI_SECRET}" - echo "CI_SECRET = $CI_SECRET" + echo "VARNAME_CI_SECRET = $VARNAME_CI_SECRET" + echo "CI_SECRET=$CI_SECRET" + echo "GPG_IDENTIFIER=$GPG_IDENTIFIER" # ADD RELEVANT VARIABLES TO THE CI SECRET VARIABLES - # HOW TO ENCRYPT YOUR SECRET GPG KEY # You need to have a known public gpg key for this to make any sense @@ -156,19 +360,19 @@ export_encrypted_code_signing_keys(){ # Export plaintext gpg public keys, private sign key, and trust info mkdir -p dev gpg --armor --export-options export-backup --export-secret-subkeys "${GPG_SIGN_SUBKEY}!" 
> dev/ci_secret_gpg_subkeys.pgp - gpg --armor --export ${GPG_SIGN_SUBKEY} > dev/ci_public_gpg_key.pgp + gpg --armor --export "${GPG_SIGN_SUBKEY}" > dev/ci_public_gpg_key.pgp gpg --export-ownertrust > dev/gpg_owner_trust # Encrypt gpg keys and trust with CI secret GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -e -a -in dev/ci_public_gpg_key.pgp > dev/ci_public_gpg_key.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -e -a -in dev/ci_secret_gpg_subkeys.pgp > dev/ci_secret_gpg_subkeys.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -e -a -in dev/gpg_owner_trust > dev/gpg_owner_trust.enc - echo $MAIN_GPG_KEYID > dev/public_gpg_key + echo "$MAIN_GPG_KEYID" > dev/public_gpg_key # Test decrpyt GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | gpg --list-packets --verbose GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | gpg --list-packets --verbose - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | gpg --list-packets --verbose + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc cat dev/public_gpg_key unload_secrets @@ -184,10 +388,18 @@ export_encrypted_code_signing_keys(){ } +# See the xcookie module gitlab python API +#gitlab_set_protected_branches(){ +#} + + _test_gnu(){ + # shellcheck disable=SC2155 export GNUPGHOME=$(mktemp -d -t) - ls -al $GNUPGHOME - chmod 700 -R $GNUPGHOME + ls -al "$GNUPGHOME" + chmod 700 -R "$GNUPGHOME" + + source dev/secrets_configuration.sh gpg -k @@ -197,11 +409,14 @@ _test_gnu(){ cat dev/public_gpg_key GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | gpg --import + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc | gpg --import-ownertrust GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | gpg --import gpg -k + # | gpg --import + # | gpg --list-packets --verbose } - diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..7c93ab7a9f82454d52f6aee061f3acf240e7510f --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,393 @@ +""" +Notes: + http://docs.readthedocs.io/en/latest/getting_started.html + + pip install sphinx sphinx-autobuild sphinx_rtd_theme sphinxcontrib-napoleon + + cd ~/code/netharn + mkdir docs + cd docs + + sphinx-quickstart + + # need to edit the conf.py + + cd ~/code/netharn/docs + sphinx-apidoc -f -o ~/code/netharn/docs/source ~/code/netharn/netharn --separate + make html + + Also: + To turn on PR checks + + https://docs.readthedocs.io/en/stable/guides/autobuild-docs-for-pull-requests.html + + https://readthedocs.org/dashboard/netharn/advanced/ + + ensure your github account is connected to readthedocs + https://readthedocs.org/accounts/social/connections/ + + ### For gitlab + + 
The user will need to enable the repo on their readthedocs account: + https://readthedocs.org/dashboard/import/manual/? + + To enable the read-the-docs go to https://readthedocs.org/dashboard/ and login + + Make sure you have a .readthedocs.yml file + + Click import project: (for github you can select, but gitlab you need to import manually) + Set the Repository NAME: $REPO_NAME + Set the Repository URL: $REPO_URL + + For gitlab you also need to setup an integrations and add gitlab + incoming webhook Then go to $REPO_URL/hooks and add the URL + + Will also need to activate the main branch: + https://readthedocs.org/projects/netharn/versions/ +""" +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/stable/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- +import sphinx_rtd_theme +from os.path import exists +from os.path import dirname +from os.path import join + + +def parse_version(fpath): + """ + Statically parse the version number from a python file + """ + import ast + if not exists(fpath): + raise ValueError('fpath={!r} does not exist'.format(fpath)) + with open(fpath, 'r') as file_: + sourcecode = file_.read() + pt = ast.parse(sourcecode) + class VersionVisitor(ast.NodeVisitor): + def visit_Assign(self, node): + for target in node.targets: + if getattr(target, 'id', None) == '__version__': + self.version = node.value.s + visitor = VersionVisitor() + visitor.visit(pt) + return visitor.version + +project = 'netharn' +copyright = '2022, Jon Crall' +author = 'Jon Crall' +modname = 'netharn' + +modpath = join(dirname(dirname(dirname(__file__))), modname, '__init__.py') +release = parse_version(modpath) +version = '.'.join(release.split('.')[0:2]) + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.viewcode', + 'sphinx.ext.napoleon', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.autosummary', + # 'myst_parser', # TODO +] + +todo_include_todos = True +napoleon_google_docstring = True +napoleon_use_param = False +napoleon_use_ivar = True + +autodoc_inherit_docstrings = False + +autodoc_member_order = 'bysource' +# autodoc_mock_imports = ['torch', 'torchvision', 'visdom'] + +intersphinx_mapping = { + # 'pytorch': ('http://pytorch.org/docs/master/', None), + 'python': ('https://docs.python.org/3', None), + 'click': ('https://click.palletsprojects.com/', None), + # 'xxhash': ('https://pypi.org/project/xxhash/', None), + # 'pygments': ('https://pygments.org/docs/', None), + # 'tqdm': ('https://tqdm.github.io/', None), + # Requries that the repo have objects.inv + 'kwarray': ('https://kwarray.readthedocs.io/en/latest/', None), + 'kwimage': ('https://kwimage.readthedocs.io/en/latest/', None), + # 'kwplot': ('https://kwplot.readthedocs.io/en/latest/', None), + 'ndsampler': ('https://ndsampler.readthedocs.io/en/latest/', None), + 'ubelt': ('https://ubelt.readthedocs.io/en/latest/', None), + 'xdoctest': ('https://xdoctest.readthedocs.io/en/latest/', None), + 'networkx': ('https://networkx.org/documentation/stable/', None), + 'scriptconfig': ('https://scriptconfig.readthedocs.io/en/latest/', None), + +} +__dev_note__ = """ +python -m sphinx.ext.intersphinx https://docs.python.org/3/objects.inv +python -m sphinx.ext.intersphinx https://kwcoco.readthedocs.io/en/latest/objects.inv +python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv +python -m sphinx.ext.intersphinx https://kwarray.readthedocs.io/en/latest/objects.inv +python -m sphinx.ext.intersphinx https://kwimage.readthedocs.io/en/latest/objects.inv +python -m sphinx.ext.intersphinx https://ubelt.readthedocs.io/en/latest/objects.inv +python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv +""" + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = ['.rst', '.md'] + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# +html_theme_options = { + 'collapse_navigation': False, + 'display_version': True, + # 'logo_only': True, +} +# html_logo = '.static/netharn.svg' +# html_favicon = '.static/netharn.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'netharndoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'netharn.tex', 'netharn Documentation', + 'Jon Crall', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'netharn', 'netharn Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'netharn', 'netharn Documentation', + author, 'netharn', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Extension configuration ------------------------------------------------- + + +from sphinx.domains.python import PythonDomain # NOQA +# from sphinx.application import Sphinx # NOQA +from typing import Any, List # NOQA + + +class PatchedPythonDomain(PythonDomain): + """ + References: + https://github.com/sphinx-doc/sphinx/issues/3866 + """ + def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + # TODO: can use this to resolve references nicely + # if target.startswith('ub.'): + # target = 'ubelt.' 
+ target[3] + return_value = super(PatchedPythonDomain, self).resolve_xref( + env, fromdocname, builder, typ, target, node, contnode) + return return_value + + +def process(app, what_: str, name: str, obj: Any, options: Any, lines: + List[str]) -> None: + """ + Custom process to transform docstring lines Remove "Ignore" blocks + + Args: + app (sphinx.application.Sphinx): the Sphinx application object + + what (str): + the type of the object which the docstring belongs to (one of + "module", "class", "exception", "function", "method", "attribute") + + name (str): the fully qualified name of the object + + obj: the object itself + + options: the options given to the directive: an object with + attributes inherited_members, undoc_members, show_inheritance + and noindex that are true if the flag option of same name was + given to the auto directive + + lines (List[str]): the lines of the docstring, see above + + References: + https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html + https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html + """ + # if what and what_ not in what: + # return + orig_lines = lines[:] + + # text = '\n'.join(lines) + # if 'Example' in text and 'CommandLine' in text: + # import xdev + # xdev.embed() + + ignore_tags = tuple(['Ignore']) + + mode = None + # buffer = None + new_lines = [] + for i, line in enumerate(orig_lines): + + # See if the line triggers a mode change + if line.startswith(ignore_tags): + mode = 'ignore' + elif line.startswith('CommandLine'): + mode = 'cmdline' + elif line and not line.startswith(' '): + # if the line startswith anything but a space, we are no + # longer in the previous nested scope + mode = None + + if mode is None: + new_lines.append(line) + elif mode == 'ignore': + # print('IGNORE line = {!r}'.format(line)) + pass + elif mode == 'cmdline': + if line.startswith('CommandLine'): + new_lines.append('.. rubric:: CommandLine') + new_lines.append('') + new_lines.append('.. code-block:: bash') + new_lines.append('') + # new_lines.append(' # CommandLine') + else: + # new_lines.append(line.strip()) + new_lines.append(line) + else: + raise KeyError(mode) + + lines[:] = new_lines + # make sure there is a blank line at the end + if lines and lines[-1]: + lines.append('') + + +def setup(app): + app.add_domain(PatchedPythonDomain, override=True) + if 1: + # New Way + # what = None + app.connect('autodoc-process-docstring', process) + else: + # OLD WAY + # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings + # Register a sphinx.ext.autodoc.between listener to ignore everything + # between lines that contain the word IGNORE + # from sphinx.ext.autodoc import between + # app.connect('autodoc-process-docstring', between('^ *Ignore:$', exclude=True)) + pass + return app diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..a864c6ecea1cab7adacdad370fe71f72dfc407a0 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,26 @@ +:gitlab_url: https://gitlab.kitware.com/computer-vision/netharn + +.. The large version wont work because github strips rst image rescaling. https://i.imgur.com/AcWVroL.png + # TODO: Add a logo + .. image:: https://i.imgur.com/PoYIsWE.png + :height: 100px + :align: left + +Welcome to netharn's documentation! +=================================== + +.. The __init__ files contains the top-level documentation overview +.. automodule:: netharn.__init__ + :show-inheritance: + +.. 
toctree:: + :maxdepth: 5 + + netharn + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` \ No newline at end of file diff --git a/netharn/__init__.py b/netharn/__init__.py index cc2d4c1340350976618e0a2ad4c8ef452815d47d..a3fe57f227e6cf9fdde981d2e9a7074d62e38b23 100644 --- a/netharn/__init__.py +++ b/netharn/__init__.py @@ -4,7 +4,7 @@ mkinit netharn --noattrs --dry mkinit netharn --noattrs """ -__version__ = '0.5.18' +__version__ = '0.6.0' try: # PIL 7.0.0 removed PIL_VERSION, which breaks torchvision, monkey patch it diff --git a/netharn/analytic/receptive_field_for.py b/netharn/analytic/receptive_field_for.py index a768ee1eadcabb65b94c3d0152f707a40bde99fa..04d2d3ee357ed4f8b90c42eeff1d1091f659850c 100644 --- a/netharn/analytic/receptive_field_for.py +++ b/netharn/analytic/receptive_field_for.py @@ -10,12 +10,18 @@ import numpy as np from collections import OrderedDict from netharn.analytic.output_shape_for import OutputShapeFor from netharn.analytic import analytic_for -from distutils.version import LooseVersion # try: # from netharn.device import MountedModel # except ImportError: # MountedModel = None + +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion + + REGISTERED_TYPES = [] diff --git a/netharn/api.py b/netharn/api.py index c1e1ef939350407bacc73a3359cb53647113d3b0..a34d7fc456bba5104a572c019c780574b8aaa1cf 100644 --- a/netharn/api.py +++ b/netharn/api.py @@ -10,7 +10,11 @@ Concepts: import ubelt as ub import torch -from distutils.version import LooseVersion +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion + _TORCH_IS_GE_1_2_0 = LooseVersion(torch.__version__) >= LooseVersion('1.2.0') @@ -105,7 +109,7 @@ def _coerce_datasets(config): stats_idxs = kwarray.shuffle(np.arange(len(_dset)), rng=0)[0:min(1000, len(_dset))] stats_subset = torch.utils.data.Subset(_dset, stats_idxs) - cacher = ub.Cacher('dset_mean', cfgstr=_dset.input_id + 'v3') + cacher = ub.Cacher('dset_mean', depends=_dset.input_id + 'v3') input_stats = cacher.tryload() from netharn.data.channel_spec import ChannelSpec @@ -823,8 +827,12 @@ def configure_hacks(config={}, **kw): config = _update_defaults(config, kw) if config.get('workers', 0) > 0: - import cv2 - cv2.setNumThreads(0) + try: + import cv2 + except ImportError: + pass + else: + cv2.setNumThreads(0) strat = config.get('sharing_strategy', None) if strat is not None and strat != 'default': diff --git a/netharn/criterions/focal.py b/netharn/criterions/focal.py index 8a855d96a341157ffc2014d33ef61d7c29fc38a4..9d3a9773cc8f17b01b657d50ee9b8c82e80858b8 100644 --- a/netharn/criterions/focal.py +++ b/netharn/criterions/focal.py @@ -2,7 +2,10 @@ import torch # NOQA import torch.nn.functional as F import torch.nn.modules -from distutils.version import LooseVersion +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion if LooseVersion(torch.__version__) < LooseVersion('1.0.0'): diff --git a/netharn/data/batch_samplers.py b/netharn/data/batch_samplers.py index 4864a499598b0767db62ed8083e4a31ff3628c75..0b14d5bbc919e9a5d67f011f79648a8c3a10d862 100644 --- a/netharn/data/batch_samplers.py +++ b/netharn/data/batch_samplers.py @@ -66,12 +66,16 @@ class MatchingSamplerPK(ub.NiceRepr, torch.utils.data.sampler.BatchSampler): # For each of these any negative could be chosen # The number of distinct triples 
contributed by this PCC is the # product of num_pos_edges and num_neg_edges. - import scipy + import scipy # NOQA + try: + from scipy.special import comb + except ImportError: + from scipy.misc import comb self.num_triples = 0 self.num_pos_edges = 0 default_num_batches = 0 for pcc in ub.ProgIter(self.pccs, 'pccs', enabled=0): - num_pos_edges = scipy.special.comb(len(pcc), 2) + num_pos_edges = comb(len(pcc), 2) if num_pos_edges > 0: default_num_batches += len(pcc) other_pccs = [c for c in self.pccs if c is not pcc] diff --git a/netharn/data/data_containers.py b/netharn/data/data_containers.py index 96e7bdd1a5c765c341da9908b491d176666352c9..49c34256a93e6741c6c3dfcffef06475ef93164f 100644 --- a/netharn/data/data_containers.py +++ b/netharn/data/data_containers.py @@ -15,7 +15,6 @@ import torch import ubelt as ub import numpy as np # NOQA import re -import collections import torch.nn.functional as F # from torch.nn.parallel import DataParallel from itertools import chain @@ -23,6 +22,7 @@ from netharn.device import DataParallel, DataSerial, XPU from torch.nn.parallel._functions import _get_stream from torch.nn.parallel._functions import Scatter as OrigScatter from torch.nn.parallel._functions import Gather as OrigGather + try: import collections.abc as container_abcs from six import string_types as string_classes @@ -493,17 +493,17 @@ def container_collate(inbatch, num_devices=None): >>> print('batch = {}'.format(ub.repr2(batch, nl=1))) """ - if not isinstance(inbatch, collections.Sequence): + if not isinstance(inbatch, container_abcs.Sequence): raise TypeError("{} is not supported.".format(inbatch.dtype)) item0 = inbatch[0] if isinstance(item0, ItemContainer): return item0.__class__._collate(inbatch, num_devices=num_devices) - elif isinstance(item0, collections.Sequence): + elif isinstance(item0, container_abcs.Sequence): transposed = zip(*inbatch) return [container_collate(samples, num_devices=num_devices) for samples in transposed] - elif isinstance(item0, collections.Mapping): + elif isinstance(item0, container_abcs.Mapping): return { key: container_collate([d[key] for d in inbatch], num_devices=num_devices) diff --git a/netharn/data/transforms/augmenter_base.py b/netharn/data/transforms/augmenter_base.py index fc6d458352d534149b94391f8375a1530c5ed605..44d14d0c37727c6532a9c4692a0282ccb99b70a5 100644 --- a/netharn/data/transforms/augmenter_base.py +++ b/netharn/data/transforms/augmenter_base.py @@ -5,8 +5,11 @@ try: import imgaug _Augmenter = imgaug.augmenters.Augmenter except Exception: - import warnings - warnings.warn('imgaug is not availble', DeprecationWarning) + # imgaug is deprecated, don't warn and don't use + imgaug = None + if 0: + import warnings + warnings.warn('imgaug is not available', DeprecationWarning) _Augmenter = object @@ -16,6 +19,8 @@ class ParamatarizedAugmenter(_Augmenter): """ def __init__(self, *args, **kwargs): + if imgaug is None: + raise Exception('imgaug is not available, but is needed to create an instance of ParamatarizedAugmenter. 
Moving away from imgaug is encouraged') super(ParamatarizedAugmenter, self).__setattr__('_initialized', True) super(ParamatarizedAugmenter, self).__setattr__('_registered_params', OrderedDict()) super(ParamatarizedAugmenter, self).__init__(*args, **kwargs) diff --git a/netharn/data/transforms/augmenters.py b/netharn/data/transforms/augmenters.py index 178f92fb96c1f8e0d8bb70bd0db7414b6bd3e6ae..27f947ec7cd44c5d453febbb888d4db723bc0b29 100644 --- a/netharn/data/transforms/augmenters.py +++ b/netharn/data/transforms/augmenters.py @@ -8,8 +8,9 @@ try: import imgaug from imgaug.parameters import (Uniform, Binomial) except Exception: - import warnings - warnings.warn('imgaug is not availble', DeprecationWarning) + if 0: + import warnings + warnings.warn('imgaug is not availble', DeprecationWarning) def demodata_hsv_image(w=200, h=200): diff --git a/netharn/examples/cifar.py b/netharn/examples/cifar.py index bb18fb3b43d2aa31cd635caf8064c897196b2bb0..1c1b36b24e80bd09e5b224e8a1b4122be1cff82a 100644 --- a/netharn/examples/cifar.py +++ b/netharn/examples/cifar.py @@ -776,7 +776,7 @@ def setup_harn(): hyper = nh.HyperParams( # Datasets must be preconstructed datasets=datasets, - nice=config['nice'], + name=config['nice'], # Loader may be preconstructed loaders=loaders, workdir=config['workdir'], @@ -844,17 +844,17 @@ def main(): if ub.argflag('--lrtest'): """ python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 \ - --nice=test_cifar9 --optim=adamw --schedule=Exponential-g0.98 \ + --name=test_cifar9 --optim=adamw --schedule=Exponential-g0.98 \ --lr=0.1 --init=kaiming_normal \ --batch_size=2048 --lrtest --show python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b7 \ - --nice=test_cifar9 --optim=adamw --schedule=Exponential-g0.98 \ + --name=test_cifar9 --optim=adamw --schedule=Exponential-g0.98 \ --lr=0.1 --init=kaiming_normal \ --batch_size=256 --lrtest --show python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b7 \ - --nice=test_cifar9 --optim=adamw --schedule=Exponential-g0.98 \ + --name=test_cifar9 --optim=adamw --schedule=Exponential-g0.98 \ --lr=4e-2 --init=kaiming_normal \ --batch_size=256 """ @@ -924,43 +924,43 @@ if __name__ == '__main__': CommandLine: - python -m netharn.examples.cifar --xpu=0 --nice=resnet50_baseline --arch=resnet50 --optim=sgd --schedule=step-150-250 --lr=0.1 - python -m netharn.examples.cifar --xpu=0 --nice=wrn --arch=wrn_22 --optim=sgd --schedule=step-150-250 --lr=0.1 - python -m netharn.examples.cifar --xpu=0 --nice=densenet --arch=densenet121 --optim=sgd --schedule=step-150-250 --lr=0.1 + python -m netharn.examples.cifar --xpu=0 --name=resnet50_baseline --arch=resnet50 --optim=sgd --schedule=step-150-250 --lr=0.1 + python -m netharn.examples.cifar --xpu=0 --name=wrn --arch=wrn_22 --optim=sgd --schedule=step-150-250 --lr=0.1 + python -m netharn.examples.cifar --xpu=0 --name=densenet --arch=densenet121 --optim=sgd --schedule=step-150-250 --lr=0.1 - python -m netharn.examples.cifar --xpu=0 --nice=se_resnet18 --arch=se_resnet18 --optim=sgd --schedule=step-150-250 --lr=0.01 --init=noop --decay=1e-5 --augment=simple + python -m netharn.examples.cifar --xpu=0 --name=se_resnet18 --arch=se_resnet18 --optim=sgd --schedule=step-150-250 --lr=0.01 --init=noop --decay=1e-5 --augment=simple - python -m netharn.examples.cifar --xpu=0 --nice=resnet50_newaug_b128 --batch_size=128 --arch=resnet50 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple + python -m netharn.examples.cifar --xpu=0 --name=resnet50_newaug_b128 
--batch_size=128 --arch=resnet50 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet7_newaug_b128 --batch_size=128 --arch=efficientnet-b7 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple + python -m netharn.examples.cifar --xpu=0 --name=efficientnet7_newaug_b128 --batch_size=128 --arch=efficientnet-b7 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet3_newaug_b128 --batch_size=128 --arch=efficientnet-b3 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple + python -m netharn.examples.cifar --xpu=0 --name=efficientnet3_newaug_b128 --batch_size=128 --arch=efficientnet-b3 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_newaug_b128 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_newaug_b128 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_transfer_b128_sz32 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_transfer_b128_sz32 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_transfer_b64_sz224 --batch_size=64 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=224,224 + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_transfer_b64_sz224 --batch_size=64 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=224,224 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_newaug_b64_sz224 --batch_size=64 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple --input_dims=224,224 + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_newaug_b64_sz224 --batch_size=64 --arch=efficientnet-b0 --optim=sgd --schedule=step-150-250 --lr=0.1 --init=kaiming_normal --augment=simple --input_dims=224,224 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_transfer_b128_sz32_v2 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-20-45-70-90-f5 --max_epoch=100 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 # 88% + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_transfer_b128_sz32_v2 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-20-45-70-90-f5 --max_epoch=100 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 # 88% - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_transfer_b128_sz32_v3 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-13-20-45-70-90-f5 --max_epoch=100 --lr=0.01 --decay=5e-4 --init=cls 
--augment="crop,flip,gray,cutout" --input_dims=32,32 + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_transfer_b128_sz32_v3 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-13-20-45-70-90-f5 --max_epoch=100 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_transfer_b128_sz32_v4 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-10-20-45-70-90-f5 --max_epoch=100 --lr=0.03 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_transfer_b128_sz32_v4 --batch_size=128 --arch=efficientnet-b0 --optim=sgd --schedule=step-10-20-45-70-90-f5 --max_epoch=100 --lr=0.03 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=32,32 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_transfer_b64_sz224_v2 --batch_size=64 --arch=efficientnet-b0 --optim=sgd --schedule=step-10-20 --max_epoch=100 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=224,224 + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_transfer_b64_sz224_v2 --batch_size=64 --arch=efficientnet-b0 --optim=sgd --schedule=step-10-20 --max_epoch=100 --lr=0.01 --decay=5e-4 --init=cls --augment="crop,flip,gray,cutout" --input_dims=224,224 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet0_newaug_yogi_b1024 \ + python -m netharn.examples.cifar --xpu=0 --name=efficientnet0_newaug_yogi_b1024 \ --batch_size=1028 --arch=efficientnet-b0 --optim=Yogi \ --schedule=step-60-120-160-250-350-f5 --decay=5e-4 --lr=0.01549 \ --init=kaiming_normal --augment=simple --grad_norm_max=35 \ --warmup_iters=100 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet1_newaug_diffgrad_b1024 \ + python -m netharn.examples.cifar --xpu=0 --name=efficientnet1_newaug_diffgrad_b1024 \ --batch_size=1028 --arch=efficientnet-b1 --optim=DiffGrad \ --schedule=step-60-120-160-250-350-f5 --decay=5e-4 --lr=0.01 \ --init=kaiming_normal --augment=simple --grad_norm_max=35 \ @@ -968,7 +968,7 @@ if __name__ == '__main__': # Params from Cutout paper: https://arxiv.org/pdf/1708.04552.pdf - python -m netharn.examples.cifar --xpu=0 --nice=repro_cutout \ + python -m netharn.examples.cifar --xpu=0 --name=repro_cutout \ --batch_size=128 \ --arch=efficientnet-b0 \ --optim=sgd --lr=0.01 --decay=5e-4 \ @@ -976,7 +976,7 @@ if __name__ == '__main__': --init=kaiming_normal --augment=simple \ --grad_norm_max=35 --warmup_iters=100 - python -m netharn.examples.cifar --xpu=0 --nice=repro_cutoutDiffGrad \ + python -m netharn.examples.cifar --xpu=0 --name=repro_cutoutDiffGrad \ --batch_size=128 \ --arch=efficientnet-b1 \ --optim=DiffGrad --lr=0.01 --decay=5e-4 \ @@ -987,7 +987,7 @@ if __name__ == '__main__': 0.015219216761025578 - python -m netharn.examples.cifar --xpu=0 --nice=efficientnet7_scratch \ + python -m netharn.examples.cifar --xpu=0 --name=efficientnet7_scratch \ --arch=efficientnet-b7 --optim=sgd --schedule=step-150-250-350 \ --batch_size=512 --lr=0.01 --init=noop --decay=1e-5 @@ -998,13 +998,13 @@ if __name__ == '__main__': python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 # This next command requires a bit more compute - python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --nice=test_cifar2 --schedule=step-3-6-50 --lr=0.1 --init=cls --batch_size=2718 + python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --name=test_cifar2 
--schedule=step-3-6-50 --lr=0.1 --init=cls --batch_size=2718 - python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --nice=test_cifar3 --schedule=step-3-6-12-16 --lr=0.256 --init=cls --batch_size=3000 --workers=2 --num_vali=0 --optim=rmsprop + python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --name=test_cifar3 --schedule=step-3-6-12-16 --lr=0.256 --init=cls --batch_size=3000 --workers=2 --num_vali=0 --optim=rmsprop - python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --nice=test_cifar3 --schedule=onecycle70 --lr=0.01 --init=cls --batch_size=3000 --workers=2 --num_vali=0 --optim=sgd --datasets=cifar100 + python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --name=test_cifar3 --schedule=onecycle70 --lr=0.01 --init=cls --batch_size=3000 --workers=2 --num_vali=0 --optim=sgd --datasets=cifar100 - python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --nice=test_cifar2 --schedule=ReduceLROnPlateau-p1-c1-f0.9 --lr=0.1 --init=cls --batch_size=2719 --workers=4 --optim=sgd --datasets=cifar100 + python -m netharn.examples.cifar --xpu=0 --arch=efficientnet-b0 --name=test_cifar2 --schedule=ReduceLROnPlateau-p1-c1-f0.9 --lr=0.1 --init=cls --batch_size=2719 --workers=4 --optim=sgd --datasets=cifar100 python -m netharn.examples.cifar.py --xpu=0 --arch=densenet121 # Train on two GPUs with a larger batch size diff --git a/netharn/examples/classification.py b/netharn/examples/classification.py index 5a3c238d3aa76fff5d7ec2c2c6b3cc2b28fdc7a6..d2fa37df965efad64eaacb5a572a3760d939b5ce 100644 --- a/netharn/examples/classification.py +++ b/netharn/examples/classification.py @@ -739,7 +739,7 @@ def setup_harn(cmdline=True, **kw): stats_idxs = kwarray.shuffle(np.arange(len(_dset)), rng=0)[0:min(1000, len(_dset))] stats_subset = torch.utils.data.Subset(_dset, stats_idxs) - cacher = ub.Cacher('dset_mean', cfgstr=_dset.input_id + 'v3') + cacher = ub.Cacher('dset_mean', depends=_dset.input_id + 'v3') input_stats = cacher.tryload() channels = ChannelSpec.coerce(config['channels']) diff --git a/netharn/examples/object_detection.py b/netharn/examples/object_detection.py index 760ecde138c25dc58fff257b0f6810659977c163..c3c6ca29d760867023612b7d45ca1c7348595ee5 100644 --- a/netharn/examples/object_detection.py +++ b/netharn/examples/object_detection.py @@ -723,7 +723,7 @@ def setup_harn(cmdline=True, **kw): _dset = torch_datasets['train'] stats_idxs = kwarray.shuffle(np.arange(len(_dset)), rng=0)[0:min(1000, len(_dset))] stats_subset = torch.utils.data.Subset(_dset, stats_idxs) - cacher = ub.Cacher('dset_mean', cfgstr=_dset.input_id + 'v2') + cacher = ub.Cacher('dset_mean', depends=_dset.input_id + 'v2') input_stats = cacher.tryload() if input_stats is None: # Use parallel workers to load data faster @@ -893,7 +893,7 @@ if __name__ == '__main__': --workers=4 --xpu=auto --batch_size=4 --bstep=4 python -m netharn.examples.object_detection \ - --nice=voc-detection-demo \ + --name=voc-detection-demo \ --train_dataset=~/data/VOC/voc-trainval.mscoco.json \ --vali_dataset=~/data/VOC/voc-test-2007.mscoco.json \ --pretrained=imagenet \ diff --git a/netharn/examples/segmentation.py b/netharn/examples/segmentation.py index deb0f334ee4733ab8fc1168f83fdae0e7e77c855..60f0dafb5e154658c58142a3e307219e7d154fd7 100644 --- a/netharn/examples/segmentation.py +++ b/netharn/examples/segmentation.py @@ -555,7 +555,7 @@ def _cached_class_frequency(dset, workers=0): dset_copy.augmenter = None cfgstr = '_'.join([dset_copy.sampler.dset.hashid, 'v1']) - cacher = ub.Cacher('class_freq', 
cfgstr=cfgstr) + cacher = ub.Cacher('class_freq', depends=cfgstr) total_freq = cacher.tryload() if total_freq is None: @@ -720,7 +720,7 @@ def setup_harn(cmdline=True, **kw): stats_dset = torch_datasets['train'] stats_idxs = kwarray.shuffle(np.arange(len(stats_dset)), rng=0)[0:min(1000, len(stats_dset))] stats_subset = torch.utils.data.Subset(stats_dset, stats_idxs) - cacher = ub.Cacher('dset_mean', cfgstr=stats_dset.input_id + 'v3') + cacher = ub.Cacher('dset_mean', depends=stats_dset.input_id + 'v3') input_stats = cacher.tryload() if input_stats is None: loader = torch.utils.data.DataLoader( @@ -756,7 +756,7 @@ def setup_harn(cmdline=True, **kw): # Create hyperparameters hyper = nh.HyperParams( - nice=config['name'], + name=config['name'], workdir=config['workdir'], xpu=nh.XPU.coerce(config['xpu']), diff --git a/netharn/examples/sseg_camvid.py b/netharn/examples/sseg_camvid.py index 851a576d3040b2a10c16908e6d16ed7ef458043c..d994b022656b6d30d207e030127572c9a105f4f1 100644 --- a/netharn/examples/sseg_camvid.py +++ b/netharn/examples/sseg_camvid.py @@ -7,7 +7,7 @@ MS-COCO dataset see segmentation.py. NOTE: This will eventually be deprecated and repalced by "segmentation.py" CommandLine: - python ~/code/netharn/examples/sseg_camvid.py --workers=4 --xpu=0 --batch_size=2 --nice=expt1 + python ~/code/netharn/examples/sseg_camvid.py --workers=4 --xpu=0 --batch_size=2 --name=expt1 """ from __future__ import absolute_import, division, print_function, unicode_literals from os.path import join @@ -840,7 +840,7 @@ def _cached_class_frequency(dset): dset_copy.augmenter = None cfgstr = '_'.join([dset_copy.sampler.dset.hashid, 'v1']) - cacher = ub.Cacher('class_freq', cfgstr=cfgstr) + cacher = ub.Cacher('class_freq', depends=cfgstr) total_freq = cacher.tryload() if total_freq is None: @@ -1102,19 +1102,19 @@ if __name__ == '__main__': CommandLine: python -m netharn.examples.sseg_camvid \ - --nice=camvid_segnet --arch=segnet --init=cls \ + --name=camvid_segnet --arch=segnet --init=cls \ --workers=4 --xpu=auto \ --batch_size=16 --lr=1e-3 \ --input_dims=64,64 python -m netharn.examples.sseg_camvid \ - --nice=camvid_psp --arch=psp --init=cls \ + --name=camvid_psp --arch=psp --init=cls \ --workers=4 --xpu=auto \ --batch_size=16 --lr=1e-3 \ --input_dims=64,64 python -m netharn.examples.sseg_camvid \ - --nice=camvid_deeplab --arch=deeplab --init=cls \ + --name=camvid_deeplab --arch=deeplab --init=cls \ --workers=4 --xpu=auto \ --batch_size=16 --lr=1e-3 \ --input_dims=64,64 diff --git a/netharn/examples/yolo_voc.py b/netharn/examples/yolo_voc.py index 315d66be9d7cf297d49cb99f48d2c21920b2b14b..327bdf4b14bd0853bc87f7e8318212193387aaeb 100644 --- a/netharn/examples/yolo_voc.py +++ b/netharn/examples/yolo_voc.py @@ -657,7 +657,7 @@ def setup_yolo_harness(bsize=16, workers=0): xpu = nh.XPU.coerce('argv') - nice = ub.argval('--nice', default='Yolo2Baseline') + nice = ub.argval('--name', default='Yolo2Baseline') batch_size = int(ub.argval('--batch_size', default=bsize)) bstep = int(ub.argval('--bstep', 4)) workers = int(ub.argval('--workers', default=workers)) @@ -840,43 +840,43 @@ if __name__ == '__main__': r""" CommandLine: srun -c 4 -p priority --gres=gpu:1 \ - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=16 --nice=rescaled --lr=0.001 --bstep=4 --workers=4 + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=16 --name=rescaled --lr=0.001 --bstep=4 --workers=4 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=16 --nice=new_loss_v2 --lr=0.001 --bstep=4 --workers=4 + python 
-m netharn.examples.yolo_voc train --gpu=0 --batch_size=16 --name=new_loss_v2 --lr=0.001 --bstep=4 --workers=4 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=16 --nice=eav_run --lr=0.001 --bstep=4 --workers=6 --eav - python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=16 --nice=pjr_run2 --lr=0.001 --bstep=4 --workers=6 + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=16 --name=eav_run --lr=0.001 --bstep=4 --workers=6 --eav + python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=16 --name=pjr_run2 --lr=0.001 --bstep=4 --workers=6 - python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=16 --nice=fixed_nms --lr=0.001 --bstep=4 --workers=6 + python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=16 --name=fixed_nms --lr=0.001 --bstep=4 --workers=6 - python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=16 --nice=fixed_lrs --lr=0.001 --bstep=4 --workers=6 + python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=16 --name=fixed_lrs --lr=0.001 --bstep=4 --workers=6 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=eav_run2 --lr=0.001 --bstep=4 --workers=8 --eav - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=pjr_run2 --lr=0.001 --bstep=4 --workers=4 + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=eav_run2 --lr=0.001 --bstep=4 --workers=8 --eav + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=pjr_run2 --lr=0.001 --bstep=4 --workers=4 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=4 --nice=pjr_run2 --lr=0.001 --bstep=8 --workers=4 + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=4 --name=pjr_run2 --lr=0.001 --bstep=8 --workers=4 - python -m netharn.examples.yolo_voc train --gpu=0,1 --batch_size=32 --nice=july23 --lr=0.001 --bstep=2 --workers=8 - python -m netharn.examples.yolo_voc train --gpu=2 --batch_size=16 --nice=july23_lr_x8 --lr=0.008 --bstep=4 --workers=6 + python -m netharn.examples.yolo_voc train --gpu=0,1 --batch_size=32 --name=july23 --lr=0.001 --bstep=2 --workers=8 + python -m netharn.examples.yolo_voc train --gpu=2 --batch_size=16 --name=july23_lr_x8 --lr=0.008 --bstep=4 --workers=6 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=batchaware2 --lr=0.001 --bstep=8 --workers=3 + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=batchaware2 --lr=0.001 --bstep=8 --workers=3 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=july_eav_run3 --lr=0.001 --bstep=8 --workers=6 --eav - python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=8 --nice=july_eav_run4 --lr=0.002 --bstep=8 --workers=6 --eav - python -m netharn.examples.yolo_voc train --gpu=2 --batch_size=16 --nice=july_pjr_run4 --lr=0.001 --bstep=4 --workers=6 + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=july_eav_run3 --lr=0.001 --bstep=8 --workers=6 --eav + python -m netharn.examples.yolo_voc train --gpu=1 --batch_size=8 --name=july_eav_run4 --lr=0.002 --bstep=8 --workers=6 --eav + python -m netharn.examples.yolo_voc train --gpu=2 --batch_size=16 --name=july_pjr_run4 --lr=0.001 --bstep=4 --workers=6 - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=july_eav_run4_hack1 --lr=0.001 --bstep=8 --workers=6 --eav --weights=/home/local/KHQ/jon.crall/work/voc_yolo2/fit/nice/july_eav_run_hack/torch_snapshots/_epoch_00000150.pt + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 
--name=july_eav_run4_hack1 --lr=0.001 --bstep=8 --workers=6 --eav --weights=/home/local/KHQ/jon.crall/work/voc_yolo2/fit/nice/july_eav_run_hack/torch_snapshots/_epoch_00000150.pt - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=lightnet_start --lr=0.001 --bstep=8 --workers=6 --eav --weights=lightnet + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=lightnet_start --lr=0.001 --bstep=8 --workers=6 --eav --weights=lightnet - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=HOPE --lr=0.001 --bstep=8 --workers=6 --eav --weights=imagenet - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=HOPE2 --lr=0.001 --bstep=8 --workers=6 --eav --weights=imagenet - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=HOPE3 --lr=0.001 --bstep=8 --workers=4 --eav --weights=imagenet + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=HOPE --lr=0.001 --bstep=8 --workers=6 --eav --weights=imagenet + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=HOPE2 --lr=0.001 --bstep=8 --workers=6 --eav --weights=imagenet + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=HOPE3 --lr=0.001 --bstep=8 --workers=4 --eav --weights=imagenet - python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --nice=HOPE4 --lr=0.001 --bstep=8 --workers=4 --eav --weights=imagenet + python -m netharn.examples.yolo_voc train --gpu=0 --batch_size=8 --name=HOPE4 --lr=0.001 --bstep=8 --workers=4 --eav --weights=imagenet python -m netharn.examples.yolo_voc train --gpu=0 --workers=4 --weights=lightnet diff --git a/netharn/export/__init__.py b/netharn/export/__init__.py index 15794cc6d13320606e6011b5accdcdcf104f86f2..118cee5e67d6b822f8dbae874511f17ddfab8a8b 100644 --- a/netharn/export/__init__.py +++ b/netharn/export/__init__.py @@ -1,4 +1,4 @@ -""" +r""" NOTICE: ``netharn.export`` has been refactored into the packages ``liberator`` which performs general code extraction and ``torch_liberator`` which is specific to pytorch. This module is deprecated and will be removed in the @@ -54,7 +54,7 @@ Example: >>> }) >>> harn = nh.FitHarn(hyper) >>> harn.preferences['use_tensorboard'] = False - >>> harn.preferences['timeout'] = 1 + >>> harn.preferences['timeout'] = 10 >>> harn.intervals['test'] = 1 >>> harn.initialize(reset='delete') >>> harn.run() @@ -110,7 +110,11 @@ Example: >>> # Now create an instance of deployed model that points to the >>> # Training dpath. (Note the directory structure setup by netharn is >>> # itself a deployment, it just has multiple files) + >>> import time >>> deployer = torch_liberator.DeployedModel(harn.train_dpath) + >>> train_path = ub.Path(harn.train_dpath) + >>> print(ub.repr2(list(train_path.walk()))) + >>> print('deployer.info = {}'.format(ub.repr2(deployer.info, nl=1))) >>> # Use the DeployedModel to package the imporant info in train_dpath >>> # into a standalone zipfile. 
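+ >>> # (The resulting deploy zipfile is intended to be self-contained; it + >>> # should be loadable later via torch_liberator.DeployedModel without + >>> # needing the original training directory.)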
>>> zip_fpath = deployer.package() @@ -141,6 +145,10 @@ Example: outputs = tensor([[0.4105, 0.5895]], grad_fn=) model.__module__ = 'deploy_ToyNet2d_onnxqaww_002_HVWCGI/ToyNet2d_2a3f49' harn.model.module.__module__ = 'netharn.models.toynet' + >>> model = None + >>> loader = None + >>> outputs = None + >>> images = None """ from netharn.export import deployer from netharn.export import exporter diff --git a/netharn/fit_harn.py b/netharn/fit_harn.py index 54c048ab0315b1021e9751d17f06a0b70e719f72..38623de9d8e6124820900d5cf9cb0dcb2d025e82 100644 --- a/netharn/fit_harn.py +++ b/netharn/fit_harn.py @@ -153,7 +153,6 @@ import traceback from os.path import join from os.path import exists from os.path import dirname -from distutils.version import LooseVersion import torch import numpy as np @@ -168,6 +167,15 @@ from netharn.util import profiler from netharn.util import strip_ansi from netharn.exceptions import (CannotResume, SkipBatch, StopTraining, TrainingDiverged) + +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion + + +# Hack: patch collections so tensorboard_logger doesnt die +from netharn import monkey # NOQA try: import tensorboard_logger except ImportError: @@ -333,18 +341,22 @@ class ExtraMixins(object): References: https://github.com/pytorch/pytorch/issues/1355 """ - import cv2 n_workers = max(loader.num_workers for loader in harn.loaders.values() if loader is not None) if n_workers > 1: - n_threads = cv2.getNumThreads() - if n_threads > 1: - msg = ('OpenCV threadcount of {} is non-zero and a DataLoader ' - 'is using {} workers. This may cause deadlocks ' - 'To be safe use cv2.setNumThreads(0)').format( - n_threads, n_workers) - warnings.warn(msg, RuntimeWarning) - harn.warn(msg) + try: + import cv2 + except ImportError: + pass + else: + n_threads = cv2.getNumThreads() + if n_threads > 1: + msg = ('OpenCV threadcount of {} is non-zero and a DataLoader ' + 'is using {} workers. 
This may cause deadlocks ' + 'To be safe use cv2.setNumThreads(0)').format( + n_threads, n_workers) + warnings.warn(msg, RuntimeWarning) + harn.warn(msg) @register_mixin diff --git a/netharn/hyperparams.py b/netharn/hyperparams.py index fef7b35d848c5ae6ca43f38de0997765c39690ec..b0b143d476c5d0e0b6dd1eb5a526615cc8d21485 100644 --- a/netharn/hyperparams.py +++ b/netharn/hyperparams.py @@ -524,7 +524,7 @@ class HyperParams(object): return model def make_optimizer(hyper, named_parameters): - """ + r""" Instantiate the optimizer defined by the hyperparams Contains special logic to create param groups diff --git a/netharn/initializers/lsuv.py b/netharn/initializers/lsuv.py index 7882edbfc70aaa0bf7658e720b14ef247abd8d5d..aed4d105058e7e693e1143b7395505508e7eb324 100644 --- a/netharn/initializers/lsuv.py +++ b/netharn/initializers/lsuv.py @@ -40,7 +40,7 @@ def svd_orthonormal(shape, rng=None, cache_key=None): # TODO: only cache very large matrices (4096x4096) # TODO: only cache very large matrices, not (256,256,3,3) cacher = ub.Cacher('svd_orthonormal', appname='netharn', enabled=enabled, - cfgstr=cfgstr) + depends=cfgstr) q = cacher.tryload() if q is None: # print('Compute orthonormal matrix with shape ' + str(shape)) diff --git a/netharn/mixins.py b/netharn/mixins.py index ac12691d7b323f607886beb9649518e7be0c7042..47ac5a9fcdcb97d39c38d99a55b3e5ddec067858 100644 --- a/netharn/mixins.py +++ b/netharn/mixins.py @@ -9,7 +9,10 @@ The purpose of this file is to contain functions that might not general-purpose enough to add to FitHarn itself, but they are also common enough, where it makes no sense to write them from scratch for each new project. """ -from distutils.version import LooseVersion +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion def _dump_monitor_tensorboard(harn, mode='epoch', special_groupers=['loss'], diff --git a/netharn/models/yolo2/light_region_loss.py b/netharn/models/yolo2/light_region_loss.py index ced201efc22acba14bf743008d40d4642f6f583b..57776b4304e8c8dc6578636c441520ccd045ca30 100644 --- a/netharn/models/yolo2/light_region_loss.py +++ b/netharn/models/yolo2/light_region_loss.py @@ -12,7 +12,10 @@ Speedups import torch import torch.nn as nn import numpy as np # NOQA -from distutils.version import LooseVersion +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion _TORCH_HAS_BOOL_COMP = LooseVersion(torch.__version__) >= LooseVersion('1.2.0') diff --git a/netharn/models/yolo2/yolo2.py b/netharn/models/yolo2/yolo2.py index 4e434f4fb4c669c4f1d9ffcdaa1b7c1078b3af58..c5f05d9a575c2c2ce602d9ebc86b5923f28295ff 100644 --- a/netharn/models/yolo2/yolo2.py +++ b/netharn/models/yolo2/yolo2.py @@ -9,7 +9,10 @@ import torch import torch.nn as nn import ubelt as ub from netharn import layers -from distutils.version import LooseVersion +try: # nocover + from packaging.version import parse as LooseVersion +except ImportError: + from distutils.version import LooseVersion _TORCH_HAS_BOOL_COMP = LooseVersion(torch.__version__) >= LooseVersion('1.2.0') diff --git a/netharn/monitor.py b/netharn/monitor.py index 34964273a7a595f420956b72d7495732ac44100c..1260e6b953e3dcfae9501bdef02c9739bb4999e8 100644 --- a/netharn/monitor.py +++ b/netharn/monitor.py @@ -13,12 +13,13 @@ import ubelt as ub __all__ = ['Monitor'] -def demodata_monitor(): +def demodata_monitor(ignore_first_epochs=0): rng = np.random.RandomState(0) n = 300 losses = 
(sorted(rng.randint(10, n, size=n)) + rng.randint(0, 20, size=n) - 10)[::-1] mious = (sorted(rng.randint(10, n, size=n)) + rng.randint(0, 20, size=n) - 10) - monitor = Monitor(minimize=['loss'], maximize=['miou'], smoothing=0.0) + monitor = Monitor(minimize=['loss'], maximize=['miou'], smoothing=0.0, + ignore_first_epochs=ignore_first_epochs) for epoch, (loss, miou) in enumerate(zip(losses, mious)): monitor.update(epoch, {'loss': loss, 'miou': miou}) return monitor @@ -40,6 +41,9 @@ class Monitor(ub.NiceRepr): to wait before quiting if the quality metrics are not improving. min_lr (float): If specified stop learning after lr drops beyond this point + ignore_first_epochs (int): If specified, ignore the results from the + first few epochs. Determine what the best model is after this + point. Example: >>> # simulate loss going down and then overfitting @@ -52,11 +56,40 @@ class Monitor(ub.NiceRepr): >>> for epoch, (loss, miou) in enumerate(zip(losses, mious)): >>> monitor.update(epoch, {'loss': loss, 'miou': miou}) >>> # xdoctest: +REQUIRES(--show) + >>> import kwplot + >>> kwplot.autompl() + >>> monitor.show() + + Example: + >>> # Test the ignore first param + >>> from netharn.monitor import * + >>> rng = np.random.RandomState(0) + >>> n = 300 + >>> losses = (sorted(rng.randint(10, n, size=n)) + rng.randint(0, 20, size=n) - 10)[::-1] + >>> mious = (sorted(rng.randint(10, n, size=n)) + rng.randint(0, 20, size=n) - 10) + >>> monitor = Monitor(minimize=['loss'], smoothing=.6, ignore_first_epochs=3) + >>> monitor.update(0, {'loss': 0.001}) + >>> monitor.update(1, {'loss': 9.40}) + >>> monitor.update(2, {'loss': 1.40}) + >>> monitor.update(3, {'loss': 0.40}) + >>> monitor.update(4, {'loss': 0.30}) + >>> monitor.update(5, {'loss': 0.35}) + >>> monitor.update(6, {'loss': 0.33}) + >>> monitor.update(7, {'loss': 0.31}) + >>> monitor.update(8, {'loss': 0.32}) + >>> monitor.update(9, {'loss': 0.33}) + >>> monitor.update(10, {'loss': 0.311}) + >>> monitor.update(11, {'loss': 0.4}) + >>> monitor.update(12, {'loss': 0.5}) + >>> monitor.update(13, {'loss': 0.6}) + >>> # xdoctest: +REQUIRES(--show) + >>> import kwplot + >>> kwplot.autompl() >>> monitor.show() """ def __init__(monitor, minimize=['loss'], maximize=[], smoothing=0.0, - patience=None, max_epoch=1000, min_lr=None): + patience=None, max_epoch=1000, min_lr=None, ignore_first_epochs=0): # Internal attributes monitor._ewma = util.ExpMovingAve(alpha=1 - smoothing) @@ -81,6 +114,7 @@ class Monitor(ub.NiceRepr): monitor.patience = patience monitor.max_epoch = max_epoch monitor.min_lr = min_lr + monitor.ignore_first_epochs = ignore_first_epochs def __nice__(self): import ubelt as ub @@ -88,6 +122,7 @@ class Monitor(ub.NiceRepr): 'patience': self.patience, 'max_epoch': self.max_epoch, 'min_lr': self.min_lr, + 'ignore_first_epochs': self.ignore_first_epochs, }, nl=0) @classmethod @@ -104,6 +139,7 @@ class Monitor(ub.NiceRepr): >>> cls, initkw = Monitor.coerce(config) >>> print('initkw = {}'.format(ub.repr2(initkw, nl=1))) initkw = { + 'ignore_first_epochs': 0, 'max_epoch': 100, 'min_lr': 1e-05, 'minimize': ['loss'], @@ -118,6 +154,7 @@ class Monitor(ub.NiceRepr): 'max_epoch': max_epoch, 'patience': config.get('patience', max_epoch), 'min_lr': config.get('min_lr', None), + 'ignore_first_epochs': config.get('ignore_first_epochs', 0), }) def show(monitor): @@ -141,6 +178,7 @@ class Monitor(ub.NiceRepr): ) # star all the good epochs + monitor.best_epochs(1) flags = np.array(monitor._is_good) if np.any(flags): plt.plot(list(ub.compress(monitor._epochs, 
flags)), @@ -224,13 +262,23 @@ class Monitor(ub.NiceRepr): improved_keys = monitor._improved(_smooth_metrics, monitor._best_smooth_metrics) if improved_keys: - if monitor._best_smooth_metrics is None: - monitor._best_smooth_metrics = _smooth_metrics.copy() - monitor._best_raw_metrics = _raw_metrics.copy() - else: - for key in improved_keys: - monitor._best_smooth_metrics[key] = _smooth_metrics[key] - monitor._best_raw_metrics[key] = _raw_metrics[key] + + ignore_this_epoch = False + if monitor._current_epoch is not None: + # If we are ignoring the monitor in the first few epochs then + # dont store the metrics + if monitor._current_epoch < monitor.ignore_first_epochs: + ignore_this_epoch = True + + if not ignore_this_epoch: + if monitor._best_smooth_metrics is None: + monitor._best_smooth_metrics = _smooth_metrics.copy() + monitor._best_raw_metrics = _raw_metrics.copy() + else: + for key in improved_keys: + monitor._best_smooth_metrics[key] = _smooth_metrics[key] + monitor._best_raw_metrics[key] = _raw_metrics[key] + monitor._best_epoch = epoch monitor._n_bad_epochs = 0 else: @@ -277,7 +325,6 @@ class Monitor(ub.NiceRepr): monitor.rel_threshold = 1e-6 rel_epsilon = 1.0 - monitor.rel_threshold improved_flags = (sign1 * current) < (rel_epsilon * sign2 * best) - # * rel_epsilon improved_keys = list(ub.compress(keys, improved_flags)) return improved_keys @@ -394,21 +441,35 @@ class Monitor(ub.NiceRepr): >>> monitor = demodata_monitor() >>> ranked_epochs = monitor._rank('loss', smooth=False) >>> ranked_epochs = monitor._rank('miou', smooth=True) + + >>> monitor = demodata_monitor(ignore_first_epochs=10) + >>> ranked_epochs = monitor._rank('loss', smooth=False) + >>> assert 1 not in ranked_epochs + >>> ranked_epochs = monitor._rank('miou', smooth=True) + >>> assert 1 not in ranked_epochs """ if smooth: metrics = monitor._smooth_metrics else: metrics = monitor._raw_metrics - values = [m[key] for m in metrics] - sortx = np.argsort(values) + epochs = np.array(monitor._epochs) + values = np.array([m[key] for m in metrics]) + is_valid = np.array( + [False if e is None else + int(e) >= int(monitor.ignore_first_epochs) + for e in monitor._epochs], dtype=bool) + + valid_values = values[is_valid] + valid_epochs = epochs[is_valid] + if key in monitor.maximize: - sortx = np.argsort(values)[::-1] + valid_sortx = np.argsort(valid_values)[::-1] elif key in monitor.minimize: - sortx = np.argsort(values) + valid_sortx = np.argsort(valid_values) else: raise KeyError(type) - ranked_epochs = np.array(monitor._epochs)[sortx] + ranked_epochs = valid_epochs[valid_sortx] return ranked_epochs def _BROKEN_rank_epochs(monitor): diff --git a/netharn/monkey.py b/netharn/monkey.py new file mode 100644 index 0000000000000000000000000000000000000000..5e77c5b288c4707b138e638bd54455a9ce009b8f --- /dev/null +++ b/netharn/monkey.py @@ -0,0 +1,10 @@ +""" +Handles monkey patching for system compatability +""" +import sys +if sys.version_info[0:2] >= (3, 10): + # Workaround for tensorboard_logger + import collections + from collections import abc + collections.MutableMapping = abc.MutableMapping + collections.MutableSequence = abc.MutableSequence diff --git a/netharn/util/__init__.py b/netharn/util/__init__.py index 6128445026bd1f976140ba99a71f068b8101e3a9..caa9d48a81d1f786da8c508e03d7114fd870a325 100644 --- a/netharn/util/__init__.py +++ b/netharn/util/__init__.py @@ -62,7 +62,7 @@ from .util_idstr import (compact_idstr, make_idstr, make_short_idstr,) from .util_inspect import (default_kwargs,) from .util_io import (read_arr, 
read_h5arr, write_arr, write_h5arr,) from .util_iter import (roundrobin,) -from .util_json import (IndexableWalker, LossyJSONEncoder, NumpyEncoder, +from .util_json import (LossyJSONEncoder, NumpyEncoder, ensure_json_serializable, read_json, walk_json, write_json,) from .util_misc import (FlatIndexer, SupressPrint, align, align_lines, @@ -80,7 +80,7 @@ from .util_zip import (split_archive, zopen,) __all__ = ['BatchNormContext', 'CumMovingAve', 'DisableBatchNorm', 'ExpMovingAve', 'FlatIndexer', 'IS_PROFILING', 'IgnoreLayerContext', - 'IndexableWalker', 'InternalRunningStats', 'LossyJSONEncoder', + 'InternalRunningStats', 'LossyJSONEncoder', 'ModuleMixin', 'MovingAve', 'NumpyEncoder', 'RunningStats', 'SlidingWindow', 'Stitcher', 'SupressPrint', 'WindowedMovingAve', 'absdev', 'adjust_gamma', 'adjust_subplots', 'aggensure', 'align', diff --git a/netharn/util/util_json.py b/netharn/util/util_json.py index 78eb6454e9f0101bdaa02553eab61d4c588a2883..821f5f9ac6c095f0aca6f5a3425882444b85ebd1 100644 --- a/netharn/util/util_json.py +++ b/netharn/util/util_json.py @@ -6,7 +6,6 @@ import six import torch import numpy as np import ubelt as ub -from collections.abc import Generator from collections import OrderedDict @@ -174,7 +173,7 @@ def ensure_json_serializable(dict_, normalize_containers=False, verbose=0): c = dict(c) return c - walker = IndexableWalker(dict_) + walker = ub.IndexableWalker(dict_) for prefix, value in walker: if isinstance(value, tuple): new_value = list(value) @@ -191,9 +190,9 @@ def ensure_json_serializable(dict_, normalize_containers=False, verbose=0): elif isinstance(value, (np.floating)): new_value = float(value) walker[prefix] = new_value - elif isinstance(value, (np.complex)): - new_value = complex(value) - walker[prefix] = new_value + # elif isinstance(value, (np.complex)): + # new_value = complex(value) + # walker[prefix] = new_value elif hasattr(value, '__json__'): new_value = value.__json__() walker[prefix] = new_value @@ -206,150 +205,3 @@ def ensure_json_serializable(dict_, normalize_containers=False, verbose=0): # normalize the outer layer dict_ = _norm_container(dict_) return dict_ - - -class IndexableWalker(Generator): - """ - DEPRECATED FOR THE VERSION IN UBELT - - Traverses through a nested tree-liked indexable structure. - - Generates a path and value to each node in the structure. The path is a - list of indexes which if applied in order will reach the value. - - The ``__setitem__`` method can be used to modify a nested value based on the - path returned by the generator. - - When generating values, you can use "send" to prevent traversal of a - particular branch. 
- - Example: - >>> # Create nested data - >>> import numpy as np - >>> data = ub.ddict(lambda: int) - >>> data['foo'] = ub.ddict(lambda: int) - >>> data['bar'] = np.array([1, 2, 3]) - >>> data['foo']['a'] = 1 - >>> data['foo']['b'] = np.array([1, 2, 3]) - >>> data['foo']['c'] = [1, 2, 3] - >>> data['baz'] = 3 - >>> print('data = {}'.format(ub.repr2(data, nl=True))) - >>> # We can walk through every node in the nested tree - >>> walker = IndexableWalker(data) - >>> for path, value in walker: - >>> print('walk path = {}'.format(ub.repr2(path, nl=0))) - >>> if path[-1] == 'c': - >>> # Use send to prevent traversing this branch - >>> got = walker.send(False) - >>> # We can modify the value based on the returned path - >>> walker[path] = 'changed the value of c' - >>> print('data = {}'.format(ub.repr2(data, nl=True))) - >>> assert data['foo']['c'] == 'changed the value of c' - """ - - def __init__(self, data, dict_cls=(dict,), list_cls=(list, tuple)): - self.data = data - self.dict_cls = dict_cls - self.list_cls = list_cls - self.indexable_cls = self.dict_cls + self.list_cls - - self._walk_gen = None - - def __iter__(self): - """ - Iterates through the indexable ``self.data`` - - Can send a False flag to prevent a branch from being traversed - - Yields: - Tuple[List, Any] : - path (List): list of index operations to arrive at the value - value (object): the value at the path - """ - return self - - def __next__(self): - """ returns next item from this generator """ - if self._walk_gen is None: - self._walk_gen = self._walk(self.data, prefix=[]) - return next(self._walk_gen) - - def send(self, arg): - """ - send(arg) -> send 'arg' into generator, - return next yielded value or raise StopIteration. - """ - # Note: this will error if called before __next__ - self._walk_gen.send(arg) - - def throw(self, type=None, value=None, traceback=None): - """ - throw(typ[,val[,tb]]) -> raise exception in generator, - return next yielded value or raise StopIteration. - """ - raise StopIteration - - def __setitem__(self, path, value): - """ - Set nested value by path - - Args: - path (List): list of indexes into the nested structure - value (object): new value - """ - d = self.data - *prefix, key = path - for k in prefix: - d = d[k] - d[key] = value - - def __delitem__(self, path): - """ - Remove nested value by path - - Note: - It can be dangerous to use this while iterating (because we may try - to descend into a deleted location) or on leaf items that are - list-like (because the indexes of all subsequent items will be - modified). - - Args: - path (List): list of indexes into the nested structure. - The item at the last index will be removed. - """ - d = self.data - *prefix, key = path - for k in prefix: - d = d[k] - del d[key] - - def _walk(self, data, prefix=[]): - """ - Defines the underlying generator used by IndexableWalker - """ - stack = [(data, prefix)] - while stack: - _data, _prefix = stack.pop() - # Create an items iterable of depending on the indexable data type - if isinstance(_data, self.list_cls): - items = enumerate(_data) - elif isinstance(_data, self.dict_cls): - items = _data.items() - else: - raise TypeError(type(_data)) - - for key, value in items: - # Yield the full path to this position and its value - path = _prefix + [key] - message = yield path, value - # If the value at this path is also indexable, then continue - # the traversal, unless the False message was explicitly sent - # by the caller. 
- if message is False: - # Because the `send` method will return the next value, - # we yield a dummy value so we don't clobber the next - # item in the traversal. - yield None - else: - if isinstance(value, self.indexable_cls): - stack.append((value, path)) diff --git a/netharn/util/util_tensorboard.py b/netharn/util/util_tensorboard.py index 04dd83dc0d2eb9221ec88313ce589f53fa2938fa..4215020a3c78c6bb2cb2f4b279e7c71569334704 100644 --- a/netharn/util/util_tensorboard.py +++ b/netharn/util/util_tensorboard.py @@ -17,7 +17,7 @@ def read_tensorboard_scalars(train_dpath, verbose=1, cache=1): event_paths = sorted(glob.glob(join(train_dpath, 'events.out.tfevents*'))) # make a hash so we will re-read of we need to cfgstr = ub.hash_data(list(map(ub.hash_file, event_paths))) if cache else '' - cacher = ub.Cacher('tb_scalars', cfgstr=cfgstr, enabled=cache, + cacher = ub.Cacher('tb_scalars', depends=cfgstr, enabled=cache, dpath=join(train_dpath, '_cache')) datas = cacher.tryload() if datas is None: diff --git a/publish.sh b/publish.sh index c0b1260acf0f143f61520f2524bb46b03c00d702..97e60bc0ae1eee85f8c2857654364ed30ef6c74e 100755 --- a/publish.sh +++ b/publish.sh @@ -1,10 +1,10 @@ #!/bin/bash -__heredoc__=''' +__doc__=''' Script to publish a new version of this library on PyPI. If your script has binary dependencies then we assume that you have built a proper binary wheel with auditwheel and it exists in the wheelhouse directory. -Otherwise, for source tarballs and universal wheels this script runs the +Otherwise, for source tarballs and wheels this script runs the setup.py script to create the wheels as well. Running this script with the default arguments will perform any builds and gpg @@ -12,10 +12,54 @@ signing, but nothing will be uploaded to pypi unless the user explicitly sets DO_UPLOAD=True or answers yes to the prompts. Args: - # These environment variables must / should be set - TWINE_USERNAME : username for pypi - TWINE_PASSWORD : password for pypi - DO_GPG : defaults to True + TWINE_USERNAME (str) : + username for pypi. This must be set if uploading to pypi. + Defaults to "". + + TWINE_PASSWORD (str) : + password for pypi. This must be set if uploading to pypi. + Defaults to "". + + DO_GPG (bool) : + If True, sign the packages with a GPG key specified by `GPG_KEYID`. + defaults to auto. + + DO_UPLOAD (bool) : + If True, upload the packages to the pypi server specified by + `TWINE_REPOSITORY_URL`. + + DO_BUILD (bool) : + If True, will execute the setup.py build script, which is + expected to use setuptools. In the future we may add support for other + build systems. If False, this script will expect the pre-built packages + to exist in "wheelhouse/{NAME}-{VERSION}-{SUFFIX}.{EXT}". + + Defaults to "auto". + + DO_TAG (bool) : + if True, will "git tag" the current HEAD with + + TWINE_REPOSITORY_URL (url) : + The URL of the pypi server to upload to. + Defaults to "auto", which if on the release branch, this will default + to the live pypi server `https://upload.pypi.org/legacy` otherwise + this will default to the test.pypi server: + `https://test.pypi.org/legacy` + + GPG_KEYID (str) : + The keyid of the gpg key to sign with. (if DO_GPG=True). Defaults to + the local git config user.signingkey + + DEPLOY_REMOTE (str) : + The git remote to push any tags to. Defaults to "origin" + + GPG_EXECUTABLE (path) : + Path to the GPG executable. + Defaults to "auto", which chooses "gpg2" if it exists, otherwise "gpg". + + MODE (str): + Can be pure, binary, or all. 
Defaults to pure unless a CMakeLists.txt + exists in which case it defaults to binary. Requirements: twine >= 1.13.0 @@ -28,17 +72,26 @@ Notes: # https://packaging.python.org/tutorials/distributing-packages/ # https://stackoverflow.com/questions/45188811/how-to-gpg-sign-a-file-that-is-built-by-travis-ci + Based on template in + + # github.com/Erotemic/xcookie/ + ~/code/xcookie/publish.sh + Usage: + load_secrets + # TODO: set a trap to unload secrets? cd - # Set your variables or load your secrets export TWINE_USERNAME= export TWINE_PASSWORD= TWINE_REPOSITORY_URL="https://test.pypi.org/legacy/" - - source $(secret_loader.sh) ''' +DEBUG=${DEBUG:=''} +if [[ "${DEBUG}" != "" ]]; then + set -x +fi + check_variable(){ KEY=$1 HIDE=$2 @@ -67,24 +120,34 @@ normalize_boolean(){ fi } + +#### +# Parameters +### + # Options DEPLOY_REMOTE=${DEPLOY_REMOTE:=origin} NAME=${NAME:=$(python -c "import setup; print(setup.NAME)")} VERSION=$(python -c "import setup; print(setup.VERSION)") -# The default should change depending on the application -#DEFAULT_MODE_LIST=("sdist" "universal" "bdist") -DEFAULT_MODE_LIST=("sdist" "native") -#DEFAULT_MODE_LIST=("sdist" "bdist") - check_variable DEPLOY_REMOTE ARG_1=$1 DO_UPLOAD=${DO_UPLOAD:=$ARG_1} DO_TAG=${DO_TAG:=$ARG_1} + DO_GPG=${DO_GPG:="auto"} +# Verify that we want to build +if [ "$DO_GPG" == "auto" ]; then + DO_GPG="True" +fi + DO_BUILD=${DO_BUILD:="auto"} +# Verify that we want to build +if [ "$DO_BUILD" == "auto" ]; then + DO_BUILD="True" +fi DO_GPG=$(normalize_boolean "$DO_GPG") DO_BUILD=$(normalize_boolean "$DO_BUILD") @@ -94,27 +157,78 @@ DO_TAG=$(normalize_boolean "$DO_TAG") TWINE_USERNAME=${TWINE_USERNAME:=""} TWINE_PASSWORD=${TWINE_PASSWORD:=""} -if [[ "$(cat .git/HEAD)" != "ref: refs/heads/release" ]]; then - # If we are not on release, then default to the test pypi upload repo - TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="https://test.pypi.org/legacy/"} +DEFAULT_TEST_TWINE_REPO_URL="https://test.pypi.org/legacy/" +DEFAULT_LIVE_TWINE_REPO_URL="https://upload.pypi.org/legacy/" + +TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="auto"} +if [[ "${TWINE_REPOSITORY_URL}" == "auto" ]]; then + #if [[ "$(cat .git/HEAD)" != "ref: refs/heads/release" ]]; then + # # If we are not on release, then default to the test pypi upload repo + # TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="https://test.pypi.org/legacy/"} + #else + if [[ "$DEBUG" == "" ]]; then + TWINE_REPOSITORY_URL="live" + else + TWINE_REPOSITORY_URL="test" + fi +fi + +if [[ "${TWINE_REPOSITORY_URL}" == "live" ]]; then + TWINE_REPOSITORY_URL=$DEFAULT_LIVE_TWINE_REPO_URL +elif [[ "${TWINE_REPOSITORY_URL}" == "test" ]]; then + TWINE_REPOSITORY_URL=$DEFAULT_TEST_TWINE_REPO_URL +fi + +GPG_EXECUTABLE=${GPG_EXECUTABLE:="auto"} +if [[ "$GPG_EXECUTABLE" == "auto" ]]; then + if [[ "$(which gpg2)" != "" ]]; then + GPG_EXECUTABLE="gpg2" + else + GPG_EXECUTABLE="gpg" + fi +fi + +GPG_KEYID=${GPG_KEYID:="auto"} +if [[ "$GPG_KEYID" == "auto" ]]; then + GPG_KEYID=$(git config --local user.signingkey) + if [[ "$GPG_KEYID" == "" ]]; then + GPG_KEYID=$(git config --global user.signingkey) + fi +fi + + +if [ -f CMakeLists.txt ] ; then + DEFAULT_MODE="binary" else - TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="https://upload.pypi.org/legacy/"} + DEFAULT_MODE="pure" fi -if [[ "$(which gpg2)" != "" ]]; then - GPG_EXECUTABLE=${GPG_EXECUTABLE:=gpg2} + +# TODO: parameterize +# The default should change depending on the application +MODE=${MODE:=$DEFAULT_MODE} +if [[ "$MODE" == "all" ]]; then + MODE_LIST=("sdist" "native" "bdist") 
+elif [[ "$MODE" == "pure" ]]; then + MODE_LIST=("sdist" "native") +elif [[ "$MODE" == "binary" ]]; then + MODE_LIST=("sdist" "bdist") else - GPG_EXECUTABLE=${GPG_EXECUTABLE:=gpg} + MODE_LIST=("$MODE") fi +MODE_LIST_STR=$(printf '"%s" ' "${MODE_LIST[@]}") +#echo "MODE_LIST_STR = $MODE_LIST_STR" -GPG_KEYID=${GPG_KEYID:=$(git config --local user.signingkey)} -GPG_KEYID=${GPG_KEYID:=$(git config --global user.signingkey)} -WAS_INTERACTION="False" +#### +# Logic +### +WAS_INTERACTION="False" echo " === PYPI BUILDING SCRIPT == +NAME='$NAME' VERSION='$VERSION' TWINE_USERNAME='$TWINE_USERNAME' TWINE_REPOSITORY_URL = $TWINE_REPOSITORY_URL @@ -124,6 +238,7 @@ DO_UPLOAD=${DO_UPLOAD} DO_TAG=${DO_TAG} DO_GPG=${DO_GPG} DO_BUILD=${DO_BUILD} +MODE_LIST_STR=${MODE_LIST_STR} " @@ -134,6 +249,7 @@ else if [[ "$DO_TAG" == "False" ]]; then echo "We are NOT about to tag VERSION='$VERSION'" else + # shellcheck disable=SC2162 read -p "Do you want to git tag and push version='$VERSION'? (input 'yes' to confirm)" ANS echo "ANS = $ANS" WAS_INTERACTION="True" @@ -147,14 +263,6 @@ else fi -# Verify that we want to build -if [ "$DO_BUILD" == "auto" ]; then - DO_BUILD="True" -fi -# Verify that we want to build -if [ "$DO_GPG" == "auto" ]; then - DO_GPG="True" -fi if [[ "$DO_BUILD" == "True" ]]; then echo "About to build wheels" @@ -162,6 +270,7 @@ else if [[ "$DO_BUILD" == "False" ]]; then echo "We are NOT about to build wheels" else + # shellcheck disable=SC2162 read -p "Do you need to build wheels? (input 'yes' to confirm)" ANS echo "ANS = $ANS" WAS_INTERACTION="True" @@ -178,6 +287,7 @@ else if [[ "$DO_UPLOAD" == "False" ]]; then echo "We are NOT about to directly publish VERSION='$VERSION'" else + # shellcheck disable=SC2162 read -p "Are you ready to directly publish version='$VERSION'? ('yes' will twine upload)" ANS echo "ANS = $ANS" WAS_INTERACTION="True" @@ -199,22 +309,12 @@ if [[ "$WAS_INTERACTION" == "True" ]]; then DO_TAG=${DO_TAG} DO_GPG=${DO_GPG} DO_BUILD=${DO_BUILD} + MODE_LIST_STR='${MODE_LIST_STR}' " - read -p "Look good? Ready? Enter any text to continue" ANS -fi - - - -MODE=${MODE:=all} - -if [[ "$MODE" == "all" ]]; then - MODE_LIST=("${DEFAULT_MODE_LIST[@]}") -else - MODE_LIST=("$MODE") + # shellcheck disable=SC2162 + read -p "Look good? Ready to build? 
Enter any text to continue" ANS fi -MODE_LIST_STR=$(printf '"%s" ' "${MODE_LIST[@]}") - if [ "$DO_BUILD" == "True" ]; then @@ -225,33 +325,19 @@ if [ "$DO_BUILD" == "True" ]; then echo "LIVE BUILDING" # Build wheel and source distribution - - #WHEEL_PATHS=() for _MODE in "${MODE_LIST[@]}" do echo "_MODE = $_MODE" if [[ "$_MODE" == "sdist" ]]; then python setup.py sdist || { echo 'failed to build sdist wheel' ; exit 1; } - WHEEL_PATH=$(ls dist/$NAME-$VERSION*.tar.gz) - #WHEEL_PATHS+=($WHEEL_PATH) elif [[ "$_MODE" == "native" ]]; then python setup.py bdist_wheel || { echo 'failed to build native wheel' ; exit 1; } - WHEEL_PATH=$(ls dist/$NAME-$VERSION*.whl) - #WHEEL_PATHS+=($WHEEL_PATH) - elif [[ "$_MODE" == "universal" ]]; then - python setup.py bdist_wheel --universal || { echo 'failed to build universal wheel' ; exit 1; } - UNIVERSAL_TAG="py3-none-any" - WHEEL_PATH=$(ls dist/$NAME-$VERSION-$UNIVERSAL_TAG*.whl) - #WHEEL_PATHS+=($WHEEL_PATH) elif [[ "$_MODE" == "bdist" ]]; then echo "Assume wheel has already been built" - WHEEL_PATH=$(ls wheelhouse/$NAME-$VERSION-*.whl) - #WHEEL_PATHS+=($WHEEL_PATH) else - echo "bad mode" + echo "ERROR: bad mode" exit 1 fi - echo "WHEEL_PATH = $WHEEL_PATH" done echo " @@ -263,36 +349,69 @@ else fi +ls_array(){ + __doc__=' + Read the results of a glob pattern into an array + + Args: + arr_name + glob_pattern + + Example: + arr_name="myarray" + glob_pattern="*" + pass + ' + local arr_name="$1" + local glob_pattern="$2" + shopt -s nullglob + # shellcheck disable=SC2206 + array=($glob_pattern) + shopt -u nullglob # Turn off nullglob to make sure it doesn't interfere with anything later + # FIXME; for some reason this doesnt always work properly + # Copy the array into the dynamically named variable + # shellcheck disable=SC2086 + readarray -t $arr_name < <(printf '%s\n' "${array[@]}") +} + + WHEEL_PATHS=() for _MODE in "${MODE_LIST[@]}" do - echo "_MODE = $_MODE" if [[ "$_MODE" == "sdist" ]]; then - WHEEL_PATH=$(ls dist/$NAME-$VERSION*.tar.gz) - WHEEL_PATHS+=($WHEEL_PATH) + ls_array "_NEW_WHEEL_PATHS" "dist/${NAME}-${VERSION}*.tar.gz" elif [[ "$_MODE" == "native" ]]; then - WHEEL_PATH=$(ls dist/$NAME-$VERSION*.whl) - WHEEL_PATHS+=($WHEEL_PATH) - elif [[ "$_MODE" == "universal" ]]; then - UNIVERSAL_TAG="py3-none-any" - WHEEL_PATH=$(ls dist/$NAME-$VERSION-$UNIVERSAL_TAG*.whl) - WHEEL_PATHS+=($WHEEL_PATH) + ls_array "_NEW_WHEEL_PATHS" "dist/${NAME}-${VERSION}*.whl" elif [[ "$_MODE" == "bdist" ]]; then - WHEEL_PATH=$(ls wheelhouse/$NAME-$VERSION-*.whl) - WHEEL_PATHS+=($WHEEL_PATH) + ls_array "_NEW_WHEEL_PATHS" "wheelhouse/${NAME}-${VERSION}-*.whl" else - echo "bad mode" + echo "ERROR: bad mode" exit 1 fi - echo "WHEEL_PATH = $WHEEL_PATH" + # hacky CONCAT because for some reason ls_array will return + # something that looks empty but has one empty element + for new_item in "${_NEW_WHEEL_PATHS[@]}" + do + if [[ "$new_item" != "" ]]; then + WHEEL_PATHS+=("$new_item") + fi + done done +# Dedup the paths +readarray -t WHEEL_PATHS < <(printf '%s\n' "${WHEEL_PATHS[@]}" | sort -u) + WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_PATHS[@]}") +echo "WHEEL_PATHS_STR = $WHEEL_PATHS_STR" echo " + +GLOBED +------ MODE=$MODE VERSION='$VERSION' WHEEL_PATHS='$WHEEL_PATHS_STR' + " @@ -317,13 +436,13 @@ if [ "$DO_GPG" == "True" ]; then echo "Signing wheels" GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" echo "GPG_SIGN_CMD = $GPG_SIGN_CMD" - $GPG_SIGN_CMD --output $WHEEL_PATH.asc $WHEEL_PATH + $GPG_SIGN_CMD --output "$WHEEL_PATH".asc 
"$WHEEL_PATH" echo "Checking wheels" - twine check $WHEEL_PATH.asc $WHEEL_PATH || { echo 'could not check wheels' ; exit 1; } + twine check "$WHEEL_PATH".asc "$WHEEL_PATH" || { echo 'could not check wheels' ; exit 1; } echo "Verifying wheels" - $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH || { echo 'could not verify wheels' ; exit 1; } + $GPG_EXECUTABLE --verify "$WHEEL_PATH".asc "$WHEEL_PATH" || { echo 'could not verify wheels' ; exit 1; } done echo " === === @@ -339,7 +458,7 @@ if [[ "$DO_TAG" == "True" ]]; then # git push origin :refs/tags/$TAG_NAME # and then tag with -f # - git tag $TAG_NAME -m "tarball tag $VERSION" + git tag "$TAG_NAME" -m "tarball tag $VERSION" git push --tags $DEPLOY_REMOTE echo "Should also do a: git push $DEPLOY_REMOTE main:release" echo "For github should draft a new release: https://github.com/PyUtils/line_profiler/releases/new" @@ -355,13 +474,13 @@ if [[ "$DO_UPLOAD" == "True" ]]; then for WHEEL_PATH in "${WHEEL_PATHS[@]}" do if [ "$DO_GPG" == "True" ]; then - twine upload --username $TWINE_USERNAME --password=$TWINE_PASSWORD \ - --repository-url $TWINE_REPOSITORY_URL \ - --sign $WHEEL_PATH.asc $WHEEL_PATH --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } + twine upload --username "$TWINE_USERNAME" --password=$TWINE_PASSWORD \ + --repository-url "$TWINE_REPOSITORY_URL" \ + --sign "$WHEEL_PATH".asc "$WHEEL_PATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } else - twine upload --username $TWINE_USERNAME --password=$TWINE_PASSWORD \ - --repository-url $TWINE_REPOSITORY_URL \ - $WHEEL_PATH --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } + twine upload --username "$TWINE_USERNAME" --password=$TWINE_PASSWORD \ + --repository-url "$TWINE_REPOSITORY_URL" \ + "$WHEEL_PATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } fi done echo """ @@ -369,7 +488,7 @@ if [[ "$DO_UPLOAD" == "True" ]]; then """ else echo """ - DRY RUN ... Skiping upload + DRY RUN ... Skipping upload DEPLOY_REMOTE = '$DEPLOY_REMOTE' DO_UPLOAD = '$DO_UPLOAD' @@ -381,7 +500,6 @@ else NAME='$NAME' TWINE_USERNAME='$TWINE_USERNAME' GPG_KEYID = '$GPG_KEYID' - MB_PYTHON_TAG = '$MB_PYTHON_TAG' To do live run set DO_UPLOAD=1 and ensure deploy and current branch are the same diff --git a/pyproject.toml b/pyproject.toml index fcd67c2d1dcb5f6b7ad5348004f02d3a3afe2030..8073d696ecdd50c7c7cf2fc51646655b64586029 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,2 +1,25 @@ [build-system] -requires = ["setuptools", "wheel", "scikit-build", "cmake", "cython", "ninja", "ubelt", "cffi"] +requires = [ "setuptools>=41.0.1",] + +[tool.mypy] +ignore_missing_imports = true + +[tool.xcookie] +tags = [ "kitware", "gitlab", "purepy", "cv2",] +mod_name = "netharn" +repo_name = "netharn" +rel_mod_parent_dpath = "." 
+os = [ "linux", "win", "all", "osx",] +min_python = 3.7 + +[tool.pytest.ini_options] +addopts = "-p no:doctest --xdoctest --xdoctest-style=google --ignore-glob=setup.py" +norecursedirs = ".git ignore build __pycache__ dev _skbuild" +filterwarnings = [ "default", "ignore:.*No cfgstr given in Cacher constructor or call.*:Warning", "ignore:.*Define the __nice__ method for.*:Warning", "ignore:.*private pytest class or function.*:Warning",] + +[tool.coverage.run] +branch = true + +[tool.coverage.report] +exclude_lines = [ "pragma: no cover", ".* # pragma: no cover", ".* # nocover", "def __repr__", "raise AssertionError", "raise NotImplementedError", "if 0:", "if trace is not None", "verbose = .*", "^ *raise", "^ *pass *$", "if _debug:", "if __name__ == .__main__.:", ".*if six.PY2:",] +omit = [ "netharn/__main__.py", "*/setup.py",] diff --git a/requirements.txt b/requirements.txt index 4dda59107c1dd231a7ccf95555b7fe7123a1fc32..bc18a5033c334a259d3db88ea02beb3d0854dead 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,3 @@ --r requirements/super_setup.txt --r requirements/build.txt -r requirements/runtime.txt -r requirements/tests.txt -r requirements/optional.txt diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000000000000000000000000000000000..57d0dabc2f0882636877f8a646503b5e810881a3 --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,8 @@ +sphinx >= 5.0.1 +sphinx-autobuild >= 2021.3.14 +sphinx_rtd_theme >= 1.0.0 +sphinxcontrib-napoleon >= 0.7 +sphinx-autoapi >= 1.8.4 +Pygments >= 2.9.0 +myst_parser >= 0.18.0 +sphinx-reredirects >= 0.0.1 diff --git a/requirements/graphics.txt b/requirements/graphics.txt new file mode 100644 index 0000000000000000000000000000000000000000..57af9e0656783f70ab1b03850a6af22b9ae9efab --- /dev/null +++ b/requirements/graphics.txt @@ -0,0 +1,9 @@ +# python ~/local/tools/supported_python_versions_pip.py opencv-python +opencv-python>=4.5.4.58 ; python_version >= '3.10' # Python 3.10+ +opencv-python>=3.4.15.55 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +opencv-python>=3.4.15.55 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +opencv-python>=3.4.15.55 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +opencv-python>=3.4.13.47 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 +opencv-python>=3.1.0.2 ; python_version < '3.6' and python_version >= '3.5' # Python 3.5 +opencv-python>=3.1.0.5 ; python_version < '3.5' and python_version >= '3.4' # Python 3.4 +opencv-python>=3.1.0.0 ; python_version < '3.4' and python_version >= '2.7' # Python 2.7 diff --git a/requirements/headless.txt b/requirements/headless.txt new file mode 100644 index 0000000000000000000000000000000000000000..14f0c3f07fd3d5a850adb99994ca6b7a1baeea1f --- /dev/null +++ b/requirements/headless.txt @@ -0,0 +1,9 @@ +# python ~/local/tools/supported_python_versions_pip.py opencv-python-headless +opencv-python-headless>=4.5.4.58 ; python_version >= '3.10' # Python 3.10+ +opencv-python-headless>=3.4.13.47 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +opencv-python-headless>=3.4.13.47 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +opencv-python-headless>=3.4.13.47 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +opencv-python-headless>=3.4.13.47 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 +opencv-python-headless>=3.4.11.39 ; python_version < '3.6' and python_version >= '3.5' # Python 3.5 
+opencv-python-headless>=3.4.2.16 ; python_version < '3.5' and python_version >= '3.4' # Python 3.4 +opencv-python-headless>=3.4.2.16 ; python_version < '3.4' and python_version >= '2.7' # Python 2.7 diff --git a/requirements/optional.txt b/requirements/optional.txt index 2a8035b8ca9fc3c393eb81010a4bacb224ec368f..8e073b6c2d6b522b3da16e34bc435609208f868f 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,13 +1,47 @@ -pandas >= 0.23.3 +pandas>=1.4.0 ; python_version >= '3.10' # Python 3.10+ +pandas>=1.4.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +pandas>=1.4.0 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +pandas>=1.2.0 ; python_version < '3.8' and python_version >= '3.7.1' # Python 3.7.1 +pandas>=1.1.4 ; python_version < '3.7.1' and python_version >= '3.7' # Python 3.7 +pandas>=1.1.4 ; python_version < '3.7' and python_version >= '3.6.1' # Python 3.6.1 +pandas>=1.1.4 ; python_version < '3.6.1' and python_version >= '3.6' # Python 3.6 + tqdm >= 4.23.4 -Pillow >= 5.2.0 -opencv-python >= 3.4.1 -matplotlib >= 2.2.2 -seaborn>=0.9.0 -h5py >= 2.8.0 + +Pillow>=9.1.0 ; python_version >= '3.10' # Python 3.10+ +Pillow>=9.1.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +Pillow>=8.0.1 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +Pillow>=8.0.0 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +Pillow>=8.0.0 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 + +# opencv-python >= 3.4.1 + +matplotlib>=3.5.0 ; python_version >= '3.10' # Python 3.10+ +matplotlib>=3.5.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +matplotlib>=3.4.0 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +matplotlib>=3.4.0 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +matplotlib>=3.1.0 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 +matplotlib>=3.0.0 ; python_version < '3.6' and python_version >= '3.5' # Python 3.5 + +seaborn>=0.10.0 ; python_version >= '3.6' # Python 3.6+ +seaborn>=0.9.1 ; python_version < '3.6' and python_version >= '2.7' # Python 2.7 + +# h5py >= 2.8.0 protobuf >= 3.6.0 -scikit-learn >= 0.19.1 -scipy >= 1.2.1 +# scikit-learn >= 0.19.1 +scikit-learn>=1.0.2 ; python_version >= '3.10' # Python 3.10+ +scikit-learn>=1.0.2 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +scikit-learn>=1.0.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +scikit-learn>=0.24.1 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +scikit-learn>=0.24.1 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 + + +scipy>=1.8.0 ; python_version >= '3.10' # Python 3.10+ +scipy>=1.8.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +scipy>=1.8.0 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +scipy>=1.6.0 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +scipy>=1.5.4 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 + psutil >= 5.4.7 Pygments >= 2.2.0 @@ -15,11 +49,12 @@ tensorboard_logger >= 0.1.0 tensorboard >= 1.8.0 sympy >= 1.3 -ndsampler >= 0.5.7 -kwcoco >= 0.1.0 +ndsampler >= 0.6.7 +kwcoco >= 0.2.31 # pyqt5>= 5.11.2;python_version>'2.7' # colormath -torch-optimizer >= 0.0.1a9 ;python_version>='3.6' +#torch-optimizer >= 0.0.1a9 ;python_version>='3.6' +torch-optimizer >= 0.3.0; python_version>='3.6' diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 
a9717e4be41a3267ce9274b179e9c529a344edd5..8fd24af528dc68bf48f08296b9724aa2526fcba6 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -5,14 +5,34 @@ # install torch yourself on windows. Hopefully there will be a better alternative # in the future. -torch >= 1.0.0 -torchvision >= 0.2.0 +# python ~/local/tools/supported_python_versions_pip.py numpy + +torch>=1.11.0 ; python_version >= '3.10' # Python 3.10+ +torch>=1.11.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +torch>=1.7.0 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +torch>=1.7.0 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +torch>=1.7.0 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 + +# torchvision req table +# https://github.com/pytorch/vision +torchvision>=0.12.0 ; python_version >= '3.10' # Python 3.10+ +torchvision>=0.12.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +torchvision>=0.8.1 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +torchvision>=0.8.1 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +torchvision>=0.8.1 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 six >= 1.11.0 -numpy >= 1.9.0 -ubelt >= 0.10.0 + +numpy>=1.21.6 ; python_version >= '3.10' # Python 3.10+ +numpy>=1.21.4 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +numpy>=1.19.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +numpy>=1.19.2 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 +numpy>=1.19.2 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 + +ubelt >= 1.1.2 + parse >= 1.8.4 -pyflakes >= 1.6.0 +pyflakes >= 2.4.0 astunparse >= 1.6.1 pygtrie >= 2.3.3 @@ -23,10 +43,10 @@ imageio < 2.8.0;python_version < '3.0' # imgaug < 0.3.0;python_version < '3.0' # NOTE: in the future kwimage and kwplot may become optional -scriptconfig >= 0.4.0 -kwarray >= 0.4.0 -kwimage >= 0.4.0 -kwplot >= 0.4.0 +scriptconfig >= 0.5.8 +kwarray >= 0.6.0 +kwimage >= 0.9.2 +kwplot >= 0.4.12 qualname>=0.1.0;python_version < '3.0' torch_liberator >= 0.0.4 diff --git a/requirements/tests.txt b/requirements/tests.txt index 9cc46be8e1f12ccc9fb3eaf74c1730c7bb7f5d30..07176d9bf1f5aad4b16f9510655fdbc9b988a523 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,4 +1,18 @@ -xdoctest >= 0.8.3 -pytest >= 3.6.3 -pytest-cov >= 2.5.1 -coverage >= 4.3.4 +# See ~/local/tools/supported_python_versions_pip.py for helper script +pytest >= 6.2.5 ; python_version >= '3.10.0' # Python 3.10+ +pytest >= 4.6.0 ; python_version < '3.10.0' and python_version >= '3.7.0' # Python 3.7-3.9 +pytest >= 4.6.0 ; python_version < '3.7.0' and python_version >= '3.6.0' # Python 3.6 +pytest >= 4.6.0, <= 6.1.2 ; python_version < '3.6.0' and python_version >= '3.5.0' # Python 3.5 +pytest >= 4.6.0, <= 4.6.11 ; python_version < '3.5.0' and python_version >= '3.4.0' # Python 3.4 +pytest >= 4.6.0, <= 4.6.11 ; python_version < '2.8.0' and python_version >= '2.7.0' # Python 2.7 + +coverage>=5.2.1 ; python_version >= '3.6' # Python 3.6+ +coverage>=4.4.0 ; python_version < '3.6' and python_version >= '2.7' # Python 2.7 + + +xdoctest >= 0.15.10 + +pytest-cov >= 3.0.0 ; python_version >= '3.6.0' # Python 3.6+ +pytest-cov >= 2.9.0 ; python_version < '3.6.0' and python_version >= '3.5.0' # Python 3.5 +pytest-cov >= 2.8.1 ; python_version < '3.5.0' and python_version >= '3.4.0' # Python 3.4 +pytest-cov >= 2.8.1 ; python_version < '2.8.0' and python_version >= '2.7.0' # 
Python 2.7 diff --git a/run_doctests.sh b/run_doctests.sh index 4709422ea3cf3755be6b8968abb1b8254d8909a0..5126b094076785f1abfe623b6e9b67792a7b40e6 100755 --- a/run_doctests.sh +++ b/run_doctests.sh @@ -1,2 +1,2 @@ #!/bin/bash -xdoctest netharn --style=google all $@ +xdoctest netharn --style=google all diff --git a/run_linter.sh b/run_linter.sh new file mode 100755 index 0000000000000000000000000000000000000000..a41bb05ea08853e61ca710b157ab6d5859014f3f --- /dev/null +++ b/run_linter.sh @@ -0,0 +1,2 @@ +#!/bin/bash +flake8 ./netharn --count --select=E9,F63,F7,F82 --show-source --statistics diff --git a/run_tests.py b/run_tests.py index e97ca316fd1d1737bb23532a6950fd44b3f326f0..babad7151f5965db869b19111b16360ff01f66fd 100755 --- a/run_tests.py +++ b/run_tests.py @@ -1,19 +1,17 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- if __name__ == '__main__': import pytest import sys package_name = 'netharn' + mod_dpath = package_name + test_dpath = 'tests' pytest_args = [ - '-p', 'no:doctest', - '-s', - '-v', '--cov-config', '.coveragerc', '--cov-report', 'html', '--cov-report', 'term', '--xdoctest', '--cov=' + package_name, - package_name, + mod_dpath, test_dpath ] pytest_args = pytest_args + sys.argv[1:] sys.exit(pytest.main(pytest_args)) diff --git a/setup.py b/setup.py index a74cb1c23b247218237d67b8bace01896c30912c..a4ecb94bd78c44503d3466bd531a061ef25ac74f 100755 --- a/setup.py +++ b/setup.py @@ -3,34 +3,43 @@ # NOTE: pip install -U --pre h5py from __future__ import absolute_import, division, print_function import sys -from os.path import dirname from setuptools import find_packages from os.path import exists -from os.path import join -import glob -import os from setuptools import setup -# from skbuild import setup def parse_version(fpath): """ Statically parse the version number from a python file """ + value = static_parse('__version__', fpath) + return value + + +def static_parse(varname, fpath): + """ + Statically parse the a constant variable from a python file + """ import ast if not exists(fpath): raise ValueError('fpath={!r} does not exist'.format(fpath)) with open(fpath, 'r') as file_: sourcecode = file_.read() pt = ast.parse(sourcecode) - class VersionVisitor(ast.NodeVisitor): + class StaticVisitor(ast.NodeVisitor): def visit_Assign(self, node): for target in node.targets: - if getattr(target, 'id', None) == '__version__': - self.version = node.value.s - visitor = VersionVisitor() + if getattr(target, 'id', None) == varname: + self.static_value = node.value.s + visitor = StaticVisitor() visitor.visit(pt) - return visitor.version + try: + value = visitor.static_value + except AttributeError: + import warnings + value = 'Unknown {}'.format(varname) + warnings.warn(value) + return value def parse_description(): @@ -45,97 +54,103 @@ def parse_description(): readme_fpath = join(dirname(__file__), 'README.rst') # This breaks on pip install, so check that it exists. if exists(readme_fpath): - try: - with open(readme_fpath, 'r') as f: - text = f.read() - return text - except Exception as ex: - import warnings - warnings.warn('unable to parse existing readme: {!r}'.format(ex)) + with open(readme_fpath, 'r') as f: + text = f.read() + return text return '' -def parse_requirements(fname='requirements.txt', with_version=False): +def parse_requirements(fname='requirements.txt', versions=False): """ Parse the package dependencies listed in a requirements file but strips specific versioning information. 
Args: fname (str): path to requirements file - with_version (bool, default=False): if true include version specs + versions (bool | str, default=False): + If true include version specs. + If strict, then pin to the minimum version. Returns: List[str]: list of requirements items - - References: - https://pip.readthedocs.io/en/1.1/requirements.html - - CommandLine: - python -c "import setup; print(setup.parse_requirements())" - python -c "import setup; print(chr(10).join(setup.parse_requirements(with_version=True)))" """ - from os.path import exists + from os.path import exists, dirname, join import re require_fpath = fname - def parse_line(line, base='.'): + def parse_line(line, dpath=''): """ Parse information from a line in a requirements text file + + line = 'git+https://a.com/somedep@sometag#egg=SomeDep' + line = '-e git+https://a.com/somedep@sometag#egg=SomeDep' """ - if line.startswith(('-f ', '--find-links ', '--index-url ')): - import warnings - warnings.warn( - 'requirements file specified alternative index urls, but ' - 'there is currently no way to support this in setuptools') - elif line.startswith('-r '): + # Remove inline comments + comment_pos = line.find(' #') + if comment_pos > -1: + line = line[:comment_pos] + + if line.startswith('-r '): # Allow specifying requirements in other files - new_fname = line.split(' ')[1] - new_fpath = join(base, new_fname) - for info in parse_require_file(new_fpath): + target = join(dpath, line.split(' ')[1]) + for info in parse_require_file(target): yield info else: + # See: https://www.python.org/dev/peps/pep-0508/ info = {'line': line} if line.startswith('-e '): info['package'] = line.split('#egg=')[1] else: + if ';' in line: + pkgpart, platpart = line.split(';') + # Handle platform specific dependencies + # setuptools.readthedocs.io/en/latest/setuptools.html + # #declaring-platform-specific-dependencies + plat_deps = platpart.strip() + info['platform_deps'] = plat_deps + else: + pkgpart = line + platpart = None + # Remove versioning from the package pat = '(' + '|'.join(['>=', '==', '>']) + ')' - parts = re.split(pat, line, maxsplit=1) + parts = re.split(pat, pkgpart, maxsplit=1) parts = [p.strip() for p in parts] info['package'] = parts[0] if len(parts) > 1: op, rest = parts[1:] - if ';' in rest: - # Handle platform specific dependencies - # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies - version, platform_deps = map(str.strip, rest.split(';')) - info['platform_deps'] = platform_deps - else: - version = rest # NOQA + version = rest # NOQA info['version'] = (op, version) yield info def parse_require_file(fpath): - base = dirname(fpath) + dpath = dirname(fpath) with open(fpath, 'r') as f: for line in f.readlines(): line = line.strip() if line and not line.startswith('#'): - for info in parse_line(line, base): + for info in parse_line(line, dpath=dpath): yield info def gen_packages_items(): if exists(require_fpath): for info in parse_require_file(require_fpath): parts = [info['package']] - if with_version and 'version' in info: - parts.extend(info['version']) + if versions and 'version' in info: + if versions == 'strict': + # In strict mode, we pin to the minimum version + if info['version']: + # Only replace the first >= instance + verstr = ''.join(info['version']).replace('>=', '==', 1) + parts.append(verstr) + else: + parts.extend(info['version']) if not sys.version.startswith('3.4'): # apparently package_deps are broken in 3.4 - platform_deps = info.get('platform_deps') - if 
platform_deps is not None: - parts.append(';' + platform_deps) + plat_deps = info.get('platform_deps') + if plat_deps is not None: + parts.append(';' + plat_deps) item = ''.join(parts) yield item @@ -143,96 +158,35 @@ def parse_requirements(fname='requirements.txt', with_version=False): return packages -def clean_repo(repodir, modname, rel_paths=[]): - """ - repodir = ub.expandpath('~/code/netharn/') - modname = 'netharn' - rel_paths = [ - 'netharn/util/nms/cpu_nms.c', - 'netharn/util/nms/cpu_nms.c', - 'netharn/util/nms/cpu_nms.cpp', - 'netharn/util/nms/cython_boxes.c', - 'netharn/util/nms/cython_boxes.html', - ] - """ - print('cleaning repo: {}/{}'.format(repodir, modname)) - toremove = [] - for root, dnames, fnames in os.walk(repodir): - - if os.path.basename(root) == modname + '.egg-info': - toremove.append(root) - del dnames[:] - - if os.path.basename(root) == '__pycache__': - toremove.append(root) - del dnames[:] - - if os.path.basename(root) == '_ext': - # Remove torch extensions - toremove.append(root) - del dnames[:] - - if os.path.basename(root) == 'build': - # Remove python c extensions - if len(dnames) == 1 and dnames[0].startswith('temp.'): - toremove.append(root) - del dnames[:] - - # Remove simple pyx inplace extensions - for fname in fnames: - if fname.endswith('.pyc'): - toremove.append(join(root, fname)) - if fname.endswith(('.so', '.c', '.o')): - if fname.split('.')[0] + '.pyx' in fnames: - toremove.append(join(root, fname)) - - def enqueue(d): - if exists(d) and d not in toremove: - toremove.append(d) - - import six - if six.PY2: - abs_paths = [join(repodir, p) for pat in rel_paths - for p in glob.glob(pat)] - else: - abs_paths = [join(repodir, p) for pat in rel_paths - for p in glob.glob(pat, recursive=True)] - for abs_path in abs_paths: - enqueue(abs_path) - - import ubelt as ub - for dpath in toremove: - # print('Removing dpath = {!r}'.format(dpath)) - ub.delete(dpath, verbose=1) - - -def clean(): - """ - __file__ = ub.expandpath('~/code/netharn/setup.py') - """ - modname = 'netharn' - repodir = dirname(__file__) - rel_paths = [ - 'htmlcov', - '_skbuild', - '_build_wheel', - 'netharn.egg-info', - 'dist', - 'build', - '**/*.pyc', - 'profile*' - 'pip-wheel-metadata', - ] - clean_repo(repodir, modname, rel_paths) - - VERSION = version = parse_version('netharn/__init__.py') # needs to be a global var for git tags NAME = 'netharn' if __name__ == '__main__': - if 'clean' in sys.argv: - clean() - # sys.exit(0) + + setupkw = {} + setupkw["install_requires"] = parse_requirements("requirements/runtime.txt") + setupkw["extras_require"] = { + "all": parse_requirements("requirements.txt"), + "tests": parse_requirements("requirements/tests.txt"), + "optional": parse_requirements("requirements/optional.txt"), + "headless": parse_requirements("requirements/headless.txt"), + "graphics": parse_requirements("requirements/graphics.txt"), + # Strict versions + "headless-strict": parse_requirements( + "requirements/headless.txt", versions="strict" + ), + "graphics-strict": parse_requirements( + "requirements/graphics.txt", versions="strict" + ), + "all-strict": parse_requirements("requirements.txt", versions="strict"), + "runtime-strict": parse_requirements( + "requirements/runtime.txt", versions="strict" + ), + "tests-strict": parse_requirements("requirements/tests.txt", versions="strict"), + "optional-strict": parse_requirements( + "requirements/optional.txt", versions="strict" + ), + } setup( name=NAME, @@ -243,16 +197,11 @@ if __name__ == '__main__': description='Train and deploy 
pytorch models', long_description=parse_description(), long_description_content_type='text/x-rst', - install_requires=parse_requirements('requirements/runtime.txt'), - extras_require={ - 'all': parse_requirements('requirements.txt'), - 'optional': parse_requirements('requirements/optional.txt'), - 'tests': parse_requirements('requirements/tests.txt'), - }, packages=find_packages(include='netharn.*'), package_data={ 'netharn.initializers._nx_ext_v2': ['*.pyx'], }, + python_requires='>=3.6', license='Apache 2', classifiers=[ # List of classifiers available at: @@ -268,6 +217,11 @@ if __name__ == '__main__': # This should be interpreted as Apache License v2.0 'License :: OSI Approved :: Apache Software License', # Supported Python versions - 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', ], + **setupkw, ) diff --git a/super_setup.py b/super_setup.py index 51b6b8bcab4be18c861900c4d67a26bfb741b545..90bd84ae6f76e0e4f2405ed8da38e251e0cd5ebb 100755 --- a/super_setup.py +++ b/super_setup.py @@ -251,6 +251,8 @@ class Repo(ub.NiceRepr): >>> repo._cmd('./run_doctests.sh') repo = + Example: + >>> # xdoctest: +SKIP >>> # Here is a less simple example referencing ubelt >>> from super_setup import * >>> import ubelt as ub @@ -472,6 +474,7 @@ class Repo(ub.NiceRepr): Look for a "dev" branch with a higher version number and switch to that. Example: + >>> # xdoctest: +SKIP >>> from super_setup import * >>> import ubelt as ub >>> repo = Repo.demo() @@ -850,7 +853,7 @@ DEVEL_REPOS = [ 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/kwarray.git'}, }, { - 'name': 'kwimage', 'branch': 'dev/0.8.3', 'remote': 'origin', + 'name': 'kwimage', 'branch': 'dev/0.8.7', 'remote': 'origin', 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/kwimage.git'}, }, # TODO: @@ -859,11 +862,11 @@ DEVEL_REPOS = [ # 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/kwannot.git'}, # }, { - 'name': 'kwcoco', 'branch': 'dev/0.2.26', 'remote': 'origin', + 'name': 'kwcoco', 'branch': 'dev/0.2.32', 'remote': 'origin', 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/kwcoco.git'}, }, { - 'name': 'kwplot', 'branch': 'dev/0.4.12', 'remote': 'origin', + 'name': 'kwplot', 'branch': 'dev/0.4.13', 'remote': 'origin', 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/kwplot.git'}, }, @@ -883,13 +886,13 @@ DEVEL_REPOS = [ 'remotes': {'origin': 'git@gitlab.kitware.com:utils/scriptconfig.git'}, }, { - 'name': 'ndsampler', 'branch': 'dev/0.6.7', 'remote': 'origin', + 'name': 'ndsampler', 'branch': 'dev/0.6.8', 'remote': 'origin', 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/ndsampler.git'}, }, # netharn - training harness { - 'name': 'netharn', 'branch': 'dev/0.5.18', 'remote': 'origin', + 'name': 'netharn', 'branch': 'dev/0.5.19', 'remote': 'origin', 'remotes': {'origin': 'git@gitlab.kitware.com:computer-vision/netharn.git'}, }, ] diff --git a/tests/test_iter_idx.py b/tests/test_iter_idx.py index 88e555995c79e12ae24065bc14ce6cfa5afac5c8..8c7fa96218bc3137a2389231f80df2d4cd2626c6 100644 --- a/tests/test_iter_idx.py +++ b/tests/test_iter_idx.py @@ -79,7 +79,7 @@ def test_iter_idx(): hyper = { # --- data first 'datasets' : datasets, - 'nice' : 'test_iter_idx', + 'name' : 'test_iter_idx', 'workdir' : ub.ensure_app_cache_dir('netharn/test/test_iter_idx'), 'loaders' : 
{'batch_size': 1}, 'xpu' : nh.XPU.coerce('cpu'), @@ -92,8 +92,17 @@ def test_iter_idx(): 'monitor' : (nh.Monitor, {'max_epoch': 10}), } harn1 = MyHarn(hyper=hyper) - harn1.preferences['use_tensorboard'] = True - harn1.preferences['eager_dump_tensorboard'] = True + + try: + import tensorboard # NOQA + from tensorboard.backend.event_processing import event_accumulator # NOQA + except Exception: + use_tensorboard = False + else: + use_tensorboard = True + + harn1.preferences['use_tensorboard'] = use_tensorboard + harn1.preferences['eager_dump_tensorboard'] = use_tensorboard harn1.intervals['log_iter_train'] = 1 harn1.intervals['log_iter_vali'] = 1