Commit c4d51e83 authored by Matt Brown

Update.

parent db30f8d7
......@@ -2,6 +2,7 @@ FROM ubuntu:18.04
# install packages
RUN apt-get update && apt-get install -y --no-install-recommends \
wget \
curl \
git \
sudo \
......@@ -12,6 +13,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libx11-6 \
libxxf86vm1 \
build-essential \
libgl1-mesa-glx \
libqt5x11extras5 \
libgl1-mesa-dev \
xvfb \
&& rm -rf /var/lib/apt/lists/*
# Create a non-root user and switch to it. Running X11 applications as root does
......@@ -23,7 +27,7 @@ USER user
WORKDIR /home/user
# Install Miniconda
RUN curl -so ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh \
RUN wget -O ~/miniconda.sh https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh \
&& chmod +x ~/miniconda.sh \
&& ~/miniconda.sh -b -p ~/miniconda \
&& rm ~/miniconda.sh
......@@ -34,16 +38,22 @@ ENV CONDA_AUTO_UPDATE_CONDA=false
RUN conda install -y -c menpo opencv3=3.1.0
RUN conda clean -ya
RUN conda install -c anaconda vtk
RUN pip install transformations
RUN conda install -c anaconda vtk==8.2.0
RUN conda install gdal==2.3.3
RUN conda install scipy==1.5.2
RUN conda install shapely==1.6.4
RUN conda install pyyaml==5.3.1
RUN conda install matplotlib
RUN conda install -c conda-forge pyembree==0.1.6
RUN conda install natsort
RUN conda install -c conda-forge trimesh
RUN pip install transformations==2020.1.1
RUN pip install kwcoco
# -----------------------------------------------------------------------------
# Additional installs so that Spyder can connect to the remote kernel and can
# view Matplotlib results.
RUN conda install -c anaconda spyder-kernels==1.9.4
RUN conda install matplotlib==3.3.1
RUN mkdir -p /home/user/.local/share/jupyter/runtime
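# When the remote kernel starts, Jupyter writes its connection file (e.g. a
# hypothetical kernel-1234.json) into this runtime directory; Spyder attaches
# to the kernel through that file.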
# -----------------------------------------------------------------------------
......@@ -51,3 +61,5 @@ RUN mkdir -p /home/user/.local/share/jupyter/runtime
# -----------------------------------------------------------------------------
# Set path Python modules.
ENV PYTHONPATH "${PYTHONPATH}:/home/user/adapt_postprocessing/src/colmap_processing/src"
ENV DISPLAY :99.0
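# Note: DISPLAY :99.0 assumes a virtual framebuffer serves display 99, e.g.
# started at container runtime with (hypothetical invocation):
#   Xvfb :99 -screen 0 1920x1080x24 &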
......@@ -42,6 +42,8 @@ docker run --rm -it \
-v "$HOME/.config:$HOME/.config" \
-v "$host_kernel_dir:$host_kernel_dir" \
-v "$DIR/../:/home/user/adapt_postprocessing" \
-v "/media:/media" \
--privileged \
--name $container_name \
$IMAGENAME \
/bin/bash -c "python -m spyder_kernels.console --pylab=auto --matplotlib=auto -f $kernel_name"
......
......@@ -25,19 +25,30 @@ function start_container()
# If the container does not exist, start it.
echo "Starting new container"
# Open up xhost (for display) only to the container.
xhost +local:$(docker inspect --format='{{ .Config.Hostname }}' $container_name)
HOST_UID=$(id -u)
HOST_GID=$(id -g)
# Mapping /tmp/.X11-unix allows graphics to be passed.
docker run --rm -dt \
--gpus all \
--network="host" \
-e "DISPLAY" \
-e "SDL_VIDEODRIVER" \
-e="QT_X11_NO_MITSHM=1" \
-v "/tmp/.X11-unix:/tmp/.X11-unix" \
--user $(id -u):$(id -g) \
-v="$XAUTH:$XAUTH" \
-v "/etc/group:/etc/group:ro" \
-v "/etc/passwd:/etc/passwd:ro" \
-v "/etc/shadow:/etc/shadow:ro" \
-v "$HOME/.config:$HOME/.config" \
-v "$HOME/.local:$HOME/.local" \
-v "$DIR/../:/home/user/adapt_postprocessing" \
-v "/media:/media" \
--privileged \
--name $container_name \
$image_name \
/bin/bash
......
......@@ -40,271 +40,178 @@ import matplotlib.pyplot as plt
import glob
from osgeo import osr, gdal
from scipy.optimize import fmin, minimize, fminbound
import transformations
import trimesh
# Colmap Processing imports.
import colmap_processing.vtk_util as vtk_util
from colmap_processing.geo_conversions import llh_to_enu
from colmap_processing.colmap_interface import read_images_binary, Image, \
read_points3d_binary, read_cameras_binary, qvec2rotmat
from colmap_processing.database import COLMAPDatabase, pair_id_to_image_ids, blob_to_array
import colmap_processing.vtk_util as vtk_util
from colmap_processing.geo_conversions import enu_to_llh, llh_to_enu, \
rmat_ecef_enu, rmat_enu_ecef
from colmap_processing.static_camera_model import save_static_camera, \
load_static_camera_from_file, write_camera_krtd_file
from colmap_processing.camera_models import StandardCamera
from colmap_processing.platform_pose import PlatformPoseInterp
from colmap_processing.world_models import WorldModelMesh
# ----------------------------------------------------------------------------
data_dir = '/home_user/adapt_postprocessing/data'
# Define the directory where all of the relevant COLMAP files are saved.
# If you are placing your data within the 'data' folder of this repository,
# this will be mapped to '/home/user/adapt_postprocessing/data' inside the
# Docker container.
data_dir = '/media/mattb/7e7167ba-ad6f-4720-9ced-b699f49ba3aa/noaa_adapt/change_detection'
# Reference image.
cm_dir = 'uav_video'
image_fname = '%s/ref_view.png' % cm_dir
image_subdirs = ['Circle-sUAS-Visible-Responder-2017_05_05_afternoon_flight',
'Circle-sUAS-Visible-Responder-2017_05_08_noon_flight']
# File containing correspondences between image points (columns 0 and 1) and
# latitude (degrees), longitude (degrees), and height above the WGS84
# ellipsoid (meters) (columns 2-4).
points_fname = '%s/points.txt' % cm_dir
# COLMAP data directory.
images_bin_fname = '%s/images.bin' % data_dir
camera_bin_fname = '%s/cameras.bin' % data_dir
save_dir = os.path.split(image_fname)[0]
location = 'khq'
# Ground mesh filename.
#mesh_fname = '%s/uav_circle.ply' % data_dir
mesh_fname = '/home/mattb/3d_models/uav_circle.ply'
# VTK renderings are limited to monitor resolution (width x height).
monitor_resolution = (2200, 1200)
dist = [0, 0, 0, 0]
optimize_k1 = True
optimize_k2 = False
optimize_k3 = False
optimize_k4 = False
fix_principal_point = False
fix_aspect_ratio = True
# ----------------------------------------------------------------------------
if location == 'khq':
# Meshed 3-D model used to render a synthetic view for sanity checking and
# to produce the depth map.
mesh_fname = 'mesh.ply'
mesh_lat0 = 42.86453893 # degrees
mesh_lon0 = -73.77125128 # degrees
mesh_h0 = 73 # meters above WGS84 ellipsoid
else:
raise Exception('Unrecognized location \'%s\'' % location)
# Load in point correspondences.
ret = np.loadtxt(points_fname)
im_pts = ret[:, :2]
llh = ret[:, 2:]
enu_pts = [llh_to_enu(_[0], _[1], _[2], mesh_lat0, mesh_lon0, mesh_h0)
for _ in llh]
enu_pts = np.array(enu_pts)
# Load in the image.
real_image = cv2.imread(image_fname)
if real_image.ndim == 3:
real_image = real_image[:, :, ::-1]
height, width = real_image.shape[:2]
dist = np.array(dist, dtype=np.float32)
# ------------------------------- First Pass ---------------------------------
# Set optimization parameters for first pass.
flags = cv2.CALIB_ZERO_TANGENT_DIST
flags = flags | cv2.CALIB_USE_INTRINSIC_GUESS
if fix_aspect_ratio:
flags = flags | cv2.CALIB_FIX_ASPECT_RATIO
flags = flags | cv2.CALIB_FIX_PRINCIPAL_POINT
flags = flags | cv2.CALIB_FIX_K1
flags = flags | cv2.CALIB_FIX_K2
flags = flags | cv2.CALIB_FIX_K3
flags = flags | cv2.CALIB_FIX_K4
flags = flags | cv2.CALIB_FIX_K5
flags = flags | cv2.CALIB_FIX_K6
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30000,
0.0000001)
# Try a bunch of different initial guesses for focal length.
results = []
for f in np.logspace(0, 5, 100):
K = np.identity(3)
K[0, 2] = width/2
K[1, 2] = height/2
K[0, 0] = K[1, 1] = f
ret = cv2.calibrateCamera([enu_pts.astype(np.float32)],
[im_pts.astype(np.float32)], (width, height),
cameraMatrix=K, distCoeffs=dist, flags=flags,
criteria=criteria)
err, K, dist, rvecs, tvecs = ret
if K[0, 0] > 0 and K[1, 1] > 0:
results.append(ret)
ind = np.argmin([_[0] for _ in results])
err, K, dist, rvecs, tvecs = results[ind]
print('Error:', err)
rvec, tvec = rvecs[0], tvecs[0]
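# The focal-length initialization with the lowest RMS reprojection error
# seeds the refinement passes below.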
# ----------------------------------------------------------------------------
# ------------------------------- Second Pass --------------------------------
flags = cv2.CALIB_ZERO_TANGENT_DIST
flags = flags | cv2.CALIB_USE_INTRINSIC_GUESS
if fix_aspect_ratio:
flags = flags | cv2.CALIB_FIX_ASPECT_RATIO
flags = flags | cv2.CALIB_FIX_PRINCIPAL_POINT
if not optimize_k1:
flags = flags | cv2.CALIB_FIX_K1
if not optimize_k2:
flags = flags | cv2.CALIB_FIX_K2
if not optimize_k3:
flags = flags | cv2.CALIB_FIX_K3
if not optimize_k4:
flags = flags | cv2.CALIB_FIX_K4
flags = flags | cv2.CALIB_FIX_K5
flags = flags | cv2.CALIB_FIX_K6
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30000,
0.0000001)
# Try a bunch of different initial guesses for focal length.
ret = cv2.calibrateCamera([enu_pts.astype(np.float32)],
[im_pts.astype(np.float32)], (width, height),
cameraMatrix=K, distCoeffs=dist, flags=flags,
criteria=criteria)
err, K, dist, rvecs, tvecs = ret
print('Error:', err)
rvec, tvec = rvecs[0], tvecs[0]
# ----------------------------------------------------------------------------
# ------------------------------- Third Pass --------------------------------
flags = cv2.CALIB_ZERO_TANGENT_DIST
flags = flags | cv2.CALIB_USE_INTRINSIC_GUESS
if fix_aspect_ratio:
flags = flags | cv2.CALIB_FIX_ASPECT_RATIO
# Read model into VTK.
model_reader = vtk_util.load_world_model(mesh_fname)
world_model = WorldModelMesh(mesh_fname)
if fix_principal_point:
flags = flags | cv2.CALIB_FIX_PRINCIPAL_POINT
# Read in the details of all images.
images = read_images_binary(images_bin_fname)
cameras = read_cameras_binary(camera_bin_fname)
if not optimize_k1:
flags = flags | cv2.CALIB_FIX_K1
mosaic_ind = {image_subdirs[i]:i for i in range(len(image_subdirs))}
if not optimize_k2:
flags = flags | cv2.CALIB_FIX_K2
# ---------------- Define a Camera Model for Each Camera ----------------------
if not optimize_k3:
flags = flags | cv2.CALIB_FIX_K3
# Use the camera calibration and structure-from-motion results from the
# COLMAP database to define the camera models and their pose for each image.
if not optimize_k4:
flags = flags | cv2.CALIB_FIX_K4
# Pretend image index is the time.
platform_pose_provider = PlatformPoseInterp()
for image_id in images:
image = images[image_id]
flags = flags | cv2.CALIB_FIX_K5
flags = flags | cv2.CALIB_FIX_K6
R = qvec2rotmat(image.qvec)
pos = -np.dot(R.T, image.tvec)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30000,
0.0000001)
# The qvec used by Colmap is a (w, x, y, z) quaternion representing the
# rotation of a vector defined in the world coordinate system into the
# camera coordinate system. However, the 'camera_models' module assumes
# (x, y, z, w) quaternions representing a coordinate system rotation.
quat = transformations.quaternion_inverse(image.qvec)
quat = [quat[1], quat[2], quat[3], quat[0]]
# Try a bunch of different initial guesses for focal length.
ret = cv2.calibrateCamera([enu_pts.astype(np.float32)],
[im_pts.astype(np.float32)], (width, height),
cameraMatrix=K, distCoeffs=dist, flags=flags,
criteria=criteria)
err, K, dist, rvecs, tvecs = ret
print('Error:', err)
rvec, tvec = rvecs[0], tvecs[0]
# ----------------------------------------------------------------------------
plt.close('all')
t = image_id
platform_pose_provider.add_to_pose_time_series(t, pos, quat)
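# As a quick sanity check of the convention swap (assuming Gohlke's
# `transformations` package): quaternion_inverse([1, 0, 0, 0]) returns the
# identity quaternion in (w, x, y, z) form, and the reordering above turns it
# into [0, 0, 0, 1] in (x, y, z, w) form.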
std_cams = {}
for camera_id in set([images[image_id].camera_id for image_id in images]):
colmap_camera = cameras[camera_id]
def show_err(rvec, tvec, K, dist, plot=False):
im_pts_ = cv2.projectPoints(enu_pts, rvec, tvec, K, dist)[0]
im_pts_ = np.squeeze(im_pts_)
err = np.sqrt(np.sum((im_pts - im_pts_)**2, 1))
err = np.mean(err)
if colmap_camera.model == 'OPENCV':
fx, fy, cx, cy, d1, d2, d3, d4 = colmap_camera.params
if plot:
plt.imshow(real_image)
plt.plot(im_pts.T[0], im_pts.T[1], 'bo')
plt.plot(im_pts_.T[0], im_pts_.T[1], 'ro')
for i in range(len(im_pts_)):
plt.plot([im_pts[i, 0], im_pts_[i, 0]],
[im_pts[i, 1], im_pts_[i, 1]], 'g-')
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
dist = np.array([d1, d2, d3, d4])
return err
# StandardCamera includes a specification for how the camera sits relative to
# an inertial navigation system. But COLMAP provides the camera pose directly,
# so we set the camera pose relative to the pose provider to identity
# operations.
std_cams[camera_id] = StandardCamera(colmap_camera.width,
colmap_camera.height, K, dist,
[0, 0, 0], [0, 0, 0, 1],
platform_pose_provider)
# -----------------------------------------------------------------------------
show_err(rvec, tvec, K, dist, plot=True)
def read_image(image_id):
    fname = '%s/%s' % (data_dir, images[image_id].name)
    image = cv2.imread(fname)
    if image.ndim == 3:
        image = image[:, :, ::-1]
    return image
R = cv2.Rodrigues(rvec)[0]
cam_pos = -np.dot(R.T, tvec).ravel()
# Focus on specific image.
image_ids = sorted(images.keys())
image_names = [images[image_id].name for image_id in images]
# Read model into VTK.
try:
model_reader
assert prev_loaded_fname == mesh_fname
except:
model_reader = vtk_util.load_world_model(mesh_fname)
prev_loaded_fname = mesh_fname
image_id0 = image_ids[image_names.index('Circle-sUAS-Visible-Responder-2017_05_05_afternoon_flight/DSC00135.JPG')]
day1_image_ids = [i for i in image_ids if image_subdirs[0] in images[i].name]
day2_image_ids = [i for i in image_ids if image_subdirs[1] in images[i].name]
# ----------------------------------------------------------------------------
# Map mesh on image to world coordinates.
image0 = images[image_id0]
cm0 = std_cams[image0.camera_id]
clipping_range = [1, 2000]
ret = vtk_util.render_distored_image(width, height, K, dist, cam_pos, R,
model_reader, return_depth=True,
monitor_resolution=(1920, 1080),
clipping_range=clipping_range)
img, depth, E, N, U = ret
# We used image ID as a proxy for image time. There is a state exactly at
# each image_id "time," so no spurious interpolation is done.
P = cm0.get_camera_pose(image_id0)
R = P[:3, :3]
t = P[:, 3]
pos = -np.dot(R.T, t)
latitude, longitude, altitude = enu_to_llh(cam_pos[0], cam_pos[1], cam_pos[2],
mesh_lat0, mesh_lon0, mesh_h0)
K = cm0.K.copy()
K[0, 0] /= 4
K[1, 1] /= 4
cv2.imwrite('%s/rendered_view.png' % save_dir, img[:, :, ::-1])
img, depth, X, Y, Z = vtk_util.render_distored_image(cm0.width, cm0.height,
K, np.zeros(5), pos,
R, model_reader,
return_depth=True,
monitor_resolution=monitor_resolution,
clipping_range=[0.01, 2000])
# R currently is relative to ENU coordinate system at latitude0,
# longitude0, altitude0, but we need it to be relative to latitude,
# longitude, altitude.
Rp = np.dot(rmat_enu_ecef(mesh_lat0, mesh_lon0),
rmat_ecef_enu(latitude, longitude))
gsd = np.mean(np.sqrt(np.diff(X)**2 + np.diff(Y)**2))
filename = '%s/camera_model.yaml' % save_dir
save_static_camera(filename, height, width, K, dist, np.dot(R, Rp),
depth, latitude, longitude, altitude)
# Center of frame.
xyz_center = np.array([X[cm0.height//2, cm0.width//2],
Y[cm0.height//2, cm0.width//2],
Z[cm0.height//2, cm0.width//2]])
# Save (x, y, z) meters per pixel.
if False:
filename = '%s/camera_model.krtd' % save_dir
tvec = -np.dot(R, cam_pos).ravel()
write_camera_krtd_file([K, R, tvec, dist], filename)
d = []
for image_id in other_image_ids:
image = images[image_id]
cm = std_cams[image.camera_id]
ENU = np.dstack([E, N, U]).astype(np.float32)
filename = '%s/xyz_per_pixel.npy' % save_dir
np.save(filename, ENU, allow_pickle=False)
ray_pos, ray_dir = cm.unproject([cm.width/2, cm.height/2], t=image_id)
ip = world_model.intersect_rays(ray_pos, ray_dir).ravel()
d.append(np.sqrt(np.sum((xyz_center - ip)**2)))
depth_image = depth.copy()
depth_image[depth_image > clipping_range[1]*0.9] = 0
depth_image -= depth_image.min()
depth_image /= depth_image.max()/255
depth_image = np.round(depth_image).astype(np.uint8)
other_image_ids = [other_image_ids[i] for i in np.argsort(d)]
depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
cv2.imwrite('%s/depth_visualization.png' % save_dir,
depth_image[:, :, ::-1])
\ No newline at end of file
# Compare images
plt.close('all')
plt.figure(); plt.imshow(read_image(image_id0))
plt.figure(); plt.imshow(read_image(other_image_ids[1]))
# Render mosaic.
mesh = trimesh.load(mesh_fname)
pts = mesh.vertices.T
model_bounds = np.array([[min(pts[i]), max(pts[i])] for i in range(3)])
img_stitched = np.zeros_like(img)
xyz = np.vstack([X.ravel(), Y.ravel(), Z.ravel()])
for image_id in day2_image_ids:
print(image_id)
cm = std_cams[images[image_id].camera_id]
im_pts = cm.project(xyz, t=image_id).astype(np.float32)
mapx = np.reshape(im_pts[0], X.shape)
mapy = np.reshape(im_pts[1], X.shape)
radius = 50
mask = np.logical_and(mapx > radius, mapx < cm.width - radius)
mask = np.logical_and(mask, mapy > radius)
mask = np.logical_and(mask, mapy < cm.height - radius)
img_ = cv2.remap(read_image(image_id), mapx, mapy, cv2.INTER_LINEAR)  # INTER_AREA is not supported by cv2.remap.
img_stitched[mask] = img_[mask]
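# The loop above back-projects the rendered world coordinates (X, Y, Z) into
# each day-2 image and pulls the corresponding pixels into the reference view
# with cv2.remap, accumulating a composite in the reference camera's frame.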
#! /usr/bin/python
"""
ckwg +31
Copyright 2018 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
"""
from __future__ import division, print_function
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import glob
from osgeo import osr, gdal
from scipy.optimize import fmin, minimize, fminbound
import kwcoco
import kwimage
import sys
from math import sqrt
import inspect
def human_reasable_size(size, precision=2):
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
    suffixIndex = 0
    while size > 1024 and suffixIndex < 4:
        suffixIndex += 1  # Increment the index of the suffix.
        size = size / 1024.0  # Apply the division.
    return "%.*f%s" % (precision, size, suffixes[suffixIndex])
def get_size_in_bytes(obj, seen=None):
    """Recursively finds size of objects in bytes."""
    size = sys.getsizeof(obj)
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        return 0
    # Important: mark as seen *before* entering recursion to gracefully handle
    # self-referential objects.
    seen.add(obj_id)
    if hasattr(obj, '__dict__'):
        for cls in obj.__class__.__mro__:
            if '__dict__' in cls.__dict__:
                d = cls.__dict__['__dict__']
                if inspect.isgetsetdescriptor(d) or inspect.ismemberdescriptor(d):
                    size += get_size_in_bytes(obj.__dict__, seen)
                break
    if isinstance(obj, dict):
        size += sum(get_size_in_bytes(v, seen) for v in obj.values())
        size += sum(get_size_in_bytes(k, seen) for k in obj.keys())
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        size += sum(get_size_in_bytes(i, seen) for i in obj)
    return size
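# Example usage (hypothetical object; exact byte counts vary across Python
# builds): get_size_in_bytes({'a': [1, 2, 3], 'b': 'text'}) returns the
# aggregate size of the dict, its keys, and its values.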
def ramer_douglas_peucker(points, epsilon):
"""Reduce number of points in curve using Ramer-Douglas-Peucker algorithm.
:param points: Array of points.