| """ | |
| Image processing tools | |
| Modified from open source projects: | |
| (https://github.com/nkolot/GraphCMR/) | |
| (https://github.com/open-mmlab/mmdetection) | |
| """ | |
| import numpy as np | |
| import base64 | |
| import cv2 | |
| import torch | |
| import scipy.misc | |
| def img_from_base64(imagestring): | |
| try: | |
| jpgbytestring = base64.b64decode(imagestring) | |
| nparr = np.frombuffer(jpgbytestring, np.uint8) | |
| r = cv2.imdecode(nparr, cv2.IMREAD_COLOR) | |
| return r | |
| except ValueError: | |
| return None | |
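

# A minimal round-trip sketch for img_from_base64. The dummy image, the use
# of cv2.imencode, and the _demo_* helper name are illustrative assumptions,
# not part of the original pipeline.
def _demo_img_from_base64():
    dummy = np.zeros((4, 4, 3), dtype=np.uint8)    # tiny black BGR image
    ok, buf = cv2.imencode('.jpg', dummy)          # JPEG-encode to a byte buffer
    assert ok
    encoded = base64.b64encode(buf.tobytes())      # base64 payload
    decoded = img_from_base64(encoded)
    print(decoded.shape)                           # (4, 4, 3)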


def myimrotate(img, angle, center=None, scale=1.0, border_value=0, auto_bound=False):
    """Rotate an image around `center` by `angle` degrees."""
    if center is not None and auto_bound:
        raise ValueError('`auto_bound` conflicts with `center`')
    h, w = img.shape[:2]
    if center is None:
        center = ((w - 1) * 0.5, (h - 1) * 0.5)
    assert isinstance(center, tuple)
    matrix = cv2.getRotationMatrix2D(center, angle, scale)
    if auto_bound:
        # Enlarge the output canvas so the whole rotated image is kept.
        cos = np.abs(matrix[0, 0])
        sin = np.abs(matrix[0, 1])
        new_w = h * sin + w * cos
        new_h = h * cos + w * sin
        matrix[0, 2] += (new_w - w) * 0.5
        matrix[1, 2] += (new_h - h) * 0.5
        w = int(np.round(new_w))
        h = int(np.round(new_h))
    rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
    return rotated


def myimresize(img, size, return_scale=False, interpolation='bilinear'):
    """Resize an image to `size`, passed to cv2.resize as (width, height)."""
    interp_codes = {
        'bilinear': cv2.INTER_LINEAR,
        'nearest': cv2.INTER_NEAREST,
    }
    h, w = img.shape[:2]
    resized_img = cv2.resize(
        img, (size[0], size[1]), interpolation=interp_codes[interpolation])
    if not return_scale:
        return resized_img
    else:
        w_scale = size[0] / w
        h_scale = size[1] / h
        return resized_img, w_scale, h_scale
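

# A small sketch exercising myimrotate and myimresize on a dummy image; the
# array contents are arbitrary and only the argument ordering is the point
# (myimresize's `size` is passed to cv2.resize as (width, height)).
def _demo_resize_rotate():
    dummy = np.random.randint(0, 255, (100, 80, 3), dtype=np.uint8)
    rotated = myimrotate(dummy, 30)           # same (h, w) as input since auto_bound=False
    resized = myimresize(dummy, [64, 32])     # width 64, height 32
    print(rotated.shape, resized.shape)       # (100, 80, 3) (32, 64, 3)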


def get_transform(center, scale, res, rot=0):
    """Generate transformation matrix."""
    h = 200 * scale
    t = np.zeros((3, 3))
    t[0, 0] = float(res[1]) / h
    t[1, 1] = float(res[0]) / h
    t[0, 2] = res[1] * (-float(center[0]) / h + .5)
    t[1, 2] = res[0] * (-float(center[1]) / h + .5)
    t[2, 2] = 1
    if not rot == 0:
        rot = -rot  # To match direction of rotation from cropping
        rot_mat = np.zeros((3, 3))
        rot_rad = rot * np.pi / 180
        sn, cs = np.sin(rot_rad), np.cos(rot_rad)
        rot_mat[0, :2] = [cs, -sn]
        rot_mat[1, :2] = [sn, cs]
        rot_mat[2, 2] = 1
        # Need to rotate around center
        t_mat = np.eye(3)
        t_mat[0, 2] = -res[1] / 2
        t_mat[1, 2] = -res[0] / 2
        t_inv = t_mat.copy()
        t_inv[:2, 2] *= -1
        t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t)))
    return t


def transform(pt, center, scale, res, invert=0, rot=0):
    """Transform pixel location to different reference."""
    t = get_transform(center, scale, res, rot=rot)
    if invert:
        # t = np.linalg.inv(t)
        t_torch = torch.from_numpy(t)
        t_torch = torch.inverse(t_torch)
        t = t_torch.numpy()
    new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
    new_pt = np.dot(t, new_pt)
    return new_pt[:2].astype(int) + 1
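

# Sketch of how transform maps a point from original-image coordinates into
# crop coordinates and back; the center/scale/res values below are arbitrary
# assumptions.
def _demo_transform():
    center = [320, 240]          # bbox center in the original image
    scale = 1.2                  # bbox side is 200 * scale pixels
    res = [224, 224]             # output crop resolution
    pt_orig = [320, 240]                                        # a point in the original image
    pt_crop = transform(pt_orig, center, scale, res)            # into crop coordinates
    pt_back = transform(pt_crop, center, scale, res, invert=1)  # back to image coordinates
    print(pt_crop, pt_back)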


def crop(img, center, scale, res, rot=0):
    """Crop image according to the supplied bounding box."""
    # Upper left point
    ul = np.array(transform([1, 1], center, scale, res, invert=1)) - 1
    # Bottom right point
    br = np.array(transform([res[0] + 1, res[1] + 1], center, scale, res, invert=1)) - 1
    # Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if not rot == 0:
        ul -= pad
        br += pad
    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape)
    # Range to fill new array
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
    # Range to sample from original image
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],
                                                        old_x[0]:old_x[1]]
    if not rot == 0:
        # Rotate, then remove the padding
        # new_img = scipy.misc.imrotate(new_img, rot)
        new_img = myimrotate(new_img, rot)
        new_img = new_img[pad:-pad, pad:-pad]
    # new_img = scipy.misc.imresize(new_img, res)
    new_img = myimresize(new_img, [res[0], res[1]])
    return new_img


def uncrop(img, center, scale, orig_shape, rot=0, is_rgb=True):
    """'Undo' the image cropping/resizing.

    This function is used when evaluating mask/part segmentation.
    """
    res = img.shape[:2]
    # Upper left point
    ul = np.array(transform([1, 1], center, scale, res, invert=1)) - 1
    # Bottom right point
    br = np.array(transform([res[0] + 1, res[1] + 1], center, scale, res, invert=1)) - 1
    # Size of cropped image
    crop_shape = [br[1] - ul[1], br[0] - ul[0]]
    new_img = np.zeros(orig_shape, dtype=np.uint8)
    # Range to fill new array
    new_x = max(0, -ul[0]), min(br[0], orig_shape[1]) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], orig_shape[0]) - ul[1]
    # Range to sample from original image
    old_x = max(0, ul[0]), min(orig_shape[1], br[0])
    old_y = max(0, ul[1]), min(orig_shape[0], br[1])
    # img = scipy.misc.imresize(img, crop_shape, interp='nearest')
    img = myimresize(img, [crop_shape[0], crop_shape[1]])
    new_img[old_y[0]:old_y[1], old_x[0]:old_x[1]] = img[new_y[0]:new_y[1], new_x[0]:new_x[1]]
    return new_img
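

# Round-trip sketch for crop/uncrop on a dummy image. The bounding box
# (center, scale) and the image size are arbitrary assumptions chosen so the
# crop lies inside the image.
def _demo_crop_uncrop():
    img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
    center, scale, res = [320, 240], 1.0, [224, 224]
    cropped = crop(img, center, scale, res)                    # (224, 224, 3)
    restored = uncrop(cropped.astype(np.uint8), center, scale, img.shape)
    print(cropped.shape, restored.shape)                       # (224, 224, 3) (480, 640, 3)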


def rot_aa(aa, rot):
    """Rotate axis angle parameters."""
    # pose parameters
    R = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],
                  [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0],
                  [0, 0, 1]])
    # find the rotation of the body in camera frame
    per_rdg, _ = cv2.Rodrigues(aa)
    # apply the global rotation to the global orientation
    resrot, _ = cv2.Rodrigues(np.dot(R, per_rdg))
    aa = (resrot.T)[0]
    return aa
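

# Sketch of rot_aa on a global-orientation axis-angle vector; the identity
# orientation and the 90 degree in-plane rotation are arbitrary assumptions.
def _demo_rot_aa():
    global_orient = np.array([0.0, 0.0, 0.0])   # identity orientation
    rotated = rot_aa(global_orient, 90)         # compose with a 90 degree in-plane rotation
    print(rotated)                              # approx. [0, 0, -pi/2]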


def flip_img(img):
    """Flip rgb images or masks.

    Channels come last, e.g. (256, 256, 3).
    """
    img = np.fliplr(img)
    return img


def flip_kp(kp):
    """Flip keypoints."""
    flipped_parts = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]
    kp = kp[flipped_parts]
    kp[:, 0] = -kp[:, 0]
    return kp


def flip_pose(pose):
    """Flip pose.

    The flipping is based on SMPL parameters.
    """
    flippedParts = [0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11, 15, 16, 17, 12, 13,
                    14, 18, 19, 20, 24, 25, 26, 21, 22, 23, 27, 28, 29, 33,
                    34, 35, 30, 31, 32, 36, 37, 38, 42, 43, 44, 39, 40, 41,
                    45, 46, 47, 51, 52, 53, 48, 49, 50, 57, 58, 59, 54, 55,
                    56, 63, 64, 65, 60, 61, 62, 69, 70, 71, 66, 67, 68]
    pose = pose[flippedParts]
    # we also negate the second and the third dimension of the axis-angle
    pose[1::3] = -pose[1::3]
    pose[2::3] = -pose[2::3]
    return pose
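

# Sketch of a consistent horizontal flip across image, keypoints and SMPL
# pose. The 24-joint keypoint layout (matching flipped_parts) and the
# assumption that keypoint x-coordinates are centered at zero are
# illustrative assumptions.
def _demo_flip():
    img = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
    kp = np.random.randn(24, 3)     # x, y (zero-centered), confidence
    pose = np.random.randn(72)      # 24 SMPL joints x 3 axis-angle components
    img_f = flip_img(img)
    kp_f = flip_kp(kp)
    pose_f = flip_pose(pose)
    print(img_f.shape, kp_f.shape, pose_f.shape)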


def flip_aa(aa):
    """Flip axis-angle representation.

    We negate the second and the third dimension of the axis-angle.
    """
    aa[1] = -aa[1]
    aa[2] = -aa[2]
    return aa