Skip to content

Commit

Permalink
merge normalLoader and dataLoader, add consistency checks
Browse files Browse the repository at this point in the history
Also adjust normals GT paths to match generate_normals.py output.
Normals GT is now separate for left and right cameras.
Add option to use camera image path format used by the `raw_data_downloader.sh` script.
  • Loading branch information
valgur committed Mar 23, 2020
1 parent b973737 commit 025c49c
Show file tree
Hide file tree
Showing 5 changed files with 68 additions and 121 deletions.
118 changes: 64 additions & 54 deletions dataloader/dataLoader.py
Original file line number Diff line number Diff line change
@@ -1,72 +1,82 @@
from __future__ import print_function

import os
import os.path
import numpy as np
from os.path import join, exists

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
parent_path = os.path.dirname(ROOT_DIR)
# File extensions recognized as images (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    """Return True if *filename* ends with a known image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))

def dataloader(data_dir, separate_raw_dir=False):
    """Collect matching KITTI training sample paths from four data sources.

    Walks the ``*_sync`` sequences under ``data_depth_velodyne/train`` and,
    for each camera (``image_02`` / ``image_03``), keeps only the file names
    present in all four sources: camera images, sparse lidar depth, depth
    ground truth and normals ground truth. Inconsistencies between the
    sources are reported with warnings on stdout.

    Args:
        data_dir: Root directory containing the ``data_depth_velodyne``,
            ``data_depth_annotated`` and ``normals_gt`` trees.
        separate_raw_dir: If True, camera images are read from the
            ``raw/<date>/<seq>/<cam>/data`` layout produced by the
            ``raw_data_downloader.sh`` script instead of from inside the
            sparse depth tree.

    Returns:
        Tuple of four parallel lists of file paths:
        ``(images, lidars, normals_gt, depths_gt)``.
    """
    images = []
    lidars = []
    depths_gt = []
    normals_gt = []

    if separate_raw_dir:
        imgs_root = join(data_dir, 'raw')
    sparse_depth_root = join(data_dir, 'data_depth_velodyne/train')
    depth_gt_root = join(data_dir, 'data_depth_annotated/train')
    normals_gt_root = join(data_dir, 'normals_gt/train')

    # Sorted for a deterministic sample order across runs/machines.
    seqs = sorted(seq for seq in os.listdir(sparse_depth_root) if seq.endswith('_sync'))

    for seq in seqs:
        # Sequence names look like '2011_09_26_drive_0001_sync'; the date
        # prefix is the top-level directory in the raw-data layout.
        date = seq.split('_drive')[0]
        for cam_dir in ('image_02', 'image_03'):
            if separate_raw_dir:
                imgs_path = join(imgs_root, date, seq, cam_dir, 'data')
            else:
                imgs_path = join(sparse_depth_root, seq, cam_dir, 'data')
            lidars_path = join(sparse_depth_root, seq, 'proj_depth/velodyne_raw', cam_dir)
            depth_gt_path = join(depth_gt_root, seq, 'proj_depth/groundtruth', cam_dir)
            normals_gt_path = join(normals_gt_root, seq, cam_dir)

            # Skip this camera of the sequence if any source dir is absent.
            if not exists(imgs_path):
                print("Warning: missing data dir", imgs_path)
                continue
            if not exists(lidars_path):
                print("Warning: missing data dir", lidars_path)
                continue
            if not exists(depth_gt_path):
                print("Warning: missing data dir", depth_gt_path)
                continue
            if not exists(normals_gt_path):
                print("Warning: missing data dir", normals_gt_path)
                continue

            img_files = set(os.listdir(imgs_path))
            lidar_files = set(os.listdir(lidars_path))
            depth_gt_files = set(os.listdir(depth_gt_path))
            normals_gt_files = set(os.listdir(normals_gt_path))

            # The raw camera stream has more frames than the depth GT
            # (usually 10 extra; drive 0009 is a known exception with 14 —
            # empirically determined, TODO confirm against the dataset).
            img_depth_diff = 14 if seq == '2011_09_26_drive_0009_sync' else 10
            max_size = max(len(img_files) - img_depth_diff,
                           len(lidar_files), len(depth_gt_files), len(normals_gt_files))
            if len(img_files) - img_depth_diff < max_size:
                print("Warning:", max_size + img_depth_diff - len(img_files), "files missing in", imgs_path)
            if len(lidar_files) < max_size:
                print("Warning:", max_size - len(lidar_files), "files missing in", lidars_path)
            if len(depth_gt_files) < max_size:
                print("Warning:", max_size - len(depth_gt_files), "files missing in", depth_gt_path)
            if len(normals_gt_files) < max_size:
                print("Warning:", max_size - len(normals_gt_files), "files missing in", normals_gt_path)

            # Keep only samples present in every source, in sorted order so
            # the four output lists stay aligned sample-by-sample.
            common_files = sorted(
                img_files &
                lidar_files &
                depth_gt_files &
                normals_gt_files
            )
            images += [join(imgs_path, f) for f in common_files]
            lidars += [join(lidars_path, f) for f in common_files]
            depths_gt += [join(depth_gt_path, f) for f in common_files]
            normals_gt += [join(normals_gt_path, f) for f in common_files]

    return images, lidars, normals_gt, depths_gt


if __name__ == '__main__':
    # Quick manual check: pass the dataset root directory as the first
    # CLI argument. (The old unused `datapath = ''` placeholder is removed.)
    import sys
    from pprint import pprint

    result = dataloader(sys.argv[1])
    print("Found", len(result[0]), "samples")
    # Show the first few (image, lidar, normals_gt, depth_gt) path tuples.
    pprint(list(zip(*result))[:3])
63 changes: 0 additions & 63 deletions dataloader/nomalLoader.py

This file was deleted.

2 changes: 1 addition & 1 deletion trainings/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
if args.cuda:
torch.cuda.manual_seed(args.seed)

all_left_img,all_sparse,all_depth = lsn.dataloader(datapath)
all_left_img, all_sparse, _, all_depth = lsn.dataloader(datapath)

TrainImgLoader = torch.utils.data.DataLoader(
DA.myImageFloder(all_left_img,all_sparse,all_depth, True),
Expand Down
2 changes: 1 addition & 1 deletion trainings/trainD.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
if args.cuda:
torch.cuda.manual_seed(args.seed)

all_left_img,all_sparse,all_depth = lsn.dataloader(datapath)
all_left_img, all_sparse, _, all_depth = lsn.dataloader(datapath)

TrainImgLoader = torch.utils.data.DataLoader(
DA.myImageFloder(all_left_img,all_sparse,all_depth, True),
Expand Down
4 changes: 2 additions & 2 deletions trainings/trainN.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from torch.autograd import Variable
import torch.nn.functional as F
import time
from dataloader import nomalLoader as lsn
from dataloader import dataLoader as lsn
from dataloader import trainLoaderN as DA
from submodels import *

Expand Down Expand Up @@ -48,7 +48,7 @@
all_normal = []
all_gts = []
if args.model == 'normal':
all_left_img, all_normal, all_gts = lsn.dataloader(datapath)
all_left_img, all_normal, all_gts, _ = lsn.dataloader(datapath)


print(len(all_left_img))
Expand Down

0 comments on commit 025c49c

Please sign in to comment.