Example #1
def merge_mesh(in_file1, in_file2, out_file):
	filename = '%s_filter.mlx' % get_timestring()
	filter_script_path = create_merge_filter_file(filename)

	# build the meshlabserver command: two inputs, the merge filter script,
	# and one output keeping vertex normals, face normals, and vertex colors
	command = "meshlabserver -i %s %s" % (in_file1, in_file2)
	command += " -s " + filter_script_path
	command += " -o " + out_file + " -om vn fn vc"

	# FNULL is assumed to be open(os.devnull, 'w'), as in Example #2
	subprocess.call(command, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
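
Both merge examples rely on a create_merge_filter_file helper that is not shown. A minimal sketch of what it might do, assuming it writes a stock MeshLab flatten filter to a temp directory (the filter name and the location are assumptions, not from the original):

import os, tempfile

def create_merge_filter_file(filename):
    # hypothetical implementation: write an .mlx script that flattens all
    # mesh layers into one; "Flatten Visible Layers" is MeshLab's stock
    # merge filter, but the exact name varies across MeshLab versions
    script = ('<!DOCTYPE FilterScript>\n'
              '<FilterScript>\n'
              '  <filter name="Flatten Visible Layers"/>\n'
              '</FilterScript>\n')
    path = os.path.join(tempfile.gettempdir(), filename)
    with open(path, 'w') as f:
        f.write(script)
    return path
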
Example #2
def mesh_list_merge(in_file_list, out_file):
    filename = '%s_filter.mlx' % get_timestring()
    filter_script_path = create_merge_filter_file(filename=filename)

    command = "meshlabserver -i"
    for in_file in in_file_list:
        command = "%s %s" % (command, in_file)
    command += " -s " + filter_script_path
    command += " -o " + out_file + " -om vn fn vc"
    subprocess.call(command,
                    shell=True,
                    stdout=open(os.devnull, 'w'),
                    stderr=subprocess.STDOUT)
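
Building the command as one string requires shell=True and breaks on paths containing spaces. A safer variant of the same call (a sketch, not from the original) passes an argument list instead:

import os
import subprocess

def mesh_list_merge_safe(in_file_list, out_file, filter_script_path):
    # argument-list form: no shell parsing, so spaces in paths are harmless
    command = ['meshlabserver', '-i'] + list(in_file_list)
    command += ['-s', filter_script_path, '-o', out_file, '-om', 'vn', 'fn', 'vc']
    with open(os.devnull, 'w') as devnull:
        subprocess.call(command, stdout=devnull, stderr=subprocess.STDOUT)
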
Example #3
parser.add_argument('--input_channels', type=int,  # flag name is hypothetical; the snippet is cut off above this line
                    help='the number of input channels')

parser.add_argument('--train', action='store_true', help='training mode')
parser.add_argument('--val', action='store_true', help='validation mode')
parser.add_argument('--test', action='store_true', help='testing mode')
parser.add_argument('--vis', action='store_true', help='visualization mode')
parser.add_argument('--seed', type=int, default=0, help='random seed')
args = parser.parse_args()
torch.backends.cudnn.benchmark = True
prepare_seed(args.seed)

print("Loading options...")
with open('options.toml', 'r') as optionsFile:
    options = toml.loads(optionsFile.read())
args.save_dir = os.path.join(options["general"]["modelsavedir"],
                             args.modelname + '_' + get_timestring())
mkdir_if_missing(args.save_dir)
args.dataset = options["general"]["dataset"]
args.logfile = os.path.join(args.save_dir, 'log.txt')
args.logfile = open(args.logfile, 'w')  # replace the path string with an open file handle
# print_log(options, args.logfile)
print_log(args, args.logfile)
print_log('\n\nsaving to %s' % args.save_dir, log=args.logfile)

print_log('creating the model\n\n', log=args.logfile)
if args.modelname == 'C3D_CONV_BLSTM': model = C3D_CONV_BLSTM(args)
elif args.modelname == 'C3D_CONV_BLSTM_frontfix': model = C3D_CONV_BLSTM(args)
elif args.modelname == 'C3D_CONV_CONV': model = C3D_CONV_CONV(args)
elif args.modelname == 'I3D_BLSTM': model = I3D_BLSTM()
elif args.modelname == 'I3D': model = I3D()
elif args.modelname == 'I3D_BLSTM_mini': model = I3D_BLSTM_mini()
Example #4
from __future__ import print_function
import torch, toml, os
from models import LipRead
from training import Trainer
from validation import Validator
from xinshuo_miscellaneous import get_timestring, print_log
from xinshuo_io import mkdir_if_missing

print("Loading options...")
with open('options.toml', 'r') as optionsFile:
    options = toml.loads(optionsFile.read())
if options["general"]["usecudnnbenchmark"] and options["general"]["usecudnn"]:
    torch.backends.cudnn.benchmark = True
options["general"]["modelsavedir"] = os.path.join(
    options["general"]["modelsavedir"], 'trained_model_' + get_timestring())
mkdir_if_missing(options["general"]["modelsavedir"])
options["general"]["logfile"] = open(
    os.path.join(options["general"]["modelsavedir"], 'log.txt'), 'w')

print_log('saving to %s' % options["general"]["modelsavedir"],
          log=options["general"]["logfile"])

print_log('creating the model', log=options["general"]["logfile"])
model = LipRead(options)

print_log('loading model', log=options["general"]["logfile"])
if options["general"]["loadpretrainedmodel"]:
    print_log('loading the pretrained model at %s' %
              options["general"]["pretrainedmodelpath"],
              log=options["general"]["logfile"])
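
The snippet ends just before the weights are actually loaded. A typical continuation of the if-branch above (a sketch; the exact checkpoint format is an assumption) would be:

    # hypothetical continuation: load the saved state dict into the model
    pretrained_dict = torch.load(options["general"]["pretrainedmodelpath"])
    model.load_state_dict(pretrained_dict)
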
Example #5
parser.add_argument('--neighborhood_size', default=2.0, type=float)
parser.add_argument('--grid_size', default=8, type=int)

# Discriminator Options
parser.add_argument('--d_type', default='local', type=str)
parser.add_argument('--encoder_h_dim_d', default=64, type=int)
parser.add_argument('--d_learning_rate', default=5e-4, type=float)
parser.add_argument('--d_steps', default=2, type=int)
parser.add_argument('--clipping_threshold_d', default=0, type=float)

# Loss Options
parser.add_argument('--l2_loss_weight', default=0, type=float)
parser.add_argument('--best_k', default=1, type=int)

# Output
parser.add_argument('--output_dir', default='./tmp/train_%s' % get_timestring())
parser.add_argument('--print_every', default=5, type=int)
parser.add_argument('--checkpoint_every', default=100, type=int)
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=1, type=int)
parser.add_argument('--num_samples_check', default=5000, type=int)

# Misc
parser.add_argument('--use_gpu', default=1, type=int)
parser.add_argument('--timing', default=0, type=int)
parser.add_argument('--gpu_num', default="2", type=str)

def init_weights(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        # body cut off in the original snippet; a typical completion
        # (e.g., Social-GAN's train.py) Kaiming-initializes Linear layers
        # (assumes: import torch.nn as nn)
        nn.init.kaiming_normal_(m.weight)
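
init_weights is written to be passed to torch.nn.Module.apply, which invokes it on every submodule. A minimal usage sketch (the model here is hypothetical):

import torch.nn as nn

generator = nn.Sequential(nn.Linear(16, 64), nn.ReLU(), nn.Linear(64, 2))
generator.apply(init_weights)  # Kaiming-initializes both Linear layers
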
Example #6
# dir_dict['Car_20'] = 'train_20210515_12h41m52s_Car_20/results_20210516_14h55m25s'
# dir_dict['Car_50'] = 'train_20210515_12h42m09s_Car_50/results_20210516_14h55m43s'
# dir_dict['Ped_10'] = 'train_20210513_22h21m32s_Ped_10/results_20210516_15h00m19s'
# dir_dict['Ped_20'] = 'train_20210515_12h42m22s_Ped_20/results_20210516_15h00m36s'
# dir_dict['Ped_50'] = 'train_20210515_12h42m30s_Ped_50/results_20210516_15h00m56s'
# dir_dict['Cyc_10'] = 'train_20210513_21h15m39s_Cyc_10/results_20210516_14h56m23s'
# dir_dict['Cyc_20'] = 'train_20210514_15h30m28s_Cyc_20/results_20210516_14h57m01s'
# dir_dict['Cyc_50'] = 'train_20210514_15h39m29s_Cyc_50/results_20210516_14h57m19s'
# dir_dict['Mot_10'] = 'train_20210513_22h17m10s_Mot_10/results_20210516_14h51m26s'
# dir_dict['Mot_20'] = 'train_20210515_13h43m45s_Mot_20/results_20210516_14h51m33s'
# dir_dict['Mot_50'] = 'train_20210515_13h44m02s_Mot_50/results_20210516_14h52m35s'

# data = {10: {}, 20: {}, 50: {}}
# for pred_len in [10, 20, 50]:        # 1s, 2s, 5s prediction settings
data = {20: {}}
for pred_len in [20]:  # 2s prediction settings
    for obj_class in ['Car', 'Ped', 'Cyc', 'Mot']:
        key = '%s_%d' % (obj_class, pred_len)
        dir_tmp = dir_dict[key]
        path_tmp = os.path.join(root_dir, dir_tmp, 'results.json')
        print('loading results from %s' % path_tmp)
        with open(path_tmp, 'r') as file:
            data_tmp = json.load(file)

        # copy each dict to the single final dictionary
        data[pred_len][obj_class] = data_tmp[str(pred_len)][obj_class]

print('saving')
save_file = os.path.join(root_dir, 'results_all_%s.json' % get_timestring())
with open(save_file, 'w') as outfile:
    json.dump(data, outfile)
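
After the merge, the saved file is keyed by prediction horizon first and object class second (json.dump turns the integer key into a string). Schematically (a sketch of the layout, not actual data):

# results_all_<timestamp>.json, schematically:
# {
#   "20": {              # prediction horizon in frames (2s)
#     "Car": {...},      # copied from the per-class results.json files
#     "Ped": {...},
#     "Cyc": {...},
#     "Mot": {...}
#   }
# }
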
Example #7
class InferenceConfig(Config):  # class header reconstructed (base class assumed); the snippet is cut off above
	# batch size is 1 since inference runs on one image at a time: batch size = GPU_COUNT * IMAGES_PER_GPU
	NAME = 'evaluate_%s' % train_dataset
	GPU_COUNT = 1
	IMAGES_PER_GPU = 1
	# DETECTION_MIN_CONFIDENCE = 0
	if train_dataset == 'coco': NUM_CLASSES = 1 + 80
	elif train_dataset == 'cityscape': NUM_CLASSES = 1 + len(cityscape_class_names)
	elif train_dataset == 'kitti': NUM_CLASSES = 1 + len(kitti_class_names)
	else: assert False, 'error'
config = InferenceConfig()
# config.DETECTION_MIN_CONFIDENCE = 0.7
config.DETECTION_MIN_CONFIDENCE = 0

for epoch in epoch_list_to_evaluate:
	##--------------------------------- Data Directory ----------------------------------##
	results_name = 'maskrcnn_bbox_detection_results_%s_%s_%s_epoch%d_%s_fulldetection' % (train_dataset, split, model_folder, epoch, get_timestring())
	split_file = os.path.join(kitti_dir, 'mykitti/object/mysplit/%s.txt' % split)
	images_dir = os.path.join(data_dir, 'image_2')
	save_dir = os.path.join(data_dir, 'results/%s' % results_name); mkdir_if_missing(save_dir)
	vis_dir = os.path.join(save_dir, 'visualization'); mkdir_if_missing(vis_dir)
	log_file = os.path.join(save_dir, 'log.txt'); log_file = open(log_file, 'w')
	bbox_eval_folder = os.path.join(save_dir, 'data'); mkdir_if_missing(bbox_eval_folder)
	mask_dir = os.path.join(save_dir, 'masks'); mkdir_if_missing(mask_dir)
	label_bbox_match_dir = os.path.join(save_dir, 'label_bbox_matching'); mkdir_if_missing(label_bbox_match_dir)
	detection_result_filepath = os.path.join(save_dir, 'mask_results.txt'); detection_results_file = open(detection_result_filepath, 'w')

	##--------------------------------- Model Directory ----------------------------------##
	if train_dataset == 'coco': model_path = os.path.join(root_dir, '../models/mask_rcnn_coco.pth')  # path to trained weights file
	elif train_dataset == 'cityscape': model_path = '/media/xinshuo/Data/models/mask_rcnn_pytorch/%s/mask_rcnn_cityscape_%04d.pth' % (model_folder, epoch)
	elif train_dataset == 'kitti': model_path = '/media/xinshuo/Data/models/mask_rcnn_pytorch/%s/mask_rcnn_kitti_%04d.pth' % (model_folder, epoch)
	else: model_path = os.path.join(root_dir, 'resnet50_imagenet.pth')  # path to ImageNet-pretrained weights
Example #8
def evaluate(args, loader, generator, num_samples, path):
    # ade_outer, fde_outer = [], []
    ade_all, fde_all = AverageMeter(), AverageMeter()
    total_obj = 0
    pred_len = args.pred_len
    dataset_name = args.dataset_name
    obj_class = dataset_name.split('_')[1][:3]

    save_dir, _, _ = fileparts(path)
    save_dir = os.path.join(save_dir, 'results_%s' % get_timestring())
    mkdir_if_missing(save_dir)
    result_file_single = os.path.join(save_dir, 'results.json')
    result_dict = dict()
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,
             non_linear_ped, loss_mask, seq_start_end, id_frame) = batch
            # obs_traj          frames x objects x 2
            # pred_traj_gt      frames x objects x 2
            # seq_start_end     (start, end) pedestrian indices per sequence, used for pooling at every timestamp
            # id_frame          2*frames x objects x 3
            # loss_mask         objects x 2*frames

            num_obs = obs_traj.size(0)
            num_objects = obs_traj.size(1)
            id_frame_pred = id_frame[num_obs:]  # frames x obj x 3
            loss_mask_pred = loss_mask[:, num_obs:]  # objects x seq_len

            ade, fde = [], []
            for sample_index in range(num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(
                    pred_traj_fake_rel, obs_traj[-1])  # frames x objects x 2

                # save results
                for object_index in range(num_objects):
                    id_frame_tmp = id_frame_pred[:, object_index, :]
                    frame = int(id_frame_tmp[0, 0].item())

                    # seqname should be the same across frames
                    seq = np.unique(id_frame_tmp[:, -1].cpu().clone().numpy())
                    assert len(seq) == 1, 'error'
                    seqname = int2seqname(seq[0])  # AIODrive only

                    # ID should be the same across frames
                    ID = np.unique(id_frame_tmp[:, 1].cpu().clone().numpy())
                    assert len(ID) == 1, 'error'
                    ID = int(ID[0])

                    # saving to individual frames
                    final_results = torch.cat(
                        [id_frame_tmp[:, :2], pred_traj_fake[:, object_index, :]],
                        dim=-1).cpu().clone().numpy()
                    save_path = os.path.join(save_dir, seqname,
                                             'frame_%06d' % frame,
                                             'sample_%03d.txt' % sample_index)
                    mkdir_if_missing(save_path)
                    with open(save_path, 'a') as f:
                        np.savetxt(f, final_results, fmt="%.3f")

                    # saving to a single file; result format:
                    # {seqname: {frame: {sample: {ID: {'state': N x 2, 'prob': 1.0}}}}}
                    sample_dict = result_dict.setdefault(seqname, {}) \
                                             .setdefault(frame, {}) \
                                             .setdefault(sample_index, {})
                    sample_dict[ID] = {
                        'state': pred_traj_fake[:, object_index, :].cpu().clone().numpy().tolist(),
                        'prob': 1.0,
                    }

                # compute ADE
                ade_tmp = displacement_error(
                    pred_traj_fake,
                    pred_traj_gt,
                    mode='raw',
                    mask=loss_mask_pred
                )  # list of ade for each object in the batch
                ade.append(ade_tmp)  # list of error for all samples

                # select the right last timestamp for FDE computation, i.e., not select the last frame if masked out
                pred_traj_last = []
                gt_traj_last = []
                for obj_tmp in range(num_objects):
                    loss_mask_tmp = loss_mask_pred[obj_tmp]  # seq_len
                    good_index = torch.nonzero(loss_mask_tmp)
                    if good_index.size(0) == 0:
                        # no valid future frame: fill with NaN (0/0) so this
                        # object is excluded from the error computation
                        pred_traj_last.append(torch.zeros(2).cuda() / 0)
                        gt_traj_last.append(torch.zeros(2).cuda() / 0)
                    else:
                        last_index = torch.max(good_index)
                        pred_traj_last.append(pred_traj_fake[last_index,
                                                             obj_tmp, :])
                        gt_traj_last.append(pred_traj_gt[last_index,
                                                         obj_tmp, :])
                gt_traj_last = torch.stack(gt_traj_last, dim=0)  # num_obj x 2
                pred_traj_last = torch.stack(pred_traj_last,
                                             dim=0)  # num_obj x 2

                # compute FDE
                fde_tmp = final_displacement_error(pred_traj_last,
                                                   gt_traj_last,
                                                   mode='raw')
                fde.append(fde_tmp)  # list of error for all samples

            # select the one sample with the minimum errors, remove nan
            num_invalid = torch.sum(torch.isnan(ade_tmp))
            num_valid = pred_traj_gt.size(1) - num_invalid
            total_obj += num_valid  # only count objects with at least one valid (non-padded) future frame
            ade_ave, num_obj = best_of_K(ade, seq_start_end, err_type='ADE')
            fde_ave, num_obj = best_of_K(fde, seq_start_end, err_type='FDE')
            ade_all.update(ade_ave, n=num_obj)
            fde_all.update(fde_ave, n=num_obj)

        actual_len = pred_len * args.skip
        final_dict = {actual_len: {obj_class: result_dict}}
        with open(result_file_single, 'w') as outfile:
            json.dump(final_dict, outfile)

        return ade_all.avg, fde_all.avg
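
Example #8 depends on an AverageMeter for the running ADE/FDE statistics; it is not shown in the snippet, but the conventional implementation (a sketch following the well-known PyTorch-examples pattern) looks like this:

class AverageMeter(object):
    """Keeps a running sum and average of a metric."""
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        # val is a per-batch average covering n items
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
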