Example #1
    def __init__(self, robot, human_model):
        param_list = [
            "hit_human_penalty", "normalize_sigma", "care_about_distance",
            "eef_link_name"
        ]
        namespace = "/cost_func/"
        param_dict = util.set_params(param_list, namespace)

        CostFunction.__init__(self, params=param_dict)
        self.robot = robot  #TODO replace with imported variable
        self.human_model = human_model
        self.robotDOF = rospy.get_param("/robot_description_kinematics/ndof")
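The constructor above only shows the call site of util.set_params. A minimal sketch of what such a helper could look like for a ROS node is given below; this is an assumption about the util module, not its actual implementation.

import rospy

def set_params(param_list, namespace):
    """Fetch each named parameter under `namespace` from the ROS parameter server."""
    params = {}
    for name in param_list:
        # rospy.get_param raises KeyError if the parameter has not been loaded
        params[name] = rospy.get_param(namespace + name)
    return params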
Example #2

    def __init__(self, start_state):
        self.start_state = copy.deepcopy(start_state)

        #specify which parameters I would like to use
        params = [
            "dt", "mass", "human_avoidance", "drag", "force_max",
            "certainty_speed_max"
        ]
        namespace = "/human_model/"
        self.params = util.set_params(params, namespace)

        #list to keep track of human positions for displaying
        self.human_positions = []
        self.current_state = copy.deepcopy(start_state)
        self.human_positions.append(start_state.position)

        marker_wrapper.show_position_marker(label="human \n start\n\n",
                                            position=start_state.position,
                                            ident=1,
                                            color=(1, 0, 0))
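This model reads its configuration from the /human_model/ namespace, so those values have to be on the ROS parameter server before the constructor runs. One hypothetical way to load them is sketched below; the values are made-up placeholders, and in practice they would usually come from a launch or YAML file.

import rospy

human_model_params = {
    "dt": 0.05,
    "mass": 1.0,
    "human_avoidance": 0.5,
    "drag": 0.1,
    "force_max": 10.0,
    "certainty_speed_max": 1.0,
}
for name, value in human_model_params.items():
    rospy.set_param("/human_model/" + name, value)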
Example #3

    #more options to be added
    
    args = parser.parse_args() #temporary

    Dir = args.axis
    basis_set = args.obs
    geomA = args.geom_act
    geomB = args.geom_env
    WD = args.wkd
    debug = args.debug 
    
    #basis_set : defined from input

    print("Read input ... ")
    
    imp_opts, calc_params = util.set_params(args.inputfile)
    func = calc_params['func_type'] # from input.inp. default : blyp
    
    #set the basis and options suitable for the calculation
    psi4.set_options({'basis' : args.obs,
                      'puream': 'True',
                      'DF_SCF_GUESS': 'False',
                      'scf_type': 'direct',
                      'cubeprop_tasks': ['density'],
                      'e_convergence': 1e-8,
                      'd_convergence': 1e-8})
    
    print('CHECK : basis from options block :%s\n' % (psi4.core.get_global_option('BASIS')))
    ene = None 
    active_wfn = None
    enviro_wfn = None
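In this script util.set_params takes an input file instead of a parameter list and returns two dictionaries, so it presumably parses input.inp into import options and calculation parameters. A rough sketch under that assumption follows; the file format and section names are invented for illustration and are not the real util.set_params.

def set_params(inputfile):
    # Hypothetical parser for a simple sectioned key/value input file.
    imp_opts = {}
    calc_params = {"func_type": "blyp"}  # default functional, as noted in the comment above
    section = None
    with open(inputfile) as f:
        for raw in f:
            line = raw.split("#", 1)[0].strip()   # drop comments and blank lines
            if not line:
                continue
            if line.startswith("[") and line.endswith("]"):
                section = line[1:-1].lower()       # e.g. [import] or [calc]
            elif ":" in line:
                key, val = (s.strip() for s in line.split(":", 1))
                (imp_opts if section == "import" else calc_params)[key] = val
    return imp_opts, calc_params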
Example #4
def main():
    global args, best_error, viz
    args = util.set_params(parser)
    logging.info("[starting]" * 10)
    train_writer = SummaryWriter(args.save_path / 'train')
    val_writer = SummaryWriter(args.save_path / 'val')
    output_writers = []
    if args.log_output:
        for i in range(3):
            output_writers.append(
                SummaryWriter(args.save_path / 'val' / str(i)))
    torch.manual_seed(args.seed)

    # Data loading code
    mean = [0.5, 0.5, 0.5]
    std = [0.2, 0.2, 0.2]
    normalize = transforms.Normalize(mean=mean, std=std)
    input_transform = transforms.Compose([
        co_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]), normalize
    ])
    target_transform = transforms.Compose(
        [co_transforms.Clip(0, 100),
         co_transforms.ArrayToTensor()])
    co_transform = co_transforms.Compose([
        co_transforms.RandomVerticalFlip(),
        co_transforms.RandomHorizontalFlip()
    ])

    logging.info("=> fetching scenes in '{}'".format(args.data))
    train_set, val_set = datasets.still_box(args.data,
                                            transform=input_transform,
                                            target_transform=target_transform,
                                            co_transform=co_transform,
                                            split=args.split,
                                            seed=args.seed)
    logging.info(
        '{} samples found, {} train scenes and {} validation samples '.format(
            len(val_set) + len(train_set), len(train_set), len(val_set)))
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if args.epoch_size == 0:
        args.epoch_size = len(train_loader)
    # create model
    if args.pretrained:
        data = torch.load(args.pretrained)
        assert (not data['with_confidence'])
        print("=> using pre-trained model '{}'".format(data['arch']))
        model = models.DepthNet(batch_norm=data['bn'],
                                clamp=args.clamp,
                                depth_activation=args.activation_function)
        model.load_state_dict(data['state_dict'])
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.DepthNet(batch_norm=args.bn,
                                clamp=args.clamp,
                                depth_activation=args.activation_function)
    model = model.to(device)
    logging.info("Model created")
    # if torch.cuda.device_count() > 1:
    # print("%"*100)
    # print("Let's use", torch.cuda.device_count(), "GPUs!")
    # # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    # model = torch.nn.DataParallel(model, device_ids=device_ids)

    # if torch.cuda.is_available():
    # print("&"*100)
    # model.cuda()

    #model = torch.nn.DataParallel(model.cuda(1), device_ids=device_ids)
    cudnn.benchmark = True

    assert (args.solver in ['adam', 'sgd'])
    print('=> setting {} solver'.format(args.solver))
    if args.solver == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     betas=(args.momentum, args.beta),
                                     weight_decay=args.weight_decay)
    elif args.solver == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    dampening=args.momentum)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[19, 30, 44, 53], gamma=0.3)
    logging.info("Optimizer created")

    with open(os.path.join(args.save_path, args.log_summary), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow([
            'train_loss', 'train_depth_error', 'normalized_train_depth_error',
            'depth_error', 'normalized_depth_error'
        ])

    with open(os.path.join(args.save_path, args.log_full), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow(['train_loss', 'train_depth_error'])

    term_logger = TermLogger(n_epochs=args.epochs,
                             train_size=min(len(train_loader),
                                            args.epoch_size),
                             test_size=len(val_loader))
    term_logger.epoch_bar.start()
    logging.info("Validate")
    if args.evaluate:
        depth_error, normalized = validate(val_loader, model, 0, term_logger,
                                           output_writers)
        term_logger.test_writer.write(
            ' * Depth error : {:.3f}, normalized : {:.3f}'.format(
                depth_error, normalized))
        return
    logging.info("epoch loop for %d time" % args.epochs)
    for epoch in range(args.epochs):
        logging.info("<epoch>=%d :start" % epoch)
        term_logger.epoch_bar.update(epoch)
        #scheduler.module.step()
        scheduler.step()

        # train for one epoch
        logging.info("train for one epoch: start       ")
        term_logger.reset_train_bar()
        term_logger.train_bar.start()
        logging.info("it might take more than 3min     ")
        train_loss, train_error, train_normalized_error = train(
            train_loader, model, optimizer, args.epoch_size, term_logger,
            train_writer)
        logging.info("train for one epoch: done         ")

        term_logger.train_writer.write(
            ' * Avg Loss : {:.3f}, Avg Depth error : {:.3f}, normalized : {:.3f}'
            .format(train_loss, train_error, train_normalized_error))
        train_writer.add_scalar('metric_error', train_error, epoch)
        train_writer.add_scalar('metric_normalized_error',
                                train_normalized_error, epoch)

        # evaluate on validation set
        logging.info("evaluate on validation set")
        term_logger.reset_test_bar()
        term_logger.test_bar.start()
        depth_error, normalized = validate(val_loader, model, epoch,
                                           term_logger, output_writers)
        term_logger.test_writer.write(
            ' * Depth error : {:.3f}, normalized : {:.3f}'.format(
                depth_error, normalized))
        val_writer.add_scalar('metric_error', depth_error, epoch)
        val_writer.add_scalar('metric_normalized_error', normalized, epoch)

        if best_error < 0:
            best_error = depth_error

        # remember lowest error and save checkpoint
        logging.info("remember lowest error and save checkpoint")
        is_best = depth_error < best_error
        best_error = min(depth_error, best_error)
        util.save_checkpoint(
            args.save_path, {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_error': best_error,
                'bn': args.bn,
                'with_confidence': False,
                'activation_function': args.activation_function,
                'clamp': args.clamp,
                'mean': mean,
                'std': std
            }, is_best)

        with open(os.path.join(args.save_path, args.log_summary),
                  'a') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t')
            writer.writerow([train_loss, train_error, depth_error])
        logging.info("epoch=%d done" % epoch)
    term_logger.epoch_bar.finish()
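In this training script util.set_params wraps the argparse parser, and later code joins args.save_path with the '/' operator, which suggests the helper also turns the save path into a pathlib.Path. A minimal sketch under those assumptions (the directory layout and timestamp format are invented):

import datetime
from pathlib import Path

def set_params(parser):
    # Hypothetical wrapper: parse CLI arguments and prepare a save directory.
    args = parser.parse_args()
    timestamp = datetime.datetime.now().strftime("%m-%d-%H.%M")
    args.save_path = Path("checkpoints") / getattr(args, "arch", "model") / timestamp
    args.save_path.mkdir(parents=True, exist_ok=True)
    return args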
Example #5
def main():
    global args, best_error, viz
    args = util.set_params(parser)

    train_writer = SummaryWriter(args.save_path / 'train')
    val_writer = SummaryWriter(args.save_path / 'val')
    output_writers = []
    if args.log_output:
        for i in range(3):
            output_writers.append(
                SummaryWriter(args.save_path / 'val' / str(i)))
    torch.manual_seed(args.seed)

    # Data loading code
    mean = [0.5, 0.5, 0.5]
    std = [0.2, 0.2, 0.2]
    normalize = transforms.Normalize(mean=mean, std=std)
    input_transform = transforms.Compose([
        co_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]), normalize
    ])
    target_transform = transforms.Compose(
        [co_transforms.Clip(0, 100),
         co_transforms.ArrayToTensor()])
    co_transform = co_transforms.Compose([
        co_transforms.RandomVerticalFlip(),
        co_transforms.RandomHorizontalFlip()
    ])

    print("=> fetching scenes in '{}'".format(args.data))
    train_set, val_set = datasets.still_box(args.data,
                                            transform=input_transform,
                                            target_transform=target_transform,
                                            co_transform=co_transform,
                                            split=args.split,
                                            seed=args.seed)
    print(
        '{} samples found, {} train scenes and {} validation samples '.format(
            len(val_set) + len(train_set), len(train_set), len(val_set)))
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if args.epoch_size == 0:
        args.epoch_size = len(train_loader)
    # create model
    if args.pretrained:
        data = torch.load(args.pretrained)
        assert (not data['with_confidence'])
        print("=> using pre-trained model '{}'".format(data['arch']))
        model = models.DepthNet(batch_norm=data['bn'],
                                clamp=args.clamp,
                                depth_activation=args.activation_function)
        model.load_state_dict(data['state_dict'])
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.DepthNet(batch_norm=args.bn,
                                clamp=args.clamp,
                                depth_activation=args.activation_function)

    model = model.cuda()
    cudnn.benchmark = True

    assert (args.solver in ['adam', 'sgd'])
    print('=> setting {} solver'.format(args.solver))
    if args.solver == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     betas=(args.momentum, args.beta),
                                     weight_decay=args.weight_decay)
    elif args.solver == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    dampening=args.momentum)

    with open(os.path.join(args.save_path, args.log_summary), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow([
            'train_loss', 'train_depth_error', 'normalized_train_depth_error',
            'depth_error', 'normalized_depth_error'
        ])

    with open(os.path.join(args.save_path, args.log_full), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow(['train_loss', 'train_depth_error'])

    term_logger = TermLogger(n_epochs=args.epochs,
                             train_size=min(len(train_loader),
                                            args.epoch_size),
                             test_size=len(val_loader))
    term_logger.epoch_bar.start()

    if args.evaluate:
        depth_error, normalized = validate(val_loader, model, 0, term_logger,
                                           output_writers)
        term_logger.test_writer.write(
            ' * Depth error : {:.3f}, normalized : {:.3f}'.format(
                depth_error, normalized))
        return

    for epoch in range(args.epochs):
        term_logger.epoch_bar.update(epoch)
        util.adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        term_logger.reset_train_bar()
        term_logger.train_bar.start()
        train_loss, train_error, train_normalized_error = train(
            train_loader, model, optimizer, args.epoch_size, term_logger,
            train_writer)
        term_logger.train_writer.write(
            ' * Avg Loss : {:.3f}, Avg Depth error : {:.3f}, normalized : {:.3f}'
            .format(train_loss, train_error, train_normalized_error))
        train_writer.add_scalar('metric_error', train_error, epoch)
        train_writer.add_scalar('metric_normalized_error',
                                train_normalized_error, epoch)

        # evaluate on validation set
        term_logger.reset_test_bar()
        term_logger.test_bar.start()
        depth_error, normalized = validate(val_loader, model, epoch,
                                           term_logger, output_writers)
        term_logger.test_writer.write(
            ' * Depth error : {:.3f}, normalized : {:.3f}'.format(
                depth_error, normalized))
        val_writer.add_scalar('metric_error', depth_error, epoch)
        val_writer.add_scalar('metric_normalized_error', normalized, epoch)

        if best_error < 0:
            best_error = depth_error

        # remember lowest error and save checkpoint
        is_best = depth_error < best_error
        best_error = min(depth_error, best_error)
        util.save_checkpoint(
            args.save_path, {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_EPE': best_error,
                'bn': args.bn,
                'with_confidence': False,
                'activation_function': args.activation_function,
                'clamp': args.clamp,
                'mean': mean,
                'std': std
            }, is_best)

        with open(os.path.join(args.save_path, args.log_summary),
                  'a') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t')
            writer.writerow([train_loss, train_error, depth_error])
    term_logger.epoch_bar.finish()
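Unlike Example #4, which uses a MultiStepLR scheduler, this version calls util.adjust_learning_rate(optimizer, epoch) once per epoch. A plausible sketch of such a helper is given below; the step-decay schedule and default values are assumptions, not the actual util implementation.

def adjust_learning_rate(optimizer, epoch, base_lr=1e-4, decay=0.5, step=30):
    # Step decay: halve the learning rate every `step` epochs (assumed schedule).
    lr = base_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr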