Example #1
def create_models():

    CNN_type = GLOBALS.CONFIG['CNN_model']

    Dense_NN, CNN = get_network(CNN_type,
                                dense_layers=GLOBALS.CONFIG['dense_model'],
                                CNN_input_shape=GLOBALS.CONFIG['CNN_input_shape'],
                                input_shape=GLOBALS.CONFIG['input_shape'])

    #CNN.load_weights

    if GLOBALS.CONFIG['pretrained']:
        CNN.trainable = False

    Multi_Input = tf.keras.layers.concatenate([Dense_NN.output, CNN.output])

    # Not updated since commit 63
    model = Model(inputs=[Dense_NN.input, CNN.input],
                  outputs=create_concat_network(Multi_Input), name="combined")
    if GLOBALS.CONFIG['pretrained']:
        model.load_weights(GLOBALS.CONFIG['pretrained_weights_path'])
    print("model.name (combined): %s" % model.name)
    print("model.name (CNN): %s" % CNN.name)
    print("model.trainable (CNN): %s" % CNN.trainable)
    print("model.name (Dense): %s" % Dense_NN.name)
    print("model.trainable (Dense_NN): %s" % Dense_NN.trainable)
    model.summary()

    optimizer_functions = {'Adam': keras.optimizers.Adam,
                           'SGD': keras.optimizers.SGD,
                           'RMSProp': keras.optimizers.RMSprop,
                           'Adadelta': keras.optimizers.Adadelta}
    optimizer = optimizer_functions[GLOBALS.CONFIG['optimizer']](lr=GLOBALS.CONFIG['learning_rate'])

    with suppress_stdout():
        model.compile(optimizer=optimizer, loss=GLOBALS.CONFIG['loss_function'],
                      metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
    return model, optimizer
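Note: suppress_stdout is not defined in these excerpts. A minimal stand-in, assuming it is simply a context manager that silences prints while Keras compiles the model, might look like this (a sketch, not the project's actual helper):

import contextlib
import io

@contextlib.contextmanager
def suppress_stdout():
    # Route sys.stdout into a throwaway buffer for the duration of the block.
    with contextlib.redirect_stdout(io.StringIO()):
        yield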
Example #2
def main():
    args = get_args()
    # Hard-coded CIFAR-10 channel statistics; immediately overridden below by
    # the dataset's own values, so they only serve as a fallback.
    args.mean = torch.tensor((0.4914, 0.4822, 0.4465)).view(3, 1, 1).cuda()
    args.std = torch.tensor((0.2471, 0.2435, 0.2616)).view(3, 1, 1).cuda()
    dataset = get_dataset(args)
    args.mean = dataset.mean
    args.std = dataset.std
    args.dataset = dataset
    model = get_network(args)
    model.load_state_dict(torch.load(args.checkpoint)["state_dict"])
    eval(model, args, dataset)
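Note: eval here is the project's own evaluation routine, shadowing Python's built-in eval, and the checkpoint is assumed to be a dict with a "state_dict" entry, matching the format saved by the resume logic in Example #10.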
Example #3
def create_CNN():
    CNN_type = GLOBALS.CONFIG['CNN_model']
    _, CNN = get_network(CNN_type,
                         dense_layers=GLOBALS.CONFIG['dense_model'],
                         CNN_input_shape=GLOBALS.CONFIG['CNN_input_shape'],
                         CNN_output_shape=GLOBALS.CONFIG['CNN_output_shape'],
                         input_shape=GLOBALS.CONFIG['input_shape'])
    # Only 'Adam' is mapped here; any other configured optimizer name raises KeyError.
    optimizer_functions = {'Adam': keras.optimizers.Adam}
    optimizer = optimizer_functions[GLOBALS.CONFIG['optimizer']](lr=GLOBALS.CONFIG['learning_rate'])

    model = CNN

    with suppress_stdout():
        model.compile(optimizer=optimizer, loss=GLOBALS.CONFIG['loss_function'],
                      metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
    return model
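This variant compiles the CNN branch on its own: it mirrors Example #1 but skips the dense branch and the concatenation step.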
Example #4
def reset(self, learning_rate: float) -> None:
    self.performance_statistics = dict()
    self.network = get_network(name=self.config['network'],
                               num_classes=self.num_classes)
    self.metrics = Metrics(list(self.network.parameters()),
                           p=self.config['p'])
    # TODO add other parallelisms
    if self.device == 'cpu':
        print("Resetting cpu-based network")
    elif self.dist:
        if self.gpu is not None:
            torch.cuda.set_device(self.gpu)
            self.network.cuda(self.gpu)
            self.network = torch.nn.parallel.DistributedDataParallel(
                self.network, device_ids=[self.gpu])
        else:
            self.network.cuda()
            self.network = torch.nn.parallel.DistributedDataParallel(
                self.network)
    elif self.gpu is not None:
        torch.cuda.set_device(self.gpu)
        self.network = self.network.cuda(self.gpu)
    else:
        if isinstance(self.network, VGG):
            self.network.features = torch.nn.DataParallel(
                self.network.features)
            self.network.cuda()
        else:
            self.network = torch.nn.DataParallel(self.network)
    self.optimizer, self.scheduler = get_optimizer_scheduler(
        optim_method=self.config['optimizer'],
        lr_scheduler=self.config['scheduler'],
        init_lr=learning_rate,
        net_parameters=self.network.parameters(),
        listed_params=list(self.network.parameters()),
        train_loader_len=len(self.train_loader),
        mini_batch_size=self.config['mini_batch_size'],
        max_epochs=self.config['max_epochs'],
        optimizer_kwargs=self.config['optimizer_kwargs'],
        scheduler_kwargs=self.config['scheduler_kwargs'])
    self.early_stop.reset()
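Note the ordering in the GPU branches: torch.cuda.set_device(self.gpu) runs before the network is moved and before DistributedDataParallel is constructed, so that allocations and the DDP process-to-device binding land on the intended device.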
Example #5
def create_config_file(agent_id: int, config_dir, pbt_input_base_dir):
    agent = models.get_agent(agent_id)
    evolution = models.get_evolution(agent["evolution"])
    evolution_size = int(evolution["steps_stop"] - evolution["steps_start"])
    network = models.get_network(evolution["network"])
    with open(os.path.join(config_dir, "base_agent_config.yaml")) as base_config:
        cfg = yaml.safe_load(base_config)

        # Write config
        with open(get_config_file_name(agent["uuid"], config_dir),
                  'w') as config_file:
            name = str(network["name"]) + "_" + str(agent["uuid"])

            cfg['gpu'] = int(agent['gpu'])
            cfg['name'] = str(name)
            cfg['training']['lr_values'] = [float(agent["lr_values"])]
            cfg['training']['lr_boundaries'] = []
            cfg['training']['total_steps'] = evolution_size
            cfg['training']['checkpoint_steps'] = evolution_size
            cfg['training']['test_steps'] = int(1000)

            cfg['model'] = {
                'filters': int(network["filters"]),
                'policy_channels': int(network["policy_channels"]),
                'residual_blocks': int(network["residual_blocks"]),
                'se_ratio': int(network["se_ratio"])
            }

            cfg['dataset'] = {
                'input_train': get_pbt_train_path(evolution["iteration"],
                                                  pbt_input_base_dir),
                'input_test': get_pbt_test_path(evolution["iteration"],
                                                pbt_input_base_dir),
                'num_chunks': evolution_size * 25,
                'train_ratio': float(20 / 25)
            }

            yaml.dump(cfg, config_file)
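get_config_file_name is defined elsewhere in the project; purely as a hypothetical illustration of the call above, it might look like:

import os

def get_config_file_name(uuid, config_dir):
    # One YAML config per agent, keyed by its UUID (naming scheme assumed).
    return os.path.join(config_dir, "agent_{}.yaml".format(uuid))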
Example #6
def run():
    args = parse_args()

    # Set-up output directories
    dt = datetime.datetime.now().strftime('%y%m%d_%H%M')
    net_desc = '{}_{}'.format(dt, '_'.join(args.description.split()))

    save_folder = os.path.join(args.outdir, net_desc)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    tb = tensorboardX.SummaryWriter(os.path.join(args.logdir, net_desc))

    # Load Dataset
    logging.info('Loading {} Dataset...'.format(args.dataset.title()))
    Dataset = get_dataset(args.dataset)

    train_dataset = Dataset(args.dataset_path,
                            start=0.0,
                            end=args.split,
                            ds_rotate=args.ds_rotate,
                            random_rotate=True,
                            random_zoom=True,
                            include_depth=args.use_depth,
                            include_rgb=args.use_rgb)
    train_data = torch.utils.data.DataLoader(train_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.num_workers)
    val_dataset = Dataset(args.dataset_path,
                          start=args.split,
                          end=1.0,
                          ds_rotate=args.ds_rotate,
                          random_rotate=False,
                          random_zoom=False,
                          include_depth=args.use_depth,
                          include_rgb=args.use_rgb)
    val_data = torch.utils.data.DataLoader(val_dataset,
                                           batch_size=1,
                                           shuffle=False,
                                           num_workers=args.num_workers)
    logging.info('Done')

    # Load the network
    logging.info('Loading Network...')
    input_channels = 1 * args.use_depth + 3 * args.use_rgb
    ggcnn = get_network(args.network)

    net = ggcnn(input_channels=input_channels)
    device = torch.device("cpu")
    # device = torch.device("cuda:0")
    net = net.to(device)
    optimizer = optim.Adam(net.parameters())
    logging.info('Done')

    # Print model architecture.
    summary(net, (input_channels, 200, 200))
    f = open(os.path.join(save_folder, 'arch.txt'), 'w')
    sys.stdout = f
    summary(net, (input_channels, 200, 200))
    sys.stdout = sys.__stdout__
    f.close()

    best_iou = 1000.0  # 'iou' here actually tracks validation loss (lower is better)
    for epoch in range(args.epochs):
        logging.info('Beginning Epoch {:02d}'.format(epoch))
        train_results = train(epoch,
                              net,
                              device,
                              train_data,
                              optimizer,
                              args.batches_per_epoch,
                              vis=args.vis)

        # Log training losses to tensorboard
        tb.add_scalar('loss/train_loss', train_results['loss'], epoch)
        for n, l in train_results['losses'].items():
            tb.add_scalar('train_loss/' + n, l, epoch)

        # Run Validation
        logging.info('Validating...')
        test_results = validate(net, device, val_data, args.val_batches)
        #logging.info('%d/%d = %f' % (test_results['correct'], test_results['correct'] + test_results['failed'],
        #                             test_results['correct']/(test_results['correct']+test_results['failed'])))

        # Log validation results to tensorboard
        #tb.add_scalar('loss/IOU', test_results['correct'] / (test_results['correct'] + test_results['failed']), epoch)
        tb.add_scalar('loss/val_loss', test_results['loss'], epoch)
        for n, l in test_results['losses'].items():
            tb.add_scalar('val_loss/' + n, l, epoch)

        # Save best performing network. IoU scoring is disabled in this
        # variant, so the validation loss stands in for 'iou'.
        # iou = test_results['correct'] / (test_results['correct'] + test_results['failed'])
        iou = test_results['loss']
        # Save when this epoch beats the previous best, and always save the
        # first epoch and every 10th epoch.
        if iou < best_iou or epoch == 0 or (epoch % 10) == 0:
            torch.save(
                net,
                os.path.join(save_folder,
                             'epoch_%02d_iou_%0.2f' % (epoch, iou)))
            if iou < best_iou:  # only advance the best score when it improved
                best_iou = iou
Example #7
def run():
    args = parse_args()

    # Vis window
    if args.vis:
        cv2.namedWindow('Display', cv2.WINDOW_NORMAL)

    # Set-up output directories
    dt = datetime.datetime.now().strftime('%y%m%d_%H%M')
    net_desc = '{}_{}'.format(dt, '_'.join(args.description.split()))

    save_folder = os.path.join(args.outdir, net_desc)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    tb = tensorboardX.SummaryWriter(os.path.join(args.logdir, net_desc))

    # Load Dataset
    logging.info('Loading {} Dataset...'.format(args.dataset.title()))
    Dataset = get_dataset(args.dataset)

    train_dataset = Dataset(args.dataset_path,
                            start=0.0,
                            end=args.split,
                            ds_rotate=args.ds_rotate,
                            random_rotate=True,
                            random_zoom=True,
                            include_depth=args.use_depth,
                            include_rgb=args.use_rgb)
    train_data = torch.utils.data.DataLoader(train_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.num_workers)
    val_dataset = Dataset(args.dataset_path,
                          start=args.split,
                          end=1.0,
                          ds_rotate=args.ds_rotate,
                          random_rotate=True,
                          random_zoom=True,
                          include_depth=args.use_depth,
                          include_rgb=args.use_rgb)
    val_data = torch.utils.data.DataLoader(val_dataset,
                                           batch_size=1,
                                           shuffle=False,
                                           num_workers=args.num_workers)
    logging.info('Done')

    # Load the network
    logging.info('Loading Network...')
    input_channels = 1 * args.use_depth + 3 * args.use_rgb
    ggcnn = get_network(args.network)

    # net = ggcnn(input_channels=input_channels)
    print(torch.cuda.is_available())
    print(torch.cuda.device_count())
    device = torch.device("cuda:0")

    # net = torch.load("./ggcnn_weights_cornell/ggcnn_epoch_23_cornell",map_location=device)
    # net = torch.load("output/models2/cnn3/epoch_50_iou_0.49",map_location=device)
    net = torch.load("output/models2/211209_2216_/epoch_49_iou_0.22",
                     map_location=device)
    # net = net.to(device)
    optimizer = optim.Adam(net.parameters())
    logging.info('Done')

    # Print model architecture.
    summary(net, (input_channels, 300, 300))
    f = open(os.path.join(save_folder, 'arch.txt'), 'w')
    sys.stdout = f
    summary(net, (input_channels, 300, 300))
    sys.stdout = sys.__stdout__
    f.close()
    # torch.load(os.path.join(save_folder,"epoch_10_iou_0.00_statedict.pt"))
    best_iou = 0.0
    for epoch in range(args.epochs + 1):
        logging.info('Beginning Epoch {:02d}'.format(epoch))
        train_results = train(epoch,
                              net,
                              device,
                              train_data,
                              optimizer,
                              args.batches_per_epoch,
                              vis=args.vis)

        # Log training losses to tensorboard
        tb.add_scalar('loss/train_loss', train_results['loss'], epoch)
        for n, l in train_results['losses'].items():
            tb.add_scalar('train_loss/' + n, l, epoch)

        # Run Validation
        logging.info('Validating...')
        test_results = validate(net, device, val_data, args.val_batches)
        logging.info('%d/%d = %f' %
                     (test_results['correct'], test_results['correct'] +
                      test_results['failed'], test_results['correct'] /
                      (test_results['correct'] + test_results['failed'])))

        # Log validation results to tensorboard
        tb.add_scalar(
            'loss/IOU', test_results['correct'] /
            (test_results['correct'] + test_results['failed']), epoch)
        tb.add_scalar('loss/val_loss', test_results['loss'], epoch)
        for n, l in test_results['losses'].items():
            tb.add_scalar('val_loss/' + n, l, epoch)

        # Save best performing network
        iou = test_results['correct'] / (test_results['correct'] +
                                         test_results['failed'])
        if iou > best_iou or epoch == 0 or (epoch % 10) == 0:
            torch.save(
                net,
                os.path.join(save_folder,
                             'epoch_%02d_iou_%0.2f' % (epoch, iou)))
            torch.save(
                net.state_dict(),
                os.path.join(
                    save_folder,
                    'epoch_%02d_iou_%0.2f_statedict.pt' % (epoch, iou)))
            if iou > best_iou:
                best_iou = iou
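torch.save(net, ...) pickles the whole module, while the *_statedict.pt file holds only the weights. A sketch of reloading each form (file names illustrative):

# Full pickled module: the model's class must be importable at load time.
net = torch.load(os.path.join(save_folder, 'epoch_49_iou_0.22'),
                 map_location=device)

# state_dict: rebuild the architecture first, then restore the weights.
net = ggcnn(input_channels=input_channels)
net.load_state_dict(torch.load(
    os.path.join(save_folder, 'epoch_49_iou_0.22_statedict.pt'),
    map_location=device))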
Example #8
def run(args, save_folder, log_folder):
    tb = tensorboardX.SummaryWriter(log_folder)

    # Load Dataset
    logging.info('Loading {} Dataset...'.format(args.dataset.title()))
    Dataset = get_dataset(args.dataset)

    train_dataset = Dataset(args.dataset_path, start=0.0, end=args.split, ds_rotate=args.ds_rotate,
                            random_rotate=True, random_zoom=True,
                            include_depth=args.use_depth, include_rgb=args.use_rgb)
    train_data = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers
    )
    val_dataset = Dataset(args.dataset_path, start=args.split, end=1.0, ds_rotate=args.ds_rotate,
                          random_rotate=True, random_zoom=True,
                          include_depth=args.use_depth, include_rgb=args.use_rgb)
    val_data = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers
    )
    logging.info('Done')

    # Load the network
    logging.info('Loading Network...')
    input_channels = 1*args.use_depth + 3*args.use_rgb
    ggcnn = get_network(args.network)

    net = ggcnn(input_channels=input_channels)
    device = torch.device("cuda:0")
    net = net.to(device)
    optimizer = optim.Adam(net.parameters())
    logging.info('Done')

    # Print model architecture.
    summary(net, (input_channels, 300, 300))
    f = open(os.path.join(save_folder, 'arch.txt'), 'w')
    sys.stdout = f
    summary(net, (input_channels, 300, 300))
    sys.stdout = sys.__stdout__
    f.close()

    best_iou = 0.0
    for epoch in range(args.epochs):
        logging.info('Beginning Epoch {:02d}'.format(epoch))
        train_results = train(epoch, net, device, train_data, optimizer,
                              args.batches_per_epoch, vis=args.vis)

        # Log training losses to tensorboard
        tb.add_scalar('loss/train_loss', train_results['loss'], epoch)
        for n, l in train_results['losses'].items():
            tb.add_scalar('train_loss/' + n, l, epoch)

        # Run Validation
        logging.info('Validating...')
        test_results = validate(net, device, val_data, args.val_batches)
        logging.info('%d/%d = %f' % (test_results['correct'], test_results['correct'] + test_results['failed'],
                                     test_results['correct']/(test_results['correct']+test_results['failed'])))

        # Log validation results to tensorboard
        tb.add_scalar('loss/IOU', test_results['correct'] /
                      (test_results['correct'] + test_results['failed']), epoch)
        tb.add_scalar('loss/val_loss', test_results['loss'], epoch)
        for n, l in test_results['losses'].items():
            tb.add_scalar('val_loss/' + n, l, epoch)

        # Save best performing network
        iou = test_results['correct'] / (test_results['correct'] + test_results['failed'])
        if iou > best_iou or epoch == 0 or (epoch % 10) == 0:
            torch.save(net, os.path.join(save_folder, 'epoch_%02d_iou_%0.2f' % (epoch, iou)))
            torch.save(net.state_dict(), os.path.join(
                save_folder, 'epoch_%02d_iou_%0.2f_statedict.pt' % (epoch, iou)))
            if iou > best_iou:  # only advance the best score when it improved
                best_iou = iou
Example #9
if GLOBALS.CONFIG is None:
    print("error in initialize_hyper")
    sys.exit(1)

# load dense model

# load data

# print("start initializing dataset")
# initialize_datasets()
# print("finished initializing dataset")
data_dict = create_data()

GLOBALS.CONFIG['input_shape'] = data_dict['validation_stats'].shape[1]
Dense_NN, _ = get_network(GLOBALS.CONFIG['CNN_model'],
                          dense_layers=GLOBALS.CONFIG['dense_model'],
                          CNN_input_shape=GLOBALS.CONFIG['CNN_input_shape'],
                          input_shape=GLOBALS.CONFIG['input_shape'])

Dense_NN.summary()  # summary() already prints; wrapping it in print() just adds 'None'

# load weights
# path = "Output_Files\dense_nn\output_folder_sequential_0.1_None_raw_dataset_3\model_weights\model_weights.h5"
# path = "Output_Files\dense_output_layer_1\output_folder_functional_3_0.001_None_raw_dataset_50\model_weights\model_weights.h5"
# path = "Output_Files\dense_output_layer_1_attempt2\output_folder_functional_3_0.001_None_raw_dataset_3\model_weights\model_weights.h5"
# path = "Output_Files\dense_output_layer_1_attempt2\output_folder_functional_5_0.002_None_raw_dataset_3\model_weights\model_weights.h5"
# path = "Output_Files\dense_nn\output_folder_sequential_0.1_None_raw_dataset_3\model_weights\model_weights.h5"

# MAPE of 64.7
path = r"Output_Files\dense_nn\output_folder_sequential_1e-04_None_raw_dataset_250\model_weights\model_weights.h5"

Dense_NN.load_weights(path)
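Note: the weight path above is written as a raw string so that Windows back-slash sequences such as \d are not interpreted as escape characters; the commented-out alternatives would need the same treatment if re-enabled.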
Example #10
def main():
    args = get_args()

    if not os.path.exists(args.fname):
        os.makedirs(args.fname)
    if not os.path.exists(args.checkpoints):
        os.makedirs(args.checkpoints)

    if args.tensorboard:
        from tensorboardX import SummaryWriter

        # Run name: drop the first 8 characters of fname (apparently a fixed
        # output-directory prefix).
        writer = SummaryWriter(args.fname[8:])
    else:
        import wandb

        wandb.init(
            project=args.project,
            name=args.fname.replace("/", "_")[8:],
            config=args.__dict__,
            settings=wandb.Settings(_disable_stats=True),
        )
        writer = None

    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format="[%(asctime)s] - %(message)s",
        datefmt="%Y/%m/%d %H:%M:%S",
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(args.fname, "output.log")),
            logging.StreamHandler(),
        ],
    )

    logger.info(args)
    logger.info(git_version())

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    dataset = get_dataset(args)

    args.mean = dataset.mean
    args.std = dataset.std
    args.dataset = dataset
    model = get_network(args)

    opt = torch.optim.SGD(model.parameters(),
                          args.lr,
                          momentum=0.9,
                          weight_decay=args.weight_decay)
    scheduler = Lr_schedule(
        opt, milestones=[int(v) for v in args.lr_adjust.split(",")], gamma=0.1)

    if args.resume_checkpoint != "":
        state = torch.load(args.resume_checkpoint)
        model.load_state_dict(state["state_dict"])
        opt.load_state_dict(state["optimizer"])
        args.epoch = state["epoch"] + 1

    if not args.no_amp:
        # from torch.cuda.amp.grad_scaler import GradScaler
        # scaler = GradScaler()
        # args.scaler = scaler

        from apex import amp

        model, opt = amp.initialize(model,
                                    opt,
                                    opt_level="O1",
                                    loss_scale=1.0,
                                    verbosity=False)
        args.amp = amp

    args.opt = opt

    attack = get_attack(args, model=model)
    defense = get_defense(args, model, attack)

    trainer = Trainer(
        args=args,
        model=model,
        dataset=dataset,
        logger=logger,
        optimizer=opt,
        scheduler=scheduler,
        attack=attack,
        writer=writer,
        defense=defense,
    )
    trainer.train()

    # logger.info("Begin evaluating last")
    # eval(model, args, dataset, logger)

    if not args.tensorboard:
        wandb.finish()
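The commented-out GradScaler lines above hint at the native torch.cuda.amp alternative to apex. A minimal sketch of that training-step pattern, with a hypothetical loader and criterion (illustrative, not the author's code):

scaler = torch.cuda.amp.GradScaler()
for x, y in loader:                    # hypothetical training loop
    opt.zero_grad()
    with torch.cuda.amp.autocast():    # forward pass in mixed precision
        loss = criterion(model(x), y)
    scaler.scale(loss).backward()      # scale the loss to avoid fp16 underflow
    scaler.step(opt)                   # unscales gradients, then steps
    scaler.update()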
Example #11
    if args.jacquard_output and args.dataset != 'jacquard':
        raise ValueError(
            '--jacquard-output can only be used with the --dataset jacquard option.'
        )
    if args.jacquard_output and args.augment:
        raise ValueError(
            '--jacquard-output can not be used with data augmentation.')

    return args


if __name__ == '__main__':
    args = parse_args()

    # Load Network: get_network resolves the architecture class, but the
    # trained model itself is unpickled from disk below (so ggcnn is unused here).
    ggcnn = get_network(args.network)
    model = torch.load(args.path, map_location='cpu')
    #print(model)

    depth_im = np.load(
        "/home/silvia/dex-net/data/datasets/fc_4/tensors/depth_images_00000.npz"
    )['arr_0'][50]
    depth_tensor = torch.from_numpy(
        depth_im.reshape((200, 200)).astype(np.float32)
    ).reshape((1, 1, 200, 200))
    device = torch.device("cpu")

    xc = depth_tensor.to(device)
    pos_pred, cos_pred, sin_pred = model(xc)

    x_n = xc.detach().numpy().reshape(200, 200)
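Note: pos_pred, cos_pred and sin_pred are the network's output maps; in the GG-CNN family these correspond to per-pixel grasp quality and the grasp angle encoded as cos(2θ) and sin(2θ).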