Example #1
    def model_fn(features, labels, mode):
        features, s = features['features'], features['s']
        y_pred = get_model(features, opts)

        tf.summary.image("green-is-predicted",
                         render_steering_tf(features, labels, s, y_pred))

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {'prediction': y_pred}
            return tf.estimator.EstimatorSpec(mode=mode,
                                              predictions=predictions)

        loss = get_loss(y_pred, labels)
        if mode == tf.estimator.ModeKeys.TRAIN:
            global_step = tf.train.get_global_step()
            lr = tf.train.exponential_decay(learning_rate=opts.learning_rate,
                                            global_step=global_step,
                                            decay_steps=1,
                                            decay_rate=opts.learning_decay)
            optimizer = tf.train.AdamOptimizer(lr)
            train_op = optimizer.minimize(loss, global_step=global_step)
            return tf.estimator.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op)
        elif mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss)
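A minimal usage sketch for the model_fn above (TF 1.x Estimator API); make_input_fn is a hypothetical input pipeline and opts is assumed to be defined in the enclosing scope, as in the snippet:

estimator = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir='/tmp/steering_model')
estimator.train(input_fn=make_input_fn('train'), max_steps=10000)
print(estimator.evaluate(input_fn=make_input_fn('eval')))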
Example #2
    def build_model(self):
        """
        Instantiate the model, loss criterion, and optimizer
        """

        # instantiate anchor boxes
        anchor_boxes = AnchorBox(new_size=self.new_size,
                                 config=self.anchor_config)
        self.anchor_boxes = anchor_boxes.get_boxes()

        if torch.cuda.is_available() and self.use_gpu:
            self.anchor_boxes = self.anchor_boxes.cuda()

        # instantiate model
        self.model = get_model(config=self.config, anchors=self.anchor_boxes)

        # instantiate loss criterion
        self.criterion = get_loss(config=self.config)

        # instantiate optimizer
        self.optimizer = optim.SGD(params=self.model.parameters(),
                                   lr=self.lr,
                                   momentum=self.momentum,
                                   weight_decay=self.weight_decay)

        self.scaler = GradScaler()

        # print network
        self.print_network(self.model)

        # use gpu if enabled
        if torch.cuda.is_available() and self.use_gpu:
            self.model.cuda()
            self.criterion.cuda()
def main(args):

    print(f'Name: {args.name}')
    print('Data preparing...')
    test_loader = get_dataloader(path=args.test_path,
                                 mode='test',
                                 side_size=args.side_size,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_workers)
    print('Data prepared!')
    checkpoints_path = f'checkpoints/{args.name}'

    losses = {'cross': nn.CrossEntropyLoss, 'focal': FocalLoss}

    device = torch.device('cuda')

    model = get_model(args.model_type, args.num_classes)

    criterion = losses[args.loss_type]()

    weights = torch.load(os.path.join(checkpoints_path,
                                      f'epoch_{args.epoch}.pth'),
                         map_location='cpu')
    model.load_state_dict(weights['model'])

    columns = ['Test loss', 'Test accuracy']
    information = (pd.DataFrame(columns=columns),
                   f'{args.name}_test_{args.epoch}')
    test(model, criterion, device, test_loader, args, information,
         checkpoints_path)
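A hypothetical CLI entry point for the main() above; the argument names mirror the attributes it reads, and the defaults are placeholders:

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', required=True)
    parser.add_argument('--test_path', required=True)
    parser.add_argument('--side_size', type=int, default=224)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--model_type', default='resnet18')
    parser.add_argument('--num_classes', type=int, default=2)
    parser.add_argument('--loss_type', choices=['cross', 'focal'], default='cross')
    parser.add_argument('--epoch', type=int, required=True)
    main(parser.parse_args())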
def initTF():
    global tf_session, ops
    with tf.device("/gpu:" + str(GPU_INDEX)):
        pointclouds_pl, labels_pl, _ = model.placeholder_inputs(1, NUM_POINT)
        print(tf.shape(pointclouds_pl))
        is_training_pl = tf.placeholder(tf.bool, shape=())

        pred, _ = model.get_model(pointclouds_pl,
                                  is_training_pl,
                                  NUM_CLASSES,
                                  hyperparams=PARAMS)

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    tf_session = tf.Session(config=config)

    # Restore variables from disk.
    saver.restore(tf_session, CHECKPOINT)
    print("Model restored.")

    ops = {
        "pointclouds_pl": pointclouds_pl,
        "is_training_pl": is_training_pl,
        "pred": pred
    }
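A minimal inference sketch against the globals populated by initTF(); the input shape (1, NUM_POINT, 3) is an assumption about what model.placeholder_inputs returns:

import numpy as np

initTF()
dummy_cloud = np.random.rand(1, NUM_POINT, 3).astype(np.float32)
pred_val = tf_session.run(ops["pred"], feed_dict={
    ops["pointclouds_pl"]: dummy_cloud,
    ops["is_training_pl"]: False,
})
print(pred_val.shape)  # per-point class scores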
    def __init__(self, config, num_classes, train_triplet=False):
        """

        :param config: configuration parameters
        :param num_classes: number of classes in the training set; type int
        :param train_triplet: whether to train only the triplet loss; type bool
        """
        self.num_classes = num_classes

        self.model_name = config.model_name
        self.last_stride = config.last_stride
        self.num_gpus = torch.cuda.device_count()
        print('Using {} GPUS'.format(self.num_gpus))
        print('NUM_CLASS: {}'.format(self.num_classes))
        print('USE LOSS: {}'.format(config.selected_loss))

        # Load the model; use DataParallel whenever a GPU is available, and convert to sync_bn when there are multiple GPUs
        self.model = get_model(self.model_name, self.num_classes, self.last_stride)
        if torch.cuda.is_available():
            self.model = torch.nn.DataParallel(self.model)
            if self.num_gpus > 1:
                self.model = convert_model(self.model)
            self.model = self.model.cuda()

        # Load hyperparameters
        self.epoch = config.epoch

        # Instantiate the Solver class that implements the various sub-routines
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.solver = Solver(self.model, self.device)

        # Load the loss function
        self.criterion = Loss(self.model_name, config.selected_loss, config.margin, self.num_classes)

        # Load the optimizer
        self.optim = get_optimizer(config, self.model)

        # Load the learning-rate decay schedule
        self.scheduler = get_scheduler(config, self.optim)

        # Create the directory for saving weights
        self.model_path = os.path.join(config.save_path, config.model_name)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        # If only the triplet loss is trained
        if train_triplet:
            self.solver.load_checkpoint(os.path.join(self.model_path, '{}.pth'.format(self.model_name)))

        # Save the config as a JSON file and initialize TensorBoard
        TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
        self.writer = SummaryWriter(log_dir=os.path.join(self.model_path, TIMESTAMP))
        with codecs.open(self.model_path + '/' + TIMESTAMP + '.json', 'w', "utf-8") as json_file:
            json.dump({k: v for k, v in config._get_kwargs()}, json_file, ensure_ascii=False)

        # Set the random seed; keep it fixed when splitting train/validation folds for cross-validation
        self.seed = int(time.time())
        seed_torch(self.seed)
        with open(self.model_path + '/' + TIMESTAMP + '.pkl', 'wb') as f:
            pickle.dump({'seed': self.seed}, f, -1)
Example #6
    def __init__(self, config):
        super(Trainer, self).__init__()
        self.use_cuda = torch.cuda.is_available()
        self.device = 'cuda' if self.use_cuda else 'cpu'
        # self.device ='cuda:1'

        # model
        self.modef = config['model']
        self.model = get_model(config)
        self.input_dims = config['input_dims']
        self.z_dims = config['z_dims']
        self.prior = distributions.MultivariateNormal(torch.zeros(self.z_dims),
                                                      torch.eye(self.z_dims))

        # train
        self.max_iter = config['max_iter']
        self.global_iter = 1
        self.mseWeight = config['mse_weight']
        self.lr = config['lr']
        self.beta1 = config['beta1']
        self.beta2 = config['beta2']
        self.optim = optim.Adam(self.model.parameters(),
                                lr=self.lr,
                                betas=(self.beta1, self.beta2))
        self.implicit = 'implicit' in config and config['implicit']
        if self.implicit:
            self.train_inst = self.implicit_inst

        # saving
        self.ckpt_dir = config['ckpt_dir']
        os.makedirs(self.ckpt_dir, exist_ok=True)
        self.ckpt_name = config['ckpt_name']
        self.save_output = config['save_output']
        self.output_dir = config['output_dir']
        os.makedirs(self.output_dir, exist_ok=True)
        # saving
        if config['cont'] and self.ckpt_name is not None:
            self.load_checkpoint(self.ckpt_name)

        self.meta = defaultdict(list)

        self.gather_step = config['gather_step']
        self.display_step = config['display_step']
        self.save_step = config['save_step']

        # data
        self.dset_dir = config['dset_dir']
        self.dataset = config['dataset']
        self.data_type = config['data_type']
        if self.data_type == 'linear':
            self.draw_reconstruction = self.linear_reconstruction
            self.draw_generated = self.linear_generated
            self.visualize_traverse = self.linear_traverse
            self.traversal = self.linear_traversal
        self.batch_size = config['batch_size']
        self.img_size = config.get('image_size', 32)
        self.data_loader = get_dataset(config)
        self.val_loader = get_dataset(config, train=False)
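For reference, a placeholder config dict covering the keys this constructor reads (values are illustrative only; get_model and get_dataset may require further keys):

config = {
    'model': 'vae', 'input_dims': 784, 'z_dims': 10,
    'max_iter': 100000, 'mse_weight': 1.0,
    'lr': 1e-4, 'beta1': 0.9, 'beta2': 0.999,
    'implicit': False,
    'ckpt_dir': 'checkpoints', 'ckpt_name': None, 'cont': False,
    'save_output': True, 'output_dir': 'outputs',
    'gather_step': 1000, 'display_step': 500, 'save_step': 5000,
    'dset_dir': 'data', 'dataset': 'dsprites', 'data_type': 'image',
    'batch_size': 64, 'image_size': 32,
}
trainer = Trainer(config)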
Example #7
def train_model_once(subject_id, i_valid_fold, config, model_state_dict=None):
    # Data loading
    data = get_dataset(subject_id, i_valid_fold,
                       config["experiment"]["dataset"], config)
    # import pickle
    # from base.base_data_loader import BaseDataLoader
    # from braindecode.datautil.splitters import split_into_train_valid_test
    # pickle_path = os.path.abspath(
    #     os.path.join(os.path.dirname(__file__), '..',
    #                  'data/bcic_iv_2a_all_9_subjects.pickle'))
    # with open(pickle_path, 'rb') as f:
    #     data = pickle.load(f)
    # data = data[0]
    # train, valid, test = split_into_train_valid_test(data, 4, 0)
    # data = BaseDataLoader(train, valid, test, 4)

    # Build model architecture
    model = get_model(data, model_state_dict, config)

    # Set iterator and metric function handle
    iterator = get_iterator(model, data, config)
    predict_label_func = get_prediction_func(config)

    # Get function handle of loss
    loss_function = get_loss(config)

    # Build optimizer, learning rate scheduler
    stop_criterion = get_stop_criterion(config)
    optimizer = get_optmizer(model, config)

    print(model)

    # Init trainer and train
    trainer = Trainer(
        data.train_set,
        data.validation_set,
        data.test_set,
        model,
        optimizer,
        iterator,
        loss_function,
        stop_criterion,
        model_constraint=MaxNormDefaultConstraint(),
        cuda=torch.cuda.is_available(),
        func_compute_pred_labels=predict_label_func,
        siamese=(config["experiment"]["type"] == "ccsa_da"),
    )
    trainer.train()

    # Save results
    log_training_results(trainer)
    file_state_dict = save_result_and_model(trainer, model, config)
    return file_state_dict
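A hypothetical invocation of train_model_once; only the config keys referenced directly in the snippet are shown, and the get_* helpers read additional settings from the same dict:

config = {
    "experiment": {"dataset": "bcic_iv_2a", "type": "standard"},
    # ... further entries consumed by get_model / get_iterator / get_loss / get_optmizer
}
file_state_dict = train_model_once(subject_id=1, i_valid_fold=0, config=config)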
Example #8
    def __init__(self, config, num_train_classes, pth_path, test_dataloader,
                 num_query):
        """

        :param config: configuration parameters
        :param num_train_classes: number of classes in the training set, used to initialize the model; type int
        :param pth_path: path to the weight file; type str
        :param test_dataloader: DataLoader for the test dataset
        :param num_query: number of query samples; type int
        """
        self.num_train_classes = num_train_classes

        self.model_name = config.model_name
        self.last_stride = config.last_stride
        self.dist = config.dist
        self.num_gpus = torch.cuda.device_count()
        print('Using {} GPUS'.format(self.num_gpus))

        # Load the model; use DataParallel whenever a GPU is available, and convert to sync_bn when there are multiple GPUs
        self.model = get_model(self.model_name, self.num_train_classes,
                               self.last_stride)
        if torch.cuda.is_available():
            self.model = torch.nn.DataParallel(self.model)
            if self.num_gpus > 1:
                self.model = convert_model(self.model)
            self.model = self.model.cuda()

        # Instantiate the Solver class that implements the various sub-routines
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.solver = Solver(self.model, self.device)

        # Load the weights
        self.model = self.solver.load_checkpoint(pth_path)
        self.model.eval()

        # For each query sample, retrieve the 200 nearest samples from the gallery
        self.num_choose = 200
        self.num_query = num_query
        self.test_dataloader = test_dataloader

        self.pic_path_query = os.path.join(config.dataset_root, '初赛A榜测试集',
                                           'query_a')
        self.pic_path_gallery = os.path.join(config.dataset_root, '初赛A榜测试集',
                                             'gallery_a')

        self.demo_names = os.listdir('dataset/demo_data')
        self.demo_results_path = './results/test'
        if not os.path.exists(self.demo_results_path):
            os.makedirs(self.demo_results_path)
    def __init__(self, hparams):
        super(FakeClassificationModule, self).__init__()

        self.hparams = hparams
        model_info = self.hparams.model_info
        self.model = get_model(model_info['model_type'],
                               model_info['num_classes'],
                               model_info['pretrained'])
        if self.hparams.base_weights is not None:
            weights = torch.load(self.hparams.base_weights, map_location='cpu')
            self.model.load_state_dict(weights['model'], strict=False)
        self.optim_fn = torch.optim.__dict__[self.hparams.opt_name]
        self.loader_data = self.hparams.loader_data
        self.criterrion = self.criterrions[self.hparams.criterion]()
        self.ddp = len(self.hparams.gpus) > 1
def main(args):

    print(f'Name: {args.name}')
    print('Data preparing...')
    train_loader = get_dataloader(path=args.train_path,
                                  mode='train',
                                  side_size=args.side_size,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers)
    valid_loader = get_dataloader(path=args.valid_path,
                                  mode='valid',
                                  side_size=args.side_size,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers)
    print('Data prepared!')

    checkpoints_path = f'checkpoints/{args.name}'
    if not os.path.exists(checkpoints_path):
        os.makedirs(checkpoints_path)

    losses = {'cross': nn.CrossEntropyLoss, 'focal': FocalLoss}

    device = torch.device('cuda')

    model = get_model(args.model_type, args.num_classes)

    optimizer = Adam(model.parameters())

    criterion = losses[args.loss_type]()

    if args.start_epoch:
        weights = torch.load(os.path.join(checkpoints_path,
                                          f'epoch_{args.start_epoch}.pth'),
                             map_location='cpu')
        model.load_state_dict(weights['model'])
        optimizer.load_state_dict(weights['optimizer'])
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)

    columns = ['Train loss', 'Train accuracy', 'Valid loss', 'Valid accuracy']
    information = (pd.DataFrame(columns=columns),
                   f'{args.name}_from_{args.start_epoch}')

    train(model, optimizer, criterion, device, train_loader, valid_loader,
          args, information, checkpoints_path)
Example #11
    def __init__(self, dataset):

        self.logger = logging.getLogger('main.server')

        self.dataset = dataset
        self.model = model.get_model()
        self.move_model_to_gpu()
        self.logger.info('Activate a server for training.')

        self.round_num = 0

        self.client, self.clientData = self.setup_clients()
        # print(">>> Activate clients number: {}".format(self.numClients))
        self.logger.info('Activate clients number: %d', self.numClients)

        self.errorfeedback = [0] * self.numClients

        self.output_metric = read_write_data.Metrics()
Example #12
    def __init__(self, config, num_classes, pth_path, valid_dataloader, num_query):
        """

        :param config: configuration parameters
        :param num_classes: number of classes; type int
        :param pth_path: path to the weight file; type str
        :param valid_dataloader: DataLoader for the validation dataset
        :param num_query: number of query samples; type int
        """
        self.num_classes = num_classes

        self.model_name = config.model_name
        self.last_stride = config.last_stride
        self.dist = config.dist
        self.num_gpus = torch.cuda.device_count()
        print('Using {} GPUS'.format(self.num_gpus))

        # Load the model; use DataParallel whenever a GPU is available, and convert to sync_bn when there are multiple GPUs
        self.model = get_model(self.model_name, self.num_classes, self.last_stride)
        if torch.cuda.is_available():
            self.model = torch.nn.DataParallel(self.model)
            if self.num_gpus > 1:
                self.model = convert_model(self.model)
            self.model = self.model.cuda()

        # Instantiate the Solver class that implements the various sub-routines
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.solver = Solver(self.model, self.device)

        # Load the weights
        self.model = self.solver.load_checkpoint(pth_path)
        self.model.eval()

        # For each query sample, retrieve the num_choose nearest samples from the gallery
        self.num_choose = 10
        self.num_query = num_query
        self.valid_dataloader = valid_dataloader

        self.demo_results_path = './results/valid'
        if not os.path.exists(self.demo_results_path):
            os.makedirs(self.demo_results_path)
def test(best_step):
    tf.reset_default_graph()
    config = tf.ConfigProto(allow_soft_placement=True)
    if args.mode == 'train':
        args.load_graph = True
        args.meta_file_path = EXPERIMENT_PREFIX + '-' + str(
            best_step) + '.meta'
        args.restore_dir = EXPERIMENT_NAME
    with tf.Session(config=config) as session:
        _, _, ops = model.get_model(session=session,
                                    args=args,
                                    restore_only=True)
        test_results = evaluate(session=session,
                                ops=ops,
                                dataset=test_loader(epochs=1))

    print_results(test_results, args, msg='TEST RESULTS')
    print_test_evaluation_metrics(test_results['test_metrics'])
    if args.aspect_detector:
        compute_aspect_detection_results(test_results,
                                         RESTAURANT_ASPECT_WORD_INDEX_MAP)
Example #14
    def __init__(self, config, logger, wandb):
        self.config = config
        self.logger = logger
        self.writer = SummaryWriter(config.log_dir)
        self.wandb = wandb
        cudnn.enabled = True

        # set up model
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model = get_model(config)
        if len(config.gpus) > 1:
            self.model = nn.DataParallel(self.model)
        self.model = self.model.to(self.device)
        self.best_acc = 0
        self.best_AUC = 0
        self.class_loss_func = nn.CrossEntropyLoss()
        self.pixel_loss = nn.L1Loss()
        if config.mode == 'train':
            # set up optimizer, lr scheduler and loss functions
            lr = config.lr
            self.optimizer = torch.optim.Adam(self.model.parameters(),
                                              lr=lr,
                                              betas=(.5, .999))
            self.scheduler = MultiStepLR(self.optimizer,
                                         milestones=[50, 150],
                                         gamma=0.1)
            self.wandb.watch(self.model)
            self.start_iter = 0

            # resume
            if config.training_resume:
                self.load(config.model_dir + '/' + config.training_resume)

            cudnn.benchmark = True
        elif config.mode == 'val':
            self.load(os.path.join(config.testing_model))
        else:
            self.load(os.path.join(config.testing_model))
Example #15
    def reset(self, dataset, errorfeedback, round_num):
        self.cid = dataset[0]
        self.train_data = dataset[1]
        self.test_data = dataset[2]
        self.model = model.get_model()
        self.move_model_to_gpu()
        self.error = errorfeedback  #torch.zeros_like(self.get_flat_model_params())
        self.num_epochs = FLAGS.num_epochs
        self.round_num = round_num
        self.lr = FLAGS.client_lr
        # self.lr = self.get_lr()
        self.optimizer = optimizers.get_optimizer(self.model)

        self.train_dataloader = DataLoader(self.train_data,
                                           batch_size=FLAGS.client_batch_size,
                                           shuffle=True)
        self.test_dataloader = DataLoader(self.test_data,
                                          batch_size=FLAGS.client_batch_size,
                                          shuffle=False)

        self.input_shape = model.get_input_info()['input_shape']

        self.logger.debug("reset client %d", self.cid)
Example #16
    def __init__(self, config, logger):
        self.config = config
        self.logger = logger
        self.writer = SummaryWriter(config.log_dir)
        cudnn.enabled = True

        # set up model
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model = get_model(config)
        self.model = self.model.to(self.device)

        if config.mode == 'train':
            # set up optimizer, lr scheduler and loss functions

            lr = config.lr
            self.optimizer = torch.optim.Adam(self.model.parameters(),
                                              lr=lr,
                                              betas=(.5, .999))
            self.scheduler = LinearRampdown(self.optimizer,
                                            rampdown_from=1000,
                                            rampdown_till=1200)

            self.class_loss_func = nn.CrossEntropyLoss()

            self.start_iter = 0

            # resume
            if config.training_resume:
                self.load(config.model_dir + '/' + config.training_resume)

            cudnn.benchmark = True

        elif config.mode == 'val':
            self.load(os.path.join(config.model_dir, config.validation_model))
        else:
            self.load(os.path.join(config.model_dir, config.testing_model))
Example #17
def model_wrapper(wts_path, train=False, to_save_as=False, model_path=None):
    if model_path:
        return tf.keras.models.load_model(model_path)

    my_model = model.get_model()

    if wts_path:
        my_model.load_weights(wts_path)

    if train:

        class myCallback(Callback):
            def on_epoch_end(self, epoch, logs={}):
                if logs.get('accuracy', 0) > 0.95 and logs.get(
                        'val_accuracy', 0) > 0.95:
                    print('Stopping training')
                    my_model.stop_training = True

        callbacks = myCallback()

        mnist = tf.keras.datasets.mnist

        (x_train, y_train), (x_test, y_test) = mnist.load_data()

        # normalize the data
        x_train = x_train / 255.0
        x_test = x_test / 255.0

        my_model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
        print(my_model.evaluate(x_test, y_test))

        if wts_path:
            my_model.save_weights('{}-{}'.format(wts_path, round(time.time())))
        else:
            my_model.save_weights(to_save_as)

    return my_model
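Two possible calls to model_wrapper (paths are placeholders): train on MNIST and save the weights under to_save_as, then rebuild the model from a saved weights file:

trained = model_wrapper(wts_path=None, train=True, to_save_as='mnist_weights.h5')
restored = model_wrapper(wts_path='mnist_weights.h5', train=False)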
Example #18
                                     is_training=False)

trainops = TrainOps(params, num_records=len(train_generator))

optimizer = get_optimizer(params, trainops)
loss_fn = get_loss(params)

model_metrics = ModelMetrics(params)
tb_callback = TensorboardCallback(model_dir=params.model_directory)
model_checkpoint = ModelCheckpointCustom(monitor="val_acc",
                                         model_dir=params.model_directory,
                                         mode="max")
print_stats = PrintStats(params=params)

# get model
model = get_model(params)

for epoch in range(params.num_epochs):
    # Iterate over the batches of the dataset.
    for step, (x_batch_train,
               y_batch_train) in tqdm(enumerate(train_generator)):
        with tf.GradientTape() as tape:
            logits = model(x_batch_train, training=True)
            loss = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        current_lr = optimizer._decayed_lr(tf.float32).numpy()
        current_loss = np.round(loss.numpy(), 2)
        print(f"\nOpt Iteration: {optimizer.__dict__['_iterations'].numpy()} "
              f"learning rate: {current_lr} loss: {np.round(loss.numpy(), 2)}")
Example #19
    khats = list(np.load(f))

print(f'Current Khats: {khats}')

n_flow = 8

print(f"- N FLOW: {n_flow}")
# Change shape in params
target_distributions[-1]['n_flows'] = n_flow
print(f"    New params: {target_distributions[-1]}")

training_parameters = get_training_parameters(target)

average_khat = 0

q = get_model(model_choice, training_parameters)

q = train(q, training_parameters)

for k in range(K):

    z, log_q = q_posterior(q, model_choice, training_parameters)

    log_p = log_joint_pdf(z, training_parameters)

    log_r = log_p - log_q

    _, khat = psislw(log_r)

    print(f"    - New khat: {khat}")
Example #20
def main(flags):
    params = Params("params.json")
    params.data_path = TRAIN_DATA_PATH

    params.cv_iteration = flags.cfs_cv_iteration

    logging = Logging(flags.save_model_dir, params)

    ids = os.listdir(os.path.join(params.data_path, "images"))
    train_ids, validation_ids, test_ids = data_split(ids, params)

    test_id = [test_ids[params.cv_iteration]]

    # log test id
    params.test_id = test_id[0]

    print("Test records is: ", test_id[0])

    test_ids = [id_ for id_ in test_ids if id_ not in test_id]
    extra_ids = test_ids
    random.shuffle(extra_ids)

    train_ids = train_ids + extra_ids[0:int(len(extra_ids) * 0.75)]
    validation_ids = validation_ids + extra_ids[int(len(extra_ids) * 0.75):]

    print(f"Number of training samples: {len(train_ids)}, "
          f"number of validation samples: {len(validation_ids)}, "
          f"number of test sample: {len(test_id)}")

    logging.create_model_directory(
        model_dir=f"{flags.save_model_dir}/{test_id[0].replace('.png', '')}")
    params.model_directory = logging.model_directory

    # saving model config file to model output dir
    logging.save_dict_to_json(logging.model_directory + "/config.json")

    # Generators
    train_generator = DataGenerator(train_ids, params=params, is_training=True)
    validation_generator = DataGenerator(validation_ids,
                                         params=params,
                                         is_training=False)

    trainops = TrainOps(params, num_records=len(train_generator))

    optimizer = get_optimizer(params, trainops)
    loss_fn = get_loss(params)

    model_metrics = ModelMetrics(params)
    tb_callback = TensorboardCallback(model_dir=params.model_directory)
    model_checkpoint = ModelCheckpointCustom(monitor="val_acc",
                                             model_dir=params.model_directory,
                                             mode="max")
    print_stats = PrintStats(params=params)

    # get model
    model = get_model(params)

    for epoch in range(params.num_epochs):
        # Iterate over the batches of the dataset.
        for step, (x_batch_train,
                   y_batch_train) in tqdm(enumerate(train_generator)):
            with tf.GradientTape() as tape:
                logits = model(x_batch_train, training=True)
                loss = loss_fn(y_batch_train, logits)

            grads = tape.gradient(loss, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))
            current_lr = optimizer._decayed_lr(tf.float32).numpy()
            print(
                f"\nOpt Iteration: {optimizer.__dict__['_iterations'].numpy()} "
                f"learning rate: {current_lr} loss: {np.round(loss.numpy(), 2):.2f}"
            )

            # Update training metric.
            model_metrics.update_metric_states(y_batch_train,
                                               logits,
                                               mode="train")

        # Display metrics at the end of each epoch.
        train_result_dict = model_metrics.result_metrics(mode="train")

        tb_callback.on_epoch_end(epoch=epoch,
                                 logging_dict=train_result_dict,
                                 lr=current_lr)

        # Run a validation loop at the end of each epoch.
        for x_batch_val, y_batch_val in validation_generator:
            val_logits = model(x_batch_val, training=False)
            val_loss = loss_fn(y_batch_val, val_logits)

            # Update val metrics
            model_metrics.update_metric_states(y_batch_val,
                                               val_logits,
                                               mode="val")

        print(f"validation loss is: f'{val_loss.numpy():.2f}'")

        val_result_dict = model_metrics.result_metrics(mode="val")

        tb_callback.on_epoch_end(epoch=epoch, logging_dict=val_result_dict)
        model_checkpoint.on_epoch_end(epoch,
                                      model,
                                      logging_dict=val_result_dict)
        print_stats.on_epoch_end(epoch,
                                 train_dict=train_result_dict,
                                 validation_dict=val_result_dict,
                                 lr=current_lr)

        # Reset training metrics at the end of each epoch
        model_metrics.reset_metric_states(mode="train")
        model_metrics.reset_metric_states(mode="val")
eval_dataset = DatasetCityscapesEval(root=data_dir, list_path=data_list)
eval_loader = data.DataLoader(eval_dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              pin_memory=True)

output_path = "/home/evaluating_bdl/segmentation/training_logs/%s_%s_eval" % (
    model_id, str(model_is))
if not os.path.exists(output_path):
    os.makedirs(output_path)

models = []
for i in model_is:
    restore_from = "/home/evaluating_bdl/segmentation/trained_models/%s_%d/checkpoint_40000.pth" % (
        model_id, i)
    deeplab = get_model(num_classes=num_classes)
    deeplab.load_state_dict(torch.load(restore_from))
    model = nn.DataParallel(deeplab)
    model.eval()
    model.cuda()
    models.append(model)

M_float = float(len(models))
print(M_float)

confusion_matrix = np.zeros((num_classes, num_classes))
for step, batch in enumerate(eval_loader):
    with torch.no_grad():
        print("%d/%d" % (step + 1, len(eval_loader)))

        image, label, _, name = batch
Example #22
        data_torch = transforms.ToTensor()(data_torch)
        data_torch = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224,
                                               0.225])(data_torch)
        return data_torch


#aug = ['Ori','Ori_Hflip','Ori_Vflip','Ori_Rotate_90','Ori_Rotate_180','Ori_Rotate_270',
# 'Crop','Crop_Hflip','Crop_Vflip','Crop_Rotate_90','Crop_Rotate_180','Crop_Rotate_270']
aug = ['Ori_Hflip']

cpk_filename = configs.checkpoints + os.sep + configs.model_name + "-checkpoint.pth.tar"
best_cpk = cpk_filename.replace("-checkpoint.pth.tar", "-best_model.pth.tar")
checkpoint = torch.load(best_cpk)
cudnn.benchmark = True
model = get_model()
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_files = pd.read_csv(configs.submit_example)

with torch.no_grad():
    y_pred_prob = torch.FloatTensor([])
    for a in tqdm(aug):
        print(a)
        test_set = WeatherTTADataset(test_files, a)
        test_loader = DataLoader(dataset=test_set,
                                 batch_size=configs.bs,
                                 shuffle=False,
                                 num_workers=4,
                                 pin_memory=True,
                                 sampler=None)
    split_len:], y[split_len:]

print("Train data shape {}".format(a.shape))
print("Beginning training.....")

#

for word_num_hiden in [100, 250]:
    for sentence_num_hidden in [200, 300, 400]:
        print("=" * 40)
        print("word hum hidden {}".format(word_num_hiden))
        print("sentence hum hidden {}".format(sentence_num_hidden))
        print("=" * 40)

        model = get_model(word_num_hiden=word_num_hiden,
                          sentence_num_hidden=sentence_num_hidden,
                          type="lstm")
        model.compile(optimizer="adam",
                      loss="sparse_categorical_crossentropy",
                      metrics=["acc"])
        print(model.summary())
        filepath = "model/lstm_trainable_emb-wh_{}_sh_{}-".format(
            word_num_hiden,
            sentence_num_hidden) + "{epoch:02d}-{val_acc:.2f}.h5"

        checkpoint = ModelCheckpoint(filepath,
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')
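        # A plausible continuation (not part of the snippet): fit with the
        # checkpoint callback; x_train/y_train and x_val/y_val are assumed to
        # come from the split further above.
        model.fit(x_train, y_train,
                  validation_data=(x_val, y_val),
                  epochs=10,
                  batch_size=64,
                  callbacks=[checkpoint])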
Example #24
    def init_model(self):
        self.model = get_model(self.config.model, self.data.input_shape,
                               self.data.nb_classes)
        self.model.compile(loss=keras.losses.categorical_crossentropy,
                           optimizer=keras.optimizers.Adadelta(),
                           metrics=['accuracy'])
Example #25
def predict():

    is_training = False

    with tf.device('/gpu:' + str(gpu_to_use)):
        is_training_ph = tf.placeholder(tf.bool, shape=())

        pointclouds_ph, ptsseglabel_ph, ptsseglabel_onehot_ph, ptsgroup_label_ph, _, _, _ = \
            model.placeholder_inputs(BATCH_SIZE, POINT_NUM, NUM_GROUPS, NUM_CATEGORY)

        net_output = model.get_model(pointclouds_ph,
                                     is_training_ph,
                                     group_cate_num=NUM_CATEGORY)

        group_mat_label = tf.matmul(
            ptsgroup_label_ph,
            tf.transpose(ptsgroup_label_ph, perm=[0, 2, 1])
        )  # BxNxN: entry (i, j) is 1 if points i and j are in the same group

    # Add ops to save and restore all the variables.

    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:

        flog = open(os.path.join(OUTPUT_DIR, 'log.txt'), 'w')

        # Restore variables from disk.
        ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
        if ckptstate is not None:
            LOAD_MODEL_FILE = os.path.join(
                PRETRAINED_MODEL_PATH,
                os.path.basename(ckptstate.model_checkpoint_path))
            saver.restore(sess, LOAD_MODEL_FILE)
            printout(flog, "Model loaded in file: %s" % LOAD_MODEL_FILE)
        else:
            printout(flog,
                     "Fail to load modelfile: %s" % PRETRAINED_MODEL_PATH)

        total_acc = 0.0
        total_seen = 0

        ious = np.zeros(NUM_CATEGORY)
        totalnums = np.zeros(NUM_CATEGORY)

        tpsins = [[] for itmp in range(NUM_CATEGORY)]
        fpsins = [[] for itmp in range(NUM_CATEGORY)]

        positive_ins_sgpn = np.zeros(NUM_CATEGORY)
        total_sgpn = np.zeros(NUM_CATEGORY)
        at = 0.25

        for shape_idx in range(len(TEST_DATASET)):
            cur_data, cur_seg, cur_group, cur_smpw = get_test_batch(
                TEST_DATASET, shape_idx)
            printout(flog, '%d / %d ...' % (shape_idx, len(TEST_DATASET)))

            seg_output = np.zeros_like(cur_seg)
            segrefine_output = np.zeros_like(cur_seg)
            group_output = np.zeros_like(cur_group)
            conf_output = np.zeros_like(cur_group).astype(np.float)

            pts_group_label, _ = model.convert_groupandcate_to_one_hot(
                cur_group)
            pts_label_one_hot = model.convert_seg_to_one_hot(cur_seg)
            num_data = cur_data.shape[0]

            gap = 5e-3
            volume_num = int(1. / gap) + 1
            volume = -1 * np.ones([volume_num, volume_num, volume_num]).astype(
                np.int32)
            volume_seg = -1 * np.ones([volume_num, volume_num, volume_num
                                       ]).astype(np.int32)

            intersections = np.zeros(NUM_CATEGORY)
            unions = np.zeros(NUM_CATEGORY)

            for j in range(num_data):
                print("Processsing: Shape [%d] Block[%d]" % (shape_idx, j))

                pts = cur_data[j, ...]

                feed_dict = {
                    pointclouds_ph:
                    np.expand_dims(pts, 0),
                    ptsseglabel_onehot_ph:
                    np.expand_dims(pts_label_one_hot[j, ...], 0),
                    ptsseglabel_ph:
                    np.expand_dims(cur_seg[j, ...], 0),
                    ptsgroup_label_ph:
                    np.expand_dims(pts_group_label[j, ...], 0),
                    is_training_ph:
                    is_training,
                }

                pts_corr_val0, pred_confidence_val0, ptsclassification_val0, pts_corr_label_val0 = \
                    sess.run([net_output['simmat'],
                              net_output['conf'],
                              net_output['semseg'],
                              group_mat_label],
                              feed_dict=feed_dict)

                seg = cur_seg[j, ...]
                ins = cur_group[j, ...]

                pts_corr_val = np.squeeze(pts_corr_val0[0])  #NxG
                pred_confidence_val = np.squeeze(pred_confidence_val0[0])
                ptsclassification_val = np.argmax(np.squeeze(
                    ptsclassification_val0[0]),
                                                  axis=1)

                seg = np.squeeze(seg)

                #print(label_bin)
                groupids_block, refineseg, group_seg = GroupMerging(
                    pts_corr_val, pred_confidence_val, ptsclassification_val,
                    label_bin
                )  # yolo_to_groupt(pts_corr_val, pts_corr_label_val0[0], seg,t=5)

                groupids = BlockMerging(volume, volume_seg, pts[:, 6:],
                                        groupids_block.astype(np.int32),
                                        group_seg, gap)

                seg_output[j, :] = ptsclassification_val
                group_output[j, :] = groupids
                conf_output[j, :] = pred_confidence_val
                total_acc += float(np.sum(ptsclassification_val == seg)
                                   ) / ptsclassification_val.shape[0]
                total_seen += 1

            ###### Evaluation
            ### Instance Segmentation
            ## Pred
            group_pred = group_output.reshape(-1)
            seg_pred = seg_output.reshape(-1)
            seg_gt = cur_seg.reshape(-1)
            conf_pred = conf_output.reshape(-1)
            pts = cur_data.reshape([-1, 9])

            # filtering
            x = (pts[:, 6] / gap).astype(np.int32)
            y = (pts[:, 7] / gap).astype(np.int32)
            z = (pts[:, 8] / gap).astype(np.int32)
            for i in range(group_pred.shape[0]):
                if volume[x[i], y[i], z[i]] != -1:
                    group_pred[i] = volume[x[i], y[i], z[i]]

            un = np.unique(group_pred)
            pts_in_pred = [[] for itmp in range(NUM_CATEGORY)]
            conf_in_pred = [[] for itmp in range(NUM_CATEGORY)]
            group_pred_final = -1 * np.ones_like(group_pred)
            grouppred_cnt = 0

            for ig, g in enumerate(un):  #each object in prediction
                if g == -1:
                    continue
                tmp = (group_pred == g)
                sem_seg_g = int(stats.mode(seg_pred[tmp])[0])
                if np.sum(tmp) > 0.25 * min_num_pts_in_group[sem_seg_g]:
                    conf_tmp = conf_pred[tmp]

                    pts_in_pred[sem_seg_g] += [tmp]
                    conf_in_pred[sem_seg_g].append(np.average(conf_tmp))
                    group_pred_final[tmp] = grouppred_cnt
                    grouppred_cnt += 1

            if False:
                pc_util.write_obj_color(
                    pts[:, :3], seg_pred.astype(np.int32),
                    os.path.join(OUTPUT_DIR, '%d_segpred.obj' % (shape_idx)))
                pc_util.write_obj_color(
                    pts[:, :3], group_pred_final.astype(np.int32),
                    os.path.join(OUTPUT_DIR, '%d_grouppred.obj' % (shape_idx)))
            '''
            # write to file
            cur_train_filename = TEST_DATASET.get_filename(shape_idx)
            scene_name = cur_train_filename
            counter = 0
            f_scene = open(os.path.join('output', scene_name + '.txt'), 'w')
            for i_sem in range(NUM_CATEGORY):
                for ins_pred, ins_conf in zip(pts_in_pred[i_sem], conf_in_pred[i_sem]):
                    f_scene.write('{}_{:03d}.txt {} {}\n'.format(os.path.join('output', 'pred_insts', scene_name), counter, i_sem, ins_conf))
                    with open(os.path.join('output', 'pred_insts', '{}_{:03}.txt'.format(scene_name, counter)), 'w') as f:
                        for i_ins in ins_pred:
                            if i_ins:
                                f.write('1\n')
                            else:
                                f.write('0\n')
                    counter += 1
            f_scene.close()

            # write_to_mesh
            mesh_filename = os.path.join('mesh', scene_name +'.ply')
            pc_util.write_ply(pts, mesh_filename)
            '''

            # GT
            group_gt = cur_group.reshape(-1)
            un = np.unique(group_gt)
            pts_in_gt = [[] for itmp in range(NUM_CATEGORY)]
            for ig, g in enumerate(un):
                tmp = (group_gt == g)
                sem_seg_g = int(stats.mode(seg_pred[tmp])[0])
                pts_in_gt[sem_seg_g] += [tmp]
                total_sgpn[sem_seg_g] += 1

            for i_sem in range(NUM_CATEGORY):
                tp = [0.] * len(pts_in_pred[i_sem])
                fp = [0.] * len(pts_in_pred[i_sem])
                gtflag = np.zeros(len(pts_in_gt[i_sem]))

                for ip, ins_pred in enumerate(pts_in_pred[i_sem]):
                    ovmax = -1.

                    for ig, ins_gt in enumerate(pts_in_gt[i_sem]):
                        union = (ins_pred | ins_gt)
                        intersect = (ins_pred & ins_gt)
                        iou = float(np.sum(intersect)) / np.sum(union)

                        if iou > ovmax:
                            ovmax = iou
                            igmax = ig

                    if ovmax >= at:
                        if gtflag[igmax] == 0:
                            tp[ip] = 1  # true
                            gtflag[igmax] = 1
                        else:
                            fp[ip] = 1  # multiple det
                    else:
                        fp[ip] = 1  # false positive

                tpsins[i_sem] += tp
                fpsins[i_sem] += fp

            ### Semantic Segmentation
            un, indices = np.unique(seg_gt, return_index=True)
            for segid in un:
                intersect = np.sum((seg_pred == segid) & (seg_gt == segid))
                union = np.sum((seg_pred == segid) | (seg_gt == segid))
                intersections[segid] += intersect
                unions[segid] += union
            iou = intersections / unions
            for i_iou, iou_ in enumerate(iou):
                if not np.isnan(iou_):
                    ious[i_iou] += iou_
                    totalnums[i_iou] += 1

        ap = np.zeros(NUM_CATEGORY)
        for i_sem in range(NUM_CATEGORY):
            ap[i_sem], _, _ = eval_3d_perclass(tpsins[i_sem], fpsins[i_sem],
                                               total_sgpn[i_sem])

        print('Instance Segmentation AP:', ap)
        print('Instance Segmentation mAP:', np.mean(ap))
        print('Semantic Segmentation IoU:', ious / totalnums)
        print('Semantic Segmentation Acc: %f' % (total_acc / total_seen))
Example #26
        #embedding_matrix[i] = np.random.randn(EMBEDDING_DIM)
print('Null word embeddings: %d' %
      np.sum(np.sum(embedding_matrix, axis=1) == 0))

#np.save(embedding_matrix_path, embedding_matrix)

# sequence_length = x.shape[1]
# vocabulary_size = len(vocabulary_inv)
# print "sequence len: ", sequence_length
# print "vocab size: ", vocabulary_size
args.filter_sizes = [3, 4, 5, 6]
args.nb_words = nb_words
args.embedding_matrix = embedding_matrix
#args.embedding_matrix = None

model = get_model(args)
if args.snapshot:
    model.load_weights(args.snapshot)
if args.test:
    test_set = []
    int_labels_test = []
    with codecs.open(TEST_DATA_FILE, encoding='utf-8') as f:
        for line in f:
            # print  line
            tokens = line.strip().split(",")
            try:
                int_labels_test.append(label_map[tokens[2]])
                test_set.append(tokens[1].lower())
            except Exception as e:
                print("--------------", e)
    sequences_test = tokenizer.texts_to_sequences(test_set)
Example #27
def train():
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(FLAGS.gpu)):
            batch = tf.Variable(0, trainable=False, name='batch')
            learning_rate = tf.train.exponential_decay(
                BASE_LEARNING_RATE,  # base learning rate
                batch * BATCH_SIZE,  # global_var indicating the number of steps
                DECAY_STEP,  # step size
                DECAY_RATE,  # decay rate
                staircase=True  # Stair-case or continuous decreasing
            )
            bn_decay = get_bn_decay(batch)
            learning_rate = tf.maximum(learning_rate, LEARNING_RATE_CLIP)

            lr_op = tf.summary.scalar('learning_rate', learning_rate)

            pointclouds_ph, ptsseglabel_ph, ptsseglabel_onehot_ph, ptsgroup_label_ph, pts_seglabel_mask_ph, pts_group_mask_ph, alpha_ph = \
                model.placeholder_inputs(BATCH_SIZE, POINT_NUM, NUM_GROUPS, NUM_CATEGORY)
            is_training_ph = tf.placeholder(tf.bool, shape=())

            labels = {'ptsgroup': ptsgroup_label_ph,
                      'semseg': ptsseglabel_ph,
                      'semseg_onehot': ptsseglabel_onehot_ph,
                      'semseg_mask': pts_seglabel_mask_ph,
                      'group_mask': pts_group_mask_ph}

            net_output = model.get_model(pointclouds_ph, is_training_ph, group_cate_num=NUM_CATEGORY, m=MARGINS[0], bn_decay=bn_decay)
            ptsseg_loss, simmat_loss, loss, grouperr, same, same_cnt, diff, diff_cnt, pos, pos_cnt = model.get_loss(net_output, labels, alpha_ph, MARGINS)

            total_training_loss_ph = tf.placeholder(tf.float32, shape=())
            group_err_loss_ph = tf.placeholder(tf.float32, shape=())
            total_train_loss_sum_op = tf.summary.scalar('total_training_loss', total_training_loss_ph)
            group_err_op = tf.summary.scalar('group_err_loss', group_err_loss_ph)

        train_variables = tf.trainable_variables()

        trainer = tf.train.AdamOptimizer(learning_rate)
        train_op = trainer.minimize(loss, var_list=train_variables, global_step=batch)
        train_op_pretrain = trainer.minimize(ptsseg_loss, var_list=train_variables, global_step=batch)
        train_op_5epoch = trainer.minimize(simmat_loss, var_list=train_variables, global_step=batch)

        loader = tf.train.Saver([v for v in tf.all_variables()
                                 if ('conf_logits' not in v.name) and
                                    ('Fsim' not in v.name) and
                                    ('Fsconf' not in v.name) and
                                    ('batch' not in v.name)])
        saver = tf.train.Saver([v for v in tf.all_variables()], max_to_keep=200)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        sess = tf.Session(config=config)

        init = tf.global_variables_initializer()
        sess.run(init)

        train_writer = tf.summary.FileWriter(SUMMARIES_FOLDER + '/train', sess.graph)

        fcmd = open(os.path.join(LOG_STORAGE_PATH, 'cmd.txt'), 'w')
        fcmd.write(str(FLAGS))
        fcmd.close()

        flog = open(os.path.join(LOG_STORAGE_PATH, 'log.txt'), 'w')

        ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
        if ckptstate is not None:
            LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH, os.path.basename(ckptstate.model_checkpoint_path))
            loader.restore(sess, LOAD_MODEL_FILE)
            printout(flog, "Model loaded in file: %s" % LOAD_MODEL_FILE)
        else:
            printout(flog, "Fail to load modelfile: %s" % PRETRAINED_MODEL_PATH)


        ## load test data into memory
        test_data = []
        test_group = []
        test_seg = []
        test_smpw = []
        for i in range(len(TEST_DATASET)):
            print(i)
            cur_data, cur_seg, cur_group, cur_smpw = get_test_batch(TEST_DATASET, i)
            test_data += [cur_data]
            test_group += [cur_group]
            test_seg += [cur_seg]
            test_smpw += [cur_smpw]

        test_data = np.concatenate(test_data,axis=0)
        test_group = np.concatenate(test_group,axis=0)
        test_seg = np.concatenate(test_seg,axis=0)
        test_smpw = np.concatenate(test_smpw,axis=0)
        num_data_test = test_data.shape[0]
        num_batch_test = num_data_test // BATCH_SIZE

        def train_one_epoch(epoch_num):

            ### NOTE: is_training = False: We do not update bn parameters during training due to the small batch size. This requires pre-training PointNet with large batchsize (say 32).
            if PRETRAIN:
                is_training = True
            else:
                is_training = False

            total_loss = 0.0
            total_grouperr = 0.0
            total_same = 0.0
            total_diff = 0.0
            total_pos = 0.0
            same_cnt0 = 0

            train_idxs = np.arange(0, len(TRAIN_DATASET))
            np.random.shuffle(train_idxs)
            num_batches = len(TRAIN_DATASET)//BATCH_SIZE
            for batch_idx in range(num_batches):
                print('{}/{}'.format(batch_idx, num_batches))
                start_idx = batch_idx * BATCH_SIZE
                end_idx = (batch_idx+1) * BATCH_SIZE
                batch_data, batch_label, batch_group, batch_smpw = get_batch(TRAIN_DATASET, train_idxs, start_idx, end_idx)
                aug_data = provider.rotate_point_cloud_z(batch_data)
                pts_label_one_hot = model.convert_seg_to_one_hot(batch_label)

                if PRETRAIN:
                    feed_dict = {
                        pointclouds_ph: aug_data, 
                        ptsseglabel_ph: batch_label,
                        ptsseglabel_onehot_ph: pts_label_one_hot,
                        pts_seglabel_mask_ph: batch_smpw,
                        is_training_ph: is_training,
                        alpha_ph: min(10., (float(epoch_num) / 5.) * 2. + 2.),
                    }
                    _, loss_val  = sess.run([train_op_pretrain, ptsseg_loss], feed_dict=feed_dict)
                    total_loss += loss_val
                    if batch_idx % 10 == 9:
                        printout(flog, 'Batch: %d, loss: %f' % (batch_idx, total_loss/10))
                        total_loss = 0.0
                else:
                    pts_group_label, pts_group_mask = model.convert_groupandcate_to_one_hot(batch_group)
                    feed_dict = {
                        pointclouds_ph: batch_data,
                        ptsseglabel_ph: batch_label,
                        ptsseglabel_onehot_ph: pts_label_one_hot,
                        pts_seglabel_mask_ph: batch_smpw,
                        ptsgroup_label_ph: pts_group_label,
                        pts_group_mask_ph: pts_group_mask,
                        is_training_ph: is_training,
                        alpha_ph: min(10., (float(epoch_num) / 5.) * 2. + 2.),
                    }

                    if epoch_num < 20:
                        _, loss_val, simmat_val, grouperr_val, same_val, same_cnt_val, diff_val, diff_cnt_val, pos_val, pos_cnt_val = sess.run([train_op_5epoch, simmat_loss, net_output['simmat'], grouperr, same, same_cnt, diff, diff_cnt, pos, pos_cnt], feed_dict=feed_dict)
                    else:
                        _, loss_val, simmat_val, grouperr_val, same_val, same_cnt_val, diff_val, diff_cnt_val, pos_val, pos_cnt_val = sess.run([train_op, loss, net_output['simmat'], grouperr, same, same_cnt, diff, diff_cnt, pos, pos_cnt], feed_dict=feed_dict)

                    total_loss += loss_val
                    total_grouperr += grouperr_val
                    total_diff += (diff_val / diff_cnt_val)
                    if same_cnt_val > 0:
                        total_same += same_val / same_cnt_val
                        same_cnt0 += 1
                    total_pos += pos_val / pos_cnt_val

                    if batch_idx % 10 == 9:
                        printout(flog, 'Batch: %d, loss: %f, grouperr: %f, same: %f, diff: %f, pos: %f' % (batch_idx, total_loss/10, total_grouperr/10, total_same/same_cnt0, total_diff/10, total_pos/10))

                        lr_sum, batch_sum, train_loss_sum, group_err_sum = sess.run( \
                            [lr_op, batch, total_train_loss_sum_op, group_err_op], \
                            feed_dict={total_training_loss_ph: total_loss / 10.,
                                       group_err_loss_ph: total_grouperr / 10., })

                        train_writer.add_summary(train_loss_sum, batch_sum)
                        train_writer.add_summary(lr_sum, batch_sum)
                        train_writer.add_summary(group_err_sum, batch_sum)

                        total_grouperr = 0.0
                        total_loss = 0.0
                        total_diff = 0.0
                        total_same = 0.0
                        total_pos = 0.0
                        same_cnt0 = 0

            cp_filename = saver.save(sess, os.path.join(MODEL_STORAGE_PATH, 'epoch_' + str(epoch_num + 1) + '.ckpt'))
            printout(flog, 'Successfully store the checkpoint model into ' + cp_filename)

        def val_one_epoch(epoch_num):
            is_training = False

            def evaluate_confusion(confusion_matrix, epoch):
                conf = confusion_matrix.value()
                total_correct = 0
                valids = np.zeros(NUM_CATEGORY, dtype=np.float32)
                for c in range(NUM_CATEGORY):
                    num = conf[c,:].sum()
                    valids[c] = -1 if num == 0 else float(conf[c][c]) / float(num)
                    total_correct += conf[c][c]
                instance_acc = -1 if conf.sum() == 0 else float(total_correct) / float(conf.sum())
                avg_acc = -1 if np.all(np.equal(valids, -1)) else np.mean(valids[np.not_equal(valids, -1)])
                print('Epoch: {}\tAcc(inst): {:.6f}\tAcc(avg): {:.6f}'.format(epoch, instance_acc, avg_acc))
                for class_ind, class_acc in enumerate(valids[np.not_equal(valids, -1)]):
                    print('{}: {}'.format(class_ind, class_acc))
                with open(os.path.join(LOG_STORAGE_PATH, 'ACC_{}.txt'.format(epoch)), 'w') as f:
                    f.write('Epoch: {}\tAcc(inst): {:.6f}\tAcc(avg): {:.6f}'.format(epoch, instance_acc, avg_acc))
                    for class_ind, class_acc in enumerate(valids[np.not_equal(valids, -1)]):
                        f.write('{}: {}\n'.format(class_ind, class_acc))

            confusion_val = tnt.meter.ConfusionMeter(NUM_CATEGORY)
            for j in range(0, num_batch_test):
                print('{}/{}'.format(j, num_batch_test))
                start_idx = j * BATCH_SIZE
                end_idx = (j + 1) * BATCH_SIZE
                pts_label_one_hot = model.convert_seg_to_one_hot(test_seg[start_idx:end_idx])
                feed_dict = {
                    pointclouds_ph: test_data[start_idx:end_idx,...],
                    ptsseglabel_ph: test_seg[start_idx:end_idx],
                    ptsseglabel_onehot_ph: pts_label_one_hot,
                    pts_seglabel_mask_ph: test_smpw[start_idx:end_idx, ...],
                    is_training_ph: is_training,
                    alpha_ph: min(10., (float(epoch_num) / 5.) * 2. + 2.),
                }
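                # The alpha value fed above ramps linearly from 2 at epoch 0 (by 0.4 per
                # epoch) and is capped at 10, which it reaches at around epoch 20.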

                ptsclassification_val0 = sess.run([net_output['semseg']], feed_dict=feed_dict)
                ptsclassification_val = torch.from_numpy(ptsclassification_val0[0]).view(-1, NUM_CATEGORY)
                ptsclassification_gt = torch.from_numpy(pts_label_one_hot).view(-1, NUM_CATEGORY)
                #import ipdb
                #ipdb.set_trace()
                #pc_util.write_obj_color(np.reshape(test_data[:BATCH_SIZE,:,:3], [-1,3])[:,:3], np.argmax(ptsclassification_val.numpy(), 1), 'pred3.obj')
                #pc_util.write_obj_color(np.reshape(test_data[:BATCH_SIZE,:,:3], [-1,3])[:,:3], np.argmax(ptsclassification_gt.numpy(), 1), 'gt.obj')
                confusion_val.add(target=ptsclassification_gt, predicted=ptsclassification_val)
            evaluate_confusion(confusion_val, epoch_num)


        if not os.path.exists(MODEL_STORAGE_PATH):
            os.mkdir(MODEL_STORAGE_PATH)

        for epoch in range(TRAINING_EPOCHES):
            printout(flog, '\n>>> Training for epoch %d/%d ...' % (epoch, TRAINING_EPOCHES))
            train_one_epoch(epoch)
            flog.flush()
            if PRETRAIN:
                val_one_epoch(epoch)

        flog.close()
Beispiel #28
0
def predict():
    is_training = False

    with tf.device('/gpu:' + str(gpu_to_use)):
        is_training_ph = tf.placeholder(tf.bool, shape=())

        pointclouds_ph, ptsseglabel_ph, ptsgroup_label_ph, _, _, _ = \
            model.placeholder_inputs(BATCH_SIZE, POINT_NUM, NUM_GROUPS, NUM_CATEGORY)

        net_output = model.get_model(pointclouds_ph, is_training_ph, group_cate_num=NUM_CATEGORY)
        group_mat_label = tf.matmul(ptsgroup_label_ph, tf.transpose(ptsgroup_label_ph, perm=[0, 2, 1])) #BxNxN: (i,j) if i and j in the same group

    # Add ops to save and restore all the variables.

    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:

        ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
        if ckptstate is not None:
            LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH,os.path.basename(ckptstate.model_checkpoint_path))
            saver.restore(sess, LOAD_MODEL_FILE)
            print("Model loaded in file: %s" % LOAD_MODEL_FILE)
        else:
            print("Fail to load modelfile: %s" % PRETRAINED_MODEL_PATH)


        for shape_idx in range(len_pts_files):

            cur_train_filename = test_file_list[shape_idx]

            if not os.path.exists(cur_train_filename):
                continue
            cur_data, cur_group, _, cur_seg = provider.loadDataFile_with_groupseglabel_stanfordindoor(cur_train_filename)

            seg_output = np.zeros_like(cur_seg)
            segrefine_output = np.zeros_like(cur_seg)
            group_output = np.zeros_like(cur_group)
            conf_output = np.zeros_like(cur_group).astype(float)

            pts_label_one_hot, pts_label_mask = model.convert_seg_to_one_hot(cur_seg)
            pts_group_label, _ = model.convert_groupandcate_to_one_hot(cur_group)
            num_data = cur_data.shape[0]

            gap = 5e-3
            volume_num = int(1. / gap)+1
            volume = -1* np.ones([volume_num,volume_num,volume_num]).astype(np.int32)
            volume_seg = -1* np.ones([volume_num,volume_num,volume_num, NUM_CATEGORY]).astype(np.int32)
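            # volume / volume_seg form a dense voxel grid (gap = 5e-3, i.e. 201 cells per
            # axis) over the point coordinates in pts[:, 6:9] (presumably normalised to
            # [0, 1]); BlockMerging uses it below to stitch per-block instance ids into
            # scene-level ids.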

            intersections = np.zeros(NUM_CATEGORY)
            unions = np.zeros(NUM_CATEGORY)
            print('[%d / %d] Block Number: %d' % (shape_idx, len_pts_files, num_data))
            print('Loading train file %s' % (cur_train_filename))

            flag = True
            for j in range(num_data):

                pts = cur_data[j,...]

                feed_dict = {
                    pointclouds_ph: np.expand_dims(pts,0),
                    ptsseglabel_ph: np.expand_dims(pts_label_one_hot[j,...],0),
                    ptsgroup_label_ph: np.expand_dims(pts_group_label[j,...],0),
                    is_training_ph: is_training,
                }

                pts_corr_val0, pred_confidence_val0, ptsclassification_val0, pts_corr_label_val0 = \
                    sess.run([net_output['simmat'],
                              net_output['conf'],
                              net_output['semseg'],
                              group_mat_label],
                              feed_dict=feed_dict)

                seg = cur_seg[j,...]
                ins = cur_group[j,...]

                pts_corr_val = np.squeeze(pts_corr_val0[0]) #NxG
                pred_confidence_val = np.squeeze(pred_confidence_val0[0])
                ptsclassification_val = np.argmax(np.squeeze(ptsclassification_val0[0]),axis=1)

                seg = np.squeeze(seg)
                # print label_bin

                groupids_block, refineseg, group_seg = GroupMerging_old(pts_corr_val, pred_confidence_val, ptsclassification_val, label_bin)  # yolo_to_groupt(pts_corr_val, pts_corr_label_val0[0], seg, t=5)
                groupids = BlockMerging(volume, volume_seg, pts[:,6:], groupids_block.astype(np.int32), group_seg, gap)

                seg_output[j,:] = ptsclassification_val
                segrefine_output[j,:] = refineseg
                group_output[j,:] = groupids
                conf_output[j,:] = pred_confidence_val

            ###### Generate Results for Evaluation

            basefilename = os.path.basename(cur_train_filename).split('.')[-2]
            scene_fn = os.path.join(OUTPUT_DIR, '%s.txt' % basefilename)
            f_scene = open(scene_fn, 'w')
            scene_gt_fn = os.path.join(GT_DIR, '%s.txt' % basefilename)
            group_pred = group_output.reshape(-1)
            seg_pred = seg_output.reshape(-1)
            conf = conf_output.reshape(-1)
            pts = cur_data.reshape([-1, 9])

            # filtering
            x = (pts[:, 6] / gap).astype(np.int32)
            y = (pts[:, 7] / gap).astype(np.int32)
            z = (pts[:, 8] / gap).astype(np.int32)
            for i in range(group_pred.shape[0]):
                if volume[x[i], y[i], z[i]] != -1:
                    group_pred[i] = volume[x[i], y[i], z[i]]

            un = np.unique(group_pred)
            pts_in_pred = [[] for itmp in range(NUM_CATEGORY)]
            group_pred_final = -1 * np.ones_like(group_pred)
            grouppred_cnt = 0
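            # For each predicted instance id: keep it only if its point count exceeds
            # 25% of the category's minimum group size, write its binary point mask to
            # predicted_masks/, and append "<mask file> <semantic label> <mean confidence>"
            # to the per-scene index file.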

            for ig, g in enumerate(un): #each object in prediction
                if g == -1:
                    continue
                obj_fn = "predicted_masks/%s_%d.txt" % (basefilename, ig)
                tmp = (group_pred == g)
                sem_seg_g = int(stats.mode(seg_pred[tmp])[0])
                if np.sum(tmp) > 0.25 * min_num_pts_in_group[sem_seg_g]:
                    pts_in_pred[sem_seg_g] += [tmp]
                    group_pred_final[tmp] = grouppred_cnt
                    conf_obj = np.mean(conf[tmp])
                    grouppred_cnt += 1
                    f_scene.write("%s %d %f\n" % (obj_fn, sem_seg_g, conf_obj))
                    np.savetxt(os.path.join(OUTPUT_DIR, obj_fn), tmp.astype(np.int32), fmt='%d')

            seg_gt = cur_seg.reshape(-1)
            group_gt = cur_group.reshape(-1)
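            # Pack semantic label and instance id into a single integer per point
            # (label * 1000 + instance id) so the ground truth can be written as one
            # column for the evaluation script.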
            groupid_gt = seg_gt * 1000 + group_gt
            np.savetxt(scene_gt_fn, groupid_gt.astype(np.int64), fmt='%d')

            f_scene.close()

            if output_verbose:
                output_color_point_cloud(pts[:, 6:], seg_pred.astype(np.int32),
                                         os.path.join(OUTPUT_DIR, '%s_segpred.obj' % (basefilename)))
                output_color_point_cloud(pts[:, 6:], group_pred_final.astype(np.int32),
                                         os.path.join(OUTPUT_DIR, '%s_grouppred.obj' % (basefilename)))
Beispiel #29
0
def predict():
    is_training = False

    with tf.device('/gpu:' + str(gpu_to_use)):
        is_training_ph = tf.placeholder(tf.bool, shape=())

        pointclouds_ph, ptsseglabel_ph, ptsgroup_label_ph, _, _, _ = \
            model.placeholder_inputs(BATCH_SIZE, POINT_NUM, NUM_GROUPS, NUM_CATEGORY)

        group_mat_label = tf.matmul(
            ptsgroup_label_ph, tf.transpose(ptsgroup_label_ph, perm=[0, 2, 1]))
        net_output = model.get_model(pointclouds_ph,
                                     is_training_ph,
                                     group_cate_num=NUM_CATEGORY)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    with tf.Session(config=config) as sess:

        # Restore variables from disk.

        ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
        if ckptstate is not None:
            LOAD_MODEL_FILE = os.path.join(
                PRETRAINED_MODEL_PATH,
                os.path.basename(ckptstate.model_checkpoint_path))
            saver.restore(sess, LOAD_MODEL_FILE)
            print("Model loaded in file: %s" % LOAD_MODEL_FILE)
        else:
            print("Fail to load modelfile: %s" % PRETRAINED_MODEL_PATH)

        ths = np.zeros(NUM_CATEGORY)
        ths_ = np.zeros(NUM_CATEGORY)
        cnt = np.zeros(NUM_CATEGORY)
        min_groupsize = np.zeros(NUM_CATEGORY)
        min_groupsize_cnt = np.zeros(NUM_CATEGORY)
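        # Accumulators: ths / ths_ / cnt collect per-category similarity thresholds
        # estimated by Get_Ths, while min_groupsize / min_groupsize_cnt accumulate
        # ground-truth group sizes per category (despite the name, the final value is
        # the mean size). Both are averaged and written to pergroup_thres.txt and
        # mingroupsize.txt at the end.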

        for shape_idx in range(len_pts_files):

            cur_train_filename = test_file_list[shape_idx]

            if not os.path.exists(cur_train_filename):
                continue
            cur_data, cur_group, _, cur_seg = provider.loadDataFile_with_groupseglabel_stanfordindoor(
                cur_train_filename)

            if OUTPUT_VERBOSE:
                pts = np.reshape(cur_data, [-1, 9])
                output_point_cloud_rgb(
                    pts[:, 6:], pts[:, 3:6],
                    os.path.join(OUTPUT_DIR, '%d_pts.obj' % (shape_idx)))

            pts_label_one_hot, pts_label_mask = model.convert_seg_to_one_hot(
                cur_seg)
            pts_group_label, _ = model.convert_groupandcate_to_one_hot(
                cur_group)
            num_data = cur_data.shape[0]

            cur_seg_flatten = np.reshape(cur_seg, [-1])
            un, indices = np.unique(cur_group, return_index=True)
            for iu, u in enumerate(un):
                groupsize = np.sum(cur_group == u)
                groupcate = cur_seg_flatten[indices[iu]]
                min_groupsize[groupcate] += groupsize
                # print groupsize, min_groupsize[groupcate]/min_groupsize_cnt[groupcate]
                min_groupsize_cnt[groupcate] += 1

            for j in range(num_data):

                print("Processsing: Shape [%d] Block[%d]" % (shape_idx, j))

                pts = cur_data[j, ...]

                feed_dict = {
                    pointclouds_ph: np.expand_dims(pts, 0),
                    ptsseglabel_ph: np.expand_dims(pts_label_one_hot[j, ...],
                                                   0),
                    ptsgroup_label_ph: np.expand_dims(pts_group_label[j, ...],
                                                      0),
                    is_training_ph: is_training,
                }

                pts_corr_val0, pred_confidence_val0, ptsclassification_val0, pts_corr_label_val0 = \
                                        sess.run([net_output['simmat'],
                                                  net_output['conf'],
                                                  net_output['semseg'],
                                                  group_mat_label],
                                                  feed_dict=feed_dict)
                seg = cur_seg[j, ...]
                ins = cur_group[j, ...]

                pts_corr_val = np.squeeze(pts_corr_val0[0])
                pred_confidence_val = np.squeeze(pred_confidence_val0[0])
                ptsclassification_val = np.argmax(np.squeeze(
                    ptsclassification_val0[0]),
                                                  axis=1)

                pts_corr_label_val = np.squeeze(1 - pts_corr_label_val0)
                seg = np.squeeze(seg)
                ins = np.squeeze(ins)

                ind = (seg == 8)
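                # Debug check on the single category selected above (semantic label 8):
                # binarise the predicted similarity matrix at a distance of 1.0 and print
                # its mean absolute difference from the ground-truth "different group"
                # matrix (1 - same-group).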
                pts_corr_val0 = (pts_corr_val > 1.).astype(np.float32)
                print(np.mean(
                    np.transpose(np.abs(pts_corr_label_val[ind] -
                                        pts_corr_val0[ind]),
                                 axes=[1, 0])[ind]))

                ths, ths_, cnt = Get_Ths(pts_corr_val, seg, ins, ths, ths_,
                                         cnt)
                print(ths / cnt)

                if OUTPUT_VERBOSE:
                    un, indices = np.unique(ins, return_index=True)
                    for ii, id in enumerate(indices):
                        corr = pts_corr_val[id].copy()
                        output_scale_point_cloud(
                            pts[:, 6:], np.float32(corr),
                            os.path.join(
                                OUTPUT_DIR, '%d_%d_%d_%d_scale.obj' %
                                (shape_idx, j, un[ii], seg[id])))
                        corr = pts_corr_label_val[id]
                        output_scale_point_cloud(
                            pts[:, 6:], np.float32(corr),
                            os.path.join(
                                OUTPUT_DIR, '%d_%d_%d_%d_scalegt.obj' %
                                (shape_idx, j, un[ii], seg[id])))
                    output_scale_point_cloud(
                        pts[:, 6:], np.float32(pred_confidence_val),
                        os.path.join(OUTPUT_DIR,
                                     '%d_%d_conf.obj' % (shape_idx, j)))
                    output_color_point_cloud(
                        pts[:, 6:], ptsclassification_val.astype(np.int32),
                        os.path.join(OUTPUT_DIR, '%d_seg.obj' % (shape_idx)))

        ths = [
            ths[i] / cnt[i] if cnt[i] != 0 else 0.2 for i in range(len(cnt))
        ]
        np.savetxt(os.path.join(RESTORE_DIR, 'pergroup_thres.txt'), ths)

        min_groupsize = [
            int(float(min_groupsize[i]) /
                min_groupsize_cnt[i]) if min_groupsize_cnt[i] != 0 else 0
            for i in range(len(min_groupsize))
        ]
        np.savetxt(os.path.join(RESTORE_DIR, 'mingroupsize.txt'),
                   min_groupsize)
Beispiel #30
0
import torch
from models import model
from toolbox import generate_samples, draw_samples
from torch.autograd import Variable

#name and path of the file where the model was saved
file_name = 'Gaussian_Writing_GRUIAM_epoch_700_1_0,005_Adam_256_2.pt'
path = 'C://Users//Julie//Documents//GitHub//DeepLearningProject//trained_models//'

#model name can be either Gaussian_Writing_LSTM or Gaussian_Writing_GRU
model_name = 'Gaussian_Writing_GRU'

#parameters used to create the model
parameters = {'n_gaussian': 1, 'dropout': 0.2, 'rnn_size': 256, 'rnn_layers': 2, "input_size": 3}
net = model.get_model(model_name, parameters)
net.load_state_dict(torch.load(path+file_name))
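# Generate and plot 10 handwriting samples of 60 points each; x0 = (0, 0, 1) is the
# seed input (with input_size=3 the last component is presumably the pen /
# end-of-stroke flag).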

for i in range(10):
    x0 = Variable(torch.Tensor([0,0,1]).view(1,1,3))
    data = generate_samples.generate(net, x0, n=60)
    draw_samples.plot_points(data)