Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("img_path", type=str,  help="path to the RGB image input")
    parser.add_argument("focal_len", type=float,  help="focal length of the camera")
    args = parser.parse_args()
    
    # switching to GPU if possible
    use_gpu = torch.cuda.is_available()
    print("\nusing GPU:", use_gpu)    

    # loading models
    print("\nLoading models...")
    model_de = net.get_model(output="depth", use_gpu=use_gpu)
    if use_gpu:
        model_de = model_de.cuda()

    model_seg = net.get_model(output="seg", use_gpu=use_gpu)
    if use_gpu:
        model_seg = model_seg.cuda()
            
    # setting models to evaluation mode
    model_de.eval()
    model_seg.eval()
    print("Done.")
      
    # reading image
    img = torch.Tensor(load_img(args.img_path))
    
    # running model on the image
    if use_gpu:
        img = img.cuda()
        
    print("Plotting...")
    output_de = model_de(img)
    output_seg = model_seg(img)
    
    # bilinear upsampling
    output_de = F.interpolate(output_de, size=(320, 320), mode="bilinear", align_corners=True)
    output_seg = F.interpolate(output_seg, size=(320, 320), mode="bilinear", align_corners=True)

    # softmax for semantic segmentation
    output_seg = F.softmax(output_seg, dim=1)

    # plotting the results
    output_de = output_de.cpu()[0].data.numpy()
    output_seg = output_seg.cpu()[0].data.numpy()
    img = img.cpu()[0].data.numpy()  
    show_img_preds(img, output_de, output_seg, uc_th=0.9, apply_depth_mask=True)

    # visualize the points in 3D
    show_point_cloud(img, output_de, output_seg, args.focal_len, uc_th=0.9, apply_depth_mask=True)
    print("Done")
Example #2
def train():
    args = get_args()
    train_input = args.train
    test_input = args.test
    epochs = args.epochs
    batch_size = args.batch_size

    model = net.get_model()

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(1e-3, amsgrad=True),
                  metrics=['accuracy'])

    h5f_train = h5py.File(train_input, 'r')
    h5f_test = h5py.File(test_input, 'r')

    x_train = h5f_train['input']
    y_train = h5f_train['label']
    x_test = h5f_test['input']
    y_test = h5f_test['label']

    tensorboard = TensorBoard(log_dir="./log/", write_images=True)
    checkpoint = ModelCheckpoint(
        filepath="./log/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5",
        monitor='val_loss',
        verbose=1,
        save_best_only=True)

    model.fit(x_train, [y_train[0], y_train[1], y_train[2], y_train[3]],
              validation_data=(x_test,
                               [y_test[0], y_test[1], y_test[2], y_test[3]]),
              epochs=epochs,
              batch_size=batch_size,
              shuffle='batch',
              callbacks=[checkpoint, tensorboard])
Example #3
 def deal_with_data(self):
     '''
     Prepare the data; this method may be left empty if there is nothing to do.
     :return:
     '''
     csv_path = os.path.join(DATA_PATH, 'TBDetection', 'train.csv')
     df = pd.read_csv(csv_path)
     img_file_list = list(df['image_path'].values)
     xml_file_list = list(df['xml_path'].values)
     transform = transforms.Compose(
         [transforms.RandomHorizontalFlip(0.5),
          transforms.ToTensor()])
     train_data = MyDataset(os.path.join(DATA_PATH, 'TBDetection'),
                            img_file_list,
                            xml_file_list,
                            transforms=transform,
                            data_generate=data_generate)
     self.train_loader = DataLoader(dataset=train_data,
                                    batch_size=args.BATCH,
                                    shuffle=True,
                                    collate_fn=collate_fn)
     self.model = get_model()
     #self.model.load_state_dict(torch.load('./model/best.pth'))
     self.model.to(device)
     print('deal with data done...')
Example #4
 def load_model(self):
     '''
     Model initialization; the model must be loaded in this method.
     '''
     self.model = get_model()
     print('load from best.pth...')
     self.model.load_state_dict(torch.load(os.path.join(MODEL_PATH, 'best.pth')))
     self.model.to(device)
     print('load model done...')
Example #5
import sys
import time
import random
import multiprocessing
from multiprocessing import Process, Queue

import numpy as np
import scipy.ndimage.filters
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from PIL import Image

from net import get_model

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":
    assert len(sys.argv) == 2, "Enter the Image path that you want to process"

    model_name = "best_model.pth"
    model = get_model(model_name)
    model = model.to(device)

    img = np.array(Image.open(sys.argv[1]))
    img = np.rollaxis(img, 2)
    img = torch.tensor(img).unsqueeze(0).to(device).float() / 255
    with torch.no_grad():
        res = model(img)
    res = res.cpu().numpy()[0][0]

    res[res >= .5] = 1
    res[res < .5] = 0

    plt.imshow(res)
    plt.show()
Example #6
                  num_workers=args.num_workers)  # , drop_last=True)
    for x in ['train', 'val']
}

dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

# images, indices, labels, ids, cams, names = next(iter(dataloaders['train']))

num_label = len(attributes.labels)  # image_datasets['train'].num_label()
# num_id = image_datasets['train'].num_id()
# labels_list = image_datasets['train'].labels()

######################################################################
# Model and Optimizer
# ------------------
model = get_model(model_name, num_label)
if use_gpu:
    model = model.cuda()

# loss
criterion_bce = nn.BCELoss()
criterion_ce = nn.CrossEntropyLoss()

# optimizer
ignored_params = list(map(id, model.features.parameters()))
classifier_params = filter(lambda p: id(p) not in ignored_params,
                           model.parameters())
optimizer = torch.optim.SGD([
    {
        'params': model.features.parameters(),
        'lr': 0.01
Example #7
                        self.train_object.save_model(
                            str(iters // len(self.dataobject) + offset))

                    print(iters, end=" ")
                    self.train_object.train(data)

        for p in arr:
            p.join()


if __name__ == "__main__":
    assert len(sys.argv) <= 2, "Enter the model number to resume from"
    last_model = 0
    if len(sys.argv) == 2:
        last_model = sys.argv[1]
        model = get_model("model" + str(last_model) + ".pth")
    else:
        model = get_model()

    data_object = dataloader(paths)

    T = trainer(model)

    multi = multiprocess_control(data_object,
                                 workers=2,
                                 produce_func=producer,
                                 shared_Q=Q,
                                 epochs_desired=30,
                                 train_object=T,
                                 offset=last_model)
    multi.start_training()
Example #8
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

# images, indices, labels, ids, cams, names = next(iter(dataloaders['train']))

num_label = image_datasets['train'].num_label()
num_id = image_datasets['train'].num_id()
labels_list = image_datasets['train'].labels()

print("-->num label  :", num_label)
print("-->num id     :", num_id)
print("-->labels_list:", labels_list)

######################################################################
# Model and Optimizer
# ------------------
model = get_model(model_name, num_label, args.use_id, num_id=num_id)
if use_gpu:
    model = model.cuda()

# loss
criterion_bce = nn.BCELoss()
criterion_ce = nn.CrossEntropyLoss()

# optimizer
ignored_params = list(map(id, model.features.parameters()))
classifier_params = filter(lambda p: id(p) not in ignored_params,
                           model.parameters())
optimizer = torch.optim.SGD([
    {
        'params': model.features.parameters(),
        'lr': 0.01
Example #9
if __name__ == '__main__':
    param = {
        'phase': 'train',
        'batch_size': 3,
        'eval_n_crop': 4,
        'learning_rate': 1e-3,
        'momentum': 0.9,
        'weight_decay': 0,
        'epochs': 40,
        'mode': 'sord_ent_weighted'   # sord, sord_ent_weighted, sord_min_local_ent, sord_weighted_minent, sord_align_grad, classification, regression, reg_of_cls
    }
    restore_file = "experiments/train_lr_1e-03_momentum_0.9_wd_0_epoch_30_mode_sord_pretrained_DeepLabV3+_PascalVOC_crop_375*513/best.pth.tar"
    model_dir = 'refine_CADC_depth_aggregated_new_sord_weighted_sigmoid_16x16_lr_1e-03_momentum_0.9_wd_0_epoch_30_mode_sord_pretrained_DeepLabV3+_Kitti_crop_513*513'
    train_type = 'refine'  # refine or continue

    model = net.get_model(param['mode'])
    model.cuda()

    ## Pretrained on Pascal semantic segmentation, retrain on Kitti depth estimation
    # last_layer = ['classifier.classifier.4.weight', 'classifier.classifier.4.bias']
    # last_layer_params = list(map(lambda x: x[1], list(filter(lambda kv: kv[0] in last_layer, model.named_parameters()))))
    # base_params = list(map(lambda x: x[1], list(filter(lambda kv: kv[0] not in last_layer, model.named_parameters()))))
    #
    # optimizer = torch.optim.SGD([
    #     {'params': base_params, 'lr': param['learning_rate']},
    #     {'params': last_layer_params, 'lr': param['learning_rate'] * 10}
    # ], momentum=param['momentum'], weight_decay=param['weight_decay'], nesterov=True)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1)

    optimizer = torch.optim.SGD(params=model.parameters(), lr=param['learning_rate'], momentum=param['momentum'], weight_decay=param['weight_decay'], nesterov=True)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
Example #10
from net import get_model
from data import train_val_dataset
from util import cross_entropy, seed_everything, smooth_one_hot

root = './data'  #'/content/datasets'
batch_size = 192
num_epochs = 20

lr = 0.1
m = 0.9
w_d = 0.1
finetuning = True

#model = Resnet50(pretrained=True)
#model = get_model(2, pretrained=True)
model = get_model('resnet50', pretrained=True)

model = model.to(device)
optimizer = torch.optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=m,
                            weight_decay=w_d)

# and a learning rate scheduler
#lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
#lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[18,25,25,], gamma=0.1) #22
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, 'min', patience=2, min_lr=1e-6)  # seems to hinder training when fine-tuning
#cross_entropy =  nn.CrossEntropyLoss(reduction='sum').to(device)    #weight=torch.tensor([1.,1.]),

Example #11
                                              min_delta=0,
                                              patience=config.es_patience,
                                              verbose=1,
                                              mode='auto',
                                              baseline=None,
                                              restore_best_weights=False)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                              factor=0.1,
                                              patience=config.lr_patience,
                                              min_lr=0.000001)
callbacks.append(EarlyStopping)
callbacks.append(reduce_lr)
callbacks.append(ModelCheckpoint)
from net import get_model

model, base_model = get_model()
optimizer = keras.optimizers.adam(lr=0.001, decay=0)
if config.num_class == 2:
    loss_name = "binary_crossentropy"
else:
    loss_name = "categorical_crossentropy"
if not config.onehot:
    loss_name = "sparse_" + loss_name

model.compile(optimizer=optimizer, loss=loss_name, metrics=["acc"])

print(model.summary())
model.fit_generator(
    train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=args.EPOCHS,
Example #12
    train_dst = MyDataset(x_train, y_train, transform=transforms['train'])
    valid_dst = MyDataset(x_val, y_val, transform=transforms['val'])

    train_loader = torch.utils.data.DataLoader(train_dst,
                                               batch_size=cfg.bs,
                                               shuffle=True,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_dst,
                                               batch_size=cfg.bs,
                                               shuffle=False,
                                               pin_memory=True)

    # compute the mean and std of the training set
    print(utils.get_mean_and_std(train_dst))
    # use an ensemble of several models
    models_list = get_model(cfg.model_names)
    for i, cur_cnn in enumerate(models_list):
        cnn = cur_cnn
        # keep the name so the trained model can be saved
        name = cfg.model_names[i] + '.pkl'
        cnn.to(device)
        # training setup
        loss_fn = nn.CrossEntropyLoss()
        # loss_fn = utils.LabelSmoothingCrossEntropy()
        optimizer = optim.Adam(cnn.parameters(), lr=cfg.lr, weight_decay=1e-4)
        # optimizer = optim.SGD(cnn.parameters(), lr=cfg.lr, momentum=0.9, nesterov=True)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         mode='max',
                                                         patience=3,
                                                         verbose=True)
        print("训练中的模型 %s" % name)
Example #13
param = torch.load(model_dir).copy()
param['eval_n_crop'] = 10
param['batch_size'] = 3
param.pop('state_dict')
param.pop('optim_dict')
param.pop('sched_dict', None)
param.pop('restore_file', None)
param.pop('model_dir', None)

# data_gen_test = DataGenerator('/home/datasets/Kitti/', phase='test')
data_gen_test = DataGenerator('/home/datasets/CADC/cadcd/', '/home/datasets_mod/CADC/cadcd/', phase='inference', cam=0, depth_mode='dror')
print('val or test data size:', len(data_gen_test.dataset))
dataloader_test = data_gen_test.create_data(batch_size=param['batch_size'])
data = next(iter(dataloader_test))
print('img shape:', data['img'].shape)
print('depth shape:', data['depth'].shape)

model = net.get_model(param['mode'], pretrained=False)
model.cuda()

utils.load_checkpoint(model_dir, model)
print('Load model parameters done')

inference(model, dataloader_test, param)

# fig = plt.figure()
# plt.hist(t_inference, bins=20)
# plt.show()

# print('inference time average: %.3f\ninference time variance: %.3f' % (np.mean(t_inference), np.var(t_inference)))
Example #14
               help='ip address for pastalog (default: None)')

if __name__ == '__main__':
    args = p.parse_args()

    print('... Prepare directories')
    dirs = prepare_dirs(args.rootdir, args.name)

    if args.model_path:
        from keras.models import model_from_json
        from net import load_model
        print('... Load network from: {}'.format(args.model_path))
        model = load_model(args.model_path)
    else:
        from net import get_model
        model = get_model(args.img_size, args.beta, args.kernel_size,
                          args.nb_filters)
        model_definition_file = dirs.modelsdir / 'model.json'
        print(
            '... Save model architecture to: {}'.format(model_definition_file))
        import json
        with model_definition_file.open('w') as f:
            json.dump(json.loads(model.to_json()), f)

    if args.weights_path:
        print('... Load parameters from: {}'.format(args.weights_path))
        model.load_weights(args.weights_path)

    print('... Compile model')
    model.compile(optimizer=Adam(lr=args.lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
Example #15
# #### Training

# Initialize the model. Possible values for the task block type: MLP, LSTM, GRU, TempConv.

# In[3]:

params = {
    'name': 'test',
    'type_': 'MLP',
    'lr': 3e-4,
    'n_h': 128,
    'p': 0.5,
    'seq_len': 1
}
model, opt = get_model(params)

# In[4]:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Get the data loaders. get_mini_data loads only a subset of the training data, which is useful for checking whether the model can overfit.

# In[5]:

train_dl, valid_dl = get_data(data_path, model.params.seq_len, batch_size=16)
# train_dl, valid_dl = get_mini_data(data_path, model.params.seq_len, batch_size=16, l=4000)

# uncomment the next cell if the feature extractor should also be trained
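
The cell referred to above is not included in the snippet. A minimal sketch of what such a cell typically contains, assuming the backbone is exposed as model.feature_extractor (a hypothetical attribute name; the real one depends on the project's net.get_model):

# Hypothetical sketch: unfreeze the feature extractor so it is trained as well.
for p in model.feature_extractor.parameters():   # attribute name is an assumption
    p.requires_grad = True

# Re-create the optimizer so the newly unfrozen parameters are included.
opt = torch.optim.Adam((p for p in model.parameters() if p.requires_grad),
                       lr=params['lr'])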
Example #16
def get_results(x_train, x_valid, x_test, y_train, y_valid, y_test, idx2vec,
                emb_dim, valid, lang, emb_src, model, k, nn_dims):
    """Get the clasiification results by the model(knn, net)."""
    vectors = _get_vectors(x_train, x_valid, x_test, idx2vec)

    if model == 'knn':
        database = vectors[0]
        if valid:
            queries = vectors[1]
        else:
            queries = vectors[3]

        y_pred = _classify(np.vstack(database), np.vstack(queries), emb_dim, k,
                           y_train)
    elif model == 'nn':
        X_train, X_valid, X_test = np.vstack(vectors[0]), np.vstack(vectors[1]),\
            np.vstack(vectors[2])

        num_classes = np.unique(y_test).shape[0]
        y_train = np.vstack(y_train)
        y_train = np.squeeze(np.eye(num_classes)[y_train.reshape(-1)])
        y_valid = np.vstack(y_valid)
        y_valid = np.squeeze(np.eye(num_classes)[y_valid.reshape(-1)])

        model = get_model(emb_dim, nn_dims, num_classes)
        # print(model.summary())

        model.fit(X_train,
                  y_train,
                  validation_data=(X_valid, y_valid),
                  callbacks=[
                      EarlyStopping(monitor='val_loss',
                                    patience=5,
                                    mode='min',
                                    verbose=0),
                      ModelCheckpoint(
                          '../data/checkpoints/best_model_{}_{}.h5'.format(
                              lang, emb_src),
                          monitor='val_accuracy',
                          mode='max',
                          verbose=0,
                          save_best_only=True)
                  ],
                  verbose=0,
                  epochs=100)

        if valid:
            y_pred = model.predict(X_valid)
        else:
            y_pred = model.predict(X_test)

        y_pred = np.argmax(y_pred, axis=1)
        y_valid = np.argmax(y_valid, axis=1)

    if valid:
        accuracy = accuracy_score(y_valid, y_pred)
    else:
        accuracy = accuracy_score(y_test, y_pred)

    # if np.unique(y_test).shape[0] == 2:
    #     avg = 'binary'
    # else:
    #     avg = 'macro'

    # f1_value = f1_score(y_test, y_pred, average=avg)
    f1_value = 0

    return accuracy, f1_value