Example #1
import pickle
import torch
from torch.optim import Adam

# load_data, ConvNet, CapsuleNet, train, test, args, config, and device
# are assumed to be defined elsewhere in the surrounding project.

def main():
    X_tr, y_tr, X_te, y_te = load_data()
    # Work on small subsets to keep the run fast.
    X_tr, y_tr = X_tr[:1024], y_tr[:1024]
    X_te, y_te = X_te[:128], y_te[:128]

    # Pick the architecture and checkpoint path from the CLI flag.
    if args.model == 'cnn':
        model = ConvNet()
        model_save_path = config.CNN_MODEL_PATH
    else:
        model = CapsuleNet()
        model_save_path = config.CAPSULE_MODEL_PATH

    model.to(device)
    optimizer = Adam(model.parameters())

    train_loss = []
    train_accuracy = []
    best_acc = 0.0
    for epoch in range(10):
        print(("Epoch %d " + "-" * 70) % (epoch + 1))
        loss = train(model, optimizer, X_tr, y_tr)
        train_loss.append(loss)
        acc = test(model, X_tr, y_tr, "Train")  # accuracy on the training set
        train_accuracy.append(acc)
        # Checkpoint whenever training accuracy improves.
        if acc > best_acc:
            best_acc = acc
            torch.save(model.state_dict(), model_save_path)

    # Persist the training curves; a context manager closes the file handle.
    with open(f'result/{args.model}_train.p', 'wb') as f:
        pickle.dump((train_loss, train_accuracy), f)
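
The train and test helpers called in main() are not shown. Below is a minimal sketch of what train might look like, assuming in-memory tensors and a cross-entropy objective; batch_size, the criterion, and the batching scheme are assumptions, not from the source.

def train(model, optimizer, X, y, batch_size=64):
    # Hypothetical helper: one epoch over the data in mini-batches,
    # returning the mean batch loss. Assumes X and y are tensors and that
    # device is defined at module level, as in main() above.
    model.train()
    criterion = torch.nn.CrossEntropyLoss()
    total_loss, n_batches = 0.0, 0
    for i in range(0, len(X), batch_size):
        xb = X[i:i + batch_size].to(device)
        yb = y[i:i + batch_size].to(device)
        optimizer.zero_grad()
        loss = criterion(model(xb), yb)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        n_batches += 1
    return total_loss / n_batches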
Example #2
def inference_model(network, lstm_out, out_format, model_path):
    # DoaClasses, ConvNet, CRNN, Dropouts, and device are assumed to be
    # defined elsewhere in the surrounding project.
    doa_classes = DoaClasses()
    if out_format == "cartesian":
        out_dim = 3
    elif out_format == "class":
        out_dim = len(doa_classes.classes)
    else:
        raise ValueError("Unknown output format: %s" % out_format)

    if network == "CNN":
        model = ConvNet(device, Dropouts(0, 0, 0), out_dim, doa_classes)
    elif network == "CRNN":
        model = CRNN(device, Dropouts(0, 0, 0), out_dim, doa_classes, lstm_out)
    else:
        raise ValueError("Unknown network type: %s" % network)

    # Restore the trained weights onto the target device and switch to
    # evaluation mode for inference.
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    model.eval()

    return model, doa_classes
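
A possible call site for inference_model; the network type, LSTM width, and checkpoint path here are illustrative assumptions, not values from the source.

# Hypothetical usage: argument values are examples only.
model, doa_classes = inference_model(
    network="CRNN",
    lstm_out=128,
    out_format="class",
    model_path="models/crnn_best.pt",
)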
Example #3
                              pin_memory=True)
    # (The train_loader construction above is truncated in the source.)
    val_loader = DataLoader(
        AudioDataset(
            path=os.path.join("audio", "validation"),
            sample_rate=config["sample_rate"],
            n_mels=config["n_mels"],
            n_fft=config["n_fft"],
            win_length=config["win_length"],
            hop_length=config["hop_length"],
        ),
        batch_size=config["batch_size"],
        shuffle=False,  # validation data does not need shuffling
        pin_memory=True,
    )

    # Initialize model, loss function, optimizers, and lr scheduler
    model = ConvNet(base=4)
    model.to(device)
    loss_fn = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"])
    # Decay the learning rate by 10x every 10 epochs.
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=10,
                                             gamma=0.1)

    # Initialize wandb
    wandb.init(project="torch", config=config)
    wandb.watch(model, log="all")

    # Start training
    for epoch in range(1, config["n_epochs"] + 1):
        print(f"Epoch {epoch}/{config['n_epochs']}")
        start_time = time.time()
        train_loss = 0
Example #4
#%% Define the optimizer, loss function and metrics
optimizer = optim.Adam(model.parameters(), lr=1e-4)
metrics_fn = metrics

# Weight balancing: apply per-class weights when requested, placing them
# on the GPU when one is available.
if args.weight_balance and torch.cuda.device_count() == 0:
    criterion = nn.CrossEntropyLoss(
        weight=torch.FloatTensor(helper.cls_weight))
elif args.weight_balance and torch.cuda.device_count() > 0:
    criterion = nn.CrossEntropyLoss(
        weight=torch.FloatTensor(helper.cls_weight).cuda())
else:
    criterion = nn.CrossEntropyLoss()

if torch.cuda.device_count() > 1:  # wrap the model for multi-GPU training
    model = nn.DataParallel(module=model)
model = model.to(device)
criterion = criterion.to(device)

#%% Train the model
train_evaluate(model, train_iterator, valid_iterator, criterion, optimizer,
               metrics_fn, args)

#%% Test
if args.save_model:
    test_scores = test(model,
                       test_iterator,
                       criterion,
                       metrics_fn,
                       args,
                       restore_file='best')
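
helper.cls_weight above is assumed to hold one weight per class. A common recipe, sketched here under that assumption, is inverse class frequency (the "balanced" scheme):

import numpy as np

def class_weights(labels, n_classes):
    # Inverse-frequency weights: rarer classes get proportionally larger
    # weights; np.maximum guards against division by zero.
    counts = np.bincount(labels, minlength=n_classes).astype(np.float64)
    return counts.sum() / (n_classes * np.maximum(counts, 1))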
Example #5
g_loss = np.zeros((5000, 1))

# Resume from the last completed epoch recorded under result/.
allfolders = glob.glob('./result/*0')
lastepoch = 0
for folder in allfolders:
    lastepoch = np.maximum(lastepoch, int(folder[-4:]))

learning_rate = 1e-4

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# The network must exist before the optimizer can take its parameters.
net = ConvNet()
net.to(device)

criterion = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=learning_rate)

for epoch in range(lastepoch, 4001):
    # Skip epochs that already have a result directory.
    if os.path.isdir("result/%04d" % epoch):
        continue
    cnt = 0
    # Drop the learning rate after epoch 2000; updating the existing param
    # groups avoids rebuilding the optimizer (and resetting Adam's running
    # moments) on every subsequent epoch.
    if epoch > 2000:
        learning_rate = 1e-5
        for g in optimizer.param_groups:
            g['lr'] = learning_rate

    num_ids = len(train_ids)
    running_loss = 0.0
    for ind in np.random.permutation(num_ids):
        # get the path from image id
        train_id = train_ids[ind]