Example #1
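    # Move all three sub-networks to the GPU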
    model_rgb = model_rgb.cuda()
    model_depth = model_depth.cuda()
    model_fusion = model_fusion.cuda()

if args.phase == 'train':

    # One SGD optimizer per sub-network (RGB, depth, fusion)
    optimizer_rgb = optim.SGD(model_rgb.parameters(),
                              lr=cfg['lr'],
                              momentum=cfg['momentum'],
                              weight_decay=cfg['weight_decay'])
    optimizer_depth = optim.SGD(model_depth.parameters(),
                                lr=cfg['lr'],
                                momentum=cfg['momentum'],
                                weight_decay=cfg['weight_decay'])
    optimizer_fusion = optim.SGD(model_fusion.parameters(),
                                 lr=cfg['lr'],
                                 momentum=cfg['momentum'],
                                 weight_decay=cfg['weight_decay'])

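    # Trainer (defined in trainer.py) wraps the models, optimizers, and
    # data loader and runs the training loop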
    training = Trainer(cuda=cuda,
                       model_rgb=model_rgb,
                       model_depth=model_depth,
                       model_fusion=model_fusion,
                       optimizer_rgb=optimizer_rgb,
                       optimizer_depth=optimizer_depth,
                       optimizer_fusion=optimizer_fusion,
                       train_loader=train_loader,
                       max_iter=cfg['max_iteration'],
                       snapshot=cfg['spshot'],
                       outpath=args.snapshot_root,
Example #2
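    # Reshape the validation pilots per sample, convert them out of complex
    # form, and corrupt them with additive noise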
    pilot_val = np.array(pilot_val)
    pilot_val = pilot_val.reshape(num_val, -1)
    pilot_val = de_complex(pilot_val)
    pilot_val = add_noise(pilot_val, 20)

    # merging the pilot with the sub-6 GHz signal (concatenation left commented out)
    #total_train = np.concatenate((train_x,pilot_train),axis = 1)
    #total_val   = np.concatenate((val_x,pilot_val),axis = 1)
    total_train = pilot_train
    total_val = pilot_val
    #####   The training phase!   #####

    num_sample = train_x.shape[0]

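    # Fusion network, trained with Adam and a cross-entropy loss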
    fcn = Fusion()
    optimizer = optim.Adam(fcn.parameters(), lr=lr)
    loss_fun = nn.CrossEntropyLoss()

    fcn.train()
    for epoch in range(epoches):
        print('epoch', epoch)
        adjust_lr(optimizer, epoch)
        # data has already been shuffled during preprocessing

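        # Iterate over mini-batches; the else branch below handles the final
        # batch that is smaller than batch_size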
        for i in range(int(num_sample / batch_size) + 1):

            if (i + 1) * batch_size <= num_sample:
                X = train_x[i * batch_size:(i + 1) * batch_size, :]
                P = pilot_train[i * batch_size:(i + 1) * batch_size, :]
                Y = train_labels[i * batch_size:(i + 1) * batch_size]
            else: