import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader


def main(config, args):
    # Select device: fall back to CPU when CUDA is disabled in config or unavailable.
    use_cuda = config['use_gpu'] == 1 and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    model = Siamese()
    model = model.to(device)

    # L1 reconstruction loss plus cosine similarity between the twin embeddings.
    rec_loss = nn.L1Loss()
    cosine_loss = nn.CosineSimilarity(dim=1)
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0,
                           betas=(0.9, 0.98), eps=1e-9)

    # batch_s=100 suggests the generator batches internally, hence batch_size=1 here.
    dataset_train = SpeechDataGenerator(args.clean_file, args.noisy_file, batch_s=100)
    dataloader_train = DataLoader(dataset_train, batch_size=1, shuffle=True,
                                  collate_fn=speech_collate)

    for epoch in range(1, config['num_epochs'] + 1):
        train_loss = train(model, dataloader_train, epoch, optimizer, device,
                           rec_loss, cosine_loss)
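The train() function called above is not part of this snippet; a minimal sketch of an epoch loop matching that call is shown below. The batch layout (noisy/clean pairs) and the model returning a reconstruction plus two embeddings are assumptions, not the original author's code:

def train(model, dataloader, epoch, optimizer, device, rec_loss, cosine_loss):
    # Hypothetical epoch loop; assumes each batch yields a (noisy, clean) pair
    # and that the Siamese model returns (reconstruction, emb_a, emb_b).
    model.train()
    total_loss = 0.0
    for noisy, clean in dataloader:
        noisy, clean = noisy.to(device), clean.to(device)
        optimizer.zero_grad()
        recon, emb_a, emb_b = model(noisy, clean)
        # Reconstruction term plus a term pulling the twin embeddings together.
        loss = rec_loss(recon, clean) + (1 - cosine_loss(emb_a, emb_b)).mean()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / max(len(dataloader), 1)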
Example #2
import os

import torch
import torch.nn as nn

model_dir = 'models/snapshot/'
model_load_path = os.path.join(model_dir, 'snapshot_epoch_1.pt')
gConfig = get_config()
gConfig.model_dir = model_dir

criterion = nn.HingeEmbeddingLoss()
model = Siamese()

package = torch.load(model_load_path, map_location='cpu')  # load to CPU so the snapshot opens without a GPU

model.load_state_dict(package['state_dict'])
model.eval()
print('Model loaded from {}'.format(model_load_path))

logging('Model configuration:\n{}'.format(model))

model_size, n_params_each_layer = modelSize(model)  # renamed to avoid shadowing the modelSize helper
logging('Model size: {}\n{}'.format(model_size, n_params_each_layer))

params = model.parameters()

for i, a_param in enumerate(params):
    print(a_param)

exit(0)  # note: the demo code below never runs until this early exit is removed

imagePath = '../data/demo.png'
img = loadAndResizeImage(imagePath)
text, raw = recognizeImageLexiconFree(model, img)
print('Recognized text: {} (raw: {})'.format(text, raw))
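The modelSize helper used above is not included in the snippet. A plausible stand-in that returns a total trainable-parameter count and a per-layer breakdown (the return shape is an assumption inferred from the call site) could be:

def modelSize(model):
    # Hypothetical helper: total trainable-parameter count plus a per-layer
    # breakdown, matching the (size, per-layer) tuple unpacked at the call site.
    n_params_each_layer = {name: p.numel() for name, p in model.named_parameters()}
    total = sum(n_params_each_layer.values())
    return total, n_params_each_layer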
        else:
            # Negative pair: stack two mismatched samples and label them [0, 1].
            stacked = np.hstack((rand_loss, rand_win))
            stacked = torch.from_numpy(stacked).type(torch.FloatTensor)
            label = torch.from_numpy(np.array([0, 1])).type(torch.FloatTensor)
            return stacked, label

    def __len__(self):
        return self.length

train_loader = torch.utils.data.DataLoader(TrainSet(1000000), batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(TestSet(100000), batch_size=batch_size, shuffle=True)
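Only the tail of the Dataset class appears above. A self-contained version of the same pattern is sketched below; the class name, feature dimension, and pairing logic are all hypothetical, kept only to the structure the fragment implies (stacked pair in, [1, 0] / [0, 1] label out):

import numpy as np
import torch
from torch.utils.data import Dataset

class PairSet(Dataset):  # hypothetical stand-in for TrainSet / TestSet
    def __init__(self, length, dim=100):
        self.length = length
        self.dim = dim

    def __getitem__(self, idx):
        a = np.random.randn(self.dim).astype(np.float32)
        if idx % 2 == 0:
            # Positive pair: two near-identical views, labelled [1, 0].
            b = a + 0.01 * np.random.randn(self.dim).astype(np.float32)
            label = np.array([1, 0], dtype=np.float32)
        else:
            # Negative pair: an unrelated sample, labelled [0, 1] as in the fragment above.
            b = np.random.randn(self.dim).astype(np.float32)
            label = np.array([0, 1], dtype=np.float32)
        stacked = np.hstack((a, b))
        return torch.from_numpy(stacked), torch.from_numpy(label)

    def __len__(self):
        return self.length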


print('Building model...')
model = Siamese().to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)

# Pull one batch up front to sanity-check shapes before training.
e = enumerate(train_loader)
b, (data, label) = next(e)


import torch.nn.functional as F


# Binary cross-entropy summed over all elements and the batch
def loss_function(pred, label):
    BCE = F.binary_cross_entropy(pred, label, reduction='sum')  # size_average=False is deprecated
    return BCE


def train(epoch):
    model.train()
    train_loss = 0
    for batch_idx, (data, label) in enumerate(train_loader):