Example #1
def embed(self, sentence):
    """Concatenate the embedding of each word into one flat sentence vector."""
    vector = []
    for word in sentence:
        if str(word) in self.vocab:
            vector = np.concatenate((vector, self.vocab[str(word)]), axis=0)
        else:
            # Unknown words contribute a zero vector of the same width as known embeddings
            vector = np.concatenate((vector, [0] * len(self.vocab['a'])), axis=0)
    return vector
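
The method above builds a flat sentence vector by concatenating one embedding per word along axis 0, substituting a zero vector of the same width for out-of-vocabulary tokens. A minimal self-contained sketch of the same pattern, with a hypothetical toy vocab dict standing in for self.vocab:

import numpy as np

# Hypothetical 3-dimensional embeddings standing in for self.vocab
vocab = {
    'a': np.array([0.1, 0.2, 0.3]),
    'cat': np.array([0.4, 0.5, 0.6]),
}

def embed(sentence, vocab):
    vector = np.array([])                            # start from an empty 1-D array
    dim = len(vocab['a'])                            # embedding width, read from a known word
    for word in sentence:
        piece = vocab.get(str(word), np.zeros(dim))  # zero vector for unknown words
        vector = np.concatenate((vector, piece), axis=0)
    return vector

print(embed(['a', 'cat', 'sat'], vocab))             # length 9: three 3-dim slots, last one zeros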
Example #2
if __name__ == "__main__":
    # Warm up the model once on a dummy frame so the first real frame is not slow
    _ = handle_one(np.ones((320, 320, 3)))

    video_capture = cv2.VideoCapture(0)
    frame_count = -1
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        frame_count += 1
        # Only process every fourth frame to keep the loop close to real time
        if frame_count % 4 == 0:
            canvas = handle_one(frame)
            canvas = cv2.resize(canvas, (256, 256))
            # Stitch two copies side by side (axis 1) to form the paired A|B image
            # layout the test data loader expects, then write it where it will be read
            temp1 = np.concatenate([canvas, canvas], 1)
            cv2.imwrite("./temp/test/0.png", temp1)
            dataset = data_loader.load_data()
            # Take only the first item from the reloaded dataset; use a separate loop
            # variable so the frame counter above is not clobbered
            for i, data in enumerate(dataset):
                data1 = data
                if i > 0:
                    break
            # (Alternative, commented out in the original: read the image back with cv2,
            # reshape it to a 1x3x256x256 float tensor and build data1 = {'A': ..., 'A_paths': ...}
            # by hand instead of going through the data loader.)
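
The key step above is np.concatenate([canvas, canvas], 1), which joins two copies of the processed frame along the width axis to produce the side-by-side A|B image that aligned-pair data loaders (pix2pix-style) read from disk. A minimal sketch of just that step, using a hypothetical blank frame in place of a webcam capture:

import numpy as np
import cv2

# Hypothetical stand-in for a processed 256x256 RGB webcam frame
canvas = np.zeros((256, 256, 3), dtype=np.uint8)

# Concatenate along axis 1 (width): the result is a 256x512 image with the frame
# duplicated left and right, i.e. the usual paired "A|B" layout.
pair = np.concatenate([canvas, canvas], axis=1)
print(pair.shape)              # (256, 512, 3)

cv2.imwrite("pair.png", pair)  # illustrative output path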
def main():
    # configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    torch.manual_seed(args.seed)
    if args.cuda:
        print('using CUDA with gpu_id:', args.gpu_id)
        torch.cuda.manual_seed(args.seed)
    # dataset
    unlabeledset = load_lua(
        '/home/hankuan1993/dataset/stl10/stl10-unlabeled-scaled-tensor.t7')
    unlabeledsetnp = unlabeledset.numpy()
    trainset = load_lua(
        '/home/hankuan1993/dataset/stl10/stl10-train-scaled-tensor.t7')
    trainsetnp = trainset.numpy()
    testset = load_lua(
        '/home/hankuan1993/dataset/stl10/stl10-test-scaled-tensor.t7')
    testsetnp = testset.numpy()
    trainsetnp = np.concatenate((unlabeledsetnp, trainsetnp), axis=0)
    trainlen = len(trainsetnp)
    testlen = len(testsetnp)
    # model
    model = CVAE()
    if args.loadPrev:
        print('====> loading previously saved model ...')
        model.load_state_dict(torch.load('./cvaeCheckPoint.pth'))
    if args.cuda:
        print('====> loading model to gpu ...')
        model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    # train and evaluate
    for epoch in range(1, args.epochs + 1):
        # train
        model.train()
        train_loss = 0
        traintime = math.ceil(trainlen / args.batch_size)
        shuffleidx = np.random.permutation(trainsetnp.shape[0])
        for batch_idx in range(traintime):
            datanp = trainsetnp[shuffleidx[batch_idx *
                                           args.batch_size:(batch_idx + 1) *
                                           args.batch_size], :, :, :].astype(
                                               np.float32) / 255.0
            data = torch.from_numpy(datanp)
            data = Variable(data)
            if args.cuda:
                data = data.cuda()
            optimizer.zero_grad()
            recon_batch, mu, logvar = model(data)
            loss = loss_function(recon_batch, data, mu, logvar)
            loss.backward()
            train_loss += loss.data[0]
            optimizer.step()


#            if batch_idx % args.log_interval == 0:
#                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
#                    epoch, batch_idx * args.batch_size, trainlen,
#                    100. * batch_idx / traintime,
#                    loss.data[0] / len(data)))
        print('====> Epoch: {} Train Average loss: {:.4f}'.format(
            epoch, train_loss / traintime / args.batch_size))

        # evaluate
        model.eval()
        test_loss = 0
        for test_idx in range(testlen):
            # Keep a batch dimension of one so the input stays N x C x H x W, as in training
            datanp = testsetnp[test_idx:test_idx + 1, :, :, :].astype(np.float32) / 255.0
            data = torch.from_numpy(datanp)
            if args.cuda:
                data = data.cuda()
            data = Variable(data, volatile=True)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).data[0]

        test_loss /= testlen
        print('====> Test set loss: {:.4f}'.format(test_loss))
    # Since training is finished, save it! :)
    torch.save(model.state_dict(), './cvaeCheckPoint.pth')
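
loss_function is not defined in this example. In the standard VAE formulation it is the sum of a reconstruction term and the KL divergence between the approximate posterior and a unit Gaussian. A minimal sketch consistent with the older PyTorch API used above (Variable, loss.data[0]), assuming the decoder output lies in [0, 1]:

import torch
import torch.nn.functional as F

def loss_function(recon_x, x, mu, logvar):
    # Reconstruction term: binary cross-entropy between the decoded image and the input,
    # summed over all pixels in the batch (assumes decoder outputs lie in [0, 1]).
    bce = F.binary_cross_entropy(recon_x.view(recon_x.size(0), -1),
                                 x.view(x.size(0), -1),
                                 size_average=False)
    # KL divergence between the approximate posterior N(mu, sigma^2) and the unit Gaussian.
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld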