Example #1
def train(data_loader, alpha=0.001, n_epoch=100):
    m = CNN() # initialize the model
    optimizer = th.optim.SGD(m.parameters(), lr=alpha) # create an SGD optimizer
    for _ in range(n_epoch): # iterate through the dataset n_epoch times
        for mini_batch in data_loader: # iterate through the dataset, with one mini-batch of random training samples (x,y) at a time
            x=mini_batch[0] # the gray-scale images in a mini-batch
            y=mini_batch[1] # the labels of the images in a mini-batch
            #########################################
            ## INSERT YOUR CODE HERE (5 points)
            z = compute_z(x, m)
            L = compute_L(z, y)
            L.backward()
            update_parameters(optimizer)
            #########################################
    return m
    #-----------------
    '''  
        TEST: Now you can test the correctness of your code above by typing the following in the terminal:
        ---------------------------------------------------
        nosetests -v test3.py:test_train
        --- OR ---- 
        python3 -m nose -v test3.py:test_train
        --- OR ---- 
        python -m nose -v test3.py:test_train
        ---------------------------------------------------
    '''
    
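For context, a minimal sketch of how this train function could be invoked; the dataset below is a stand-in built from dummy zero tensors, not part of the original assignment:

    dataset = th.utils.data.TensorDataset(th.zeros(100, 1, 64, 64), th.zeros(100, 1))
    data_loader = th.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)
    m = train(data_loader, alpha=0.001, n_epoch=10)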
Example #2
    def __init__(self, n_lattice=16, n_actions=4, loadpath=None, norm=10, lr=0.001, soft=0.1, decay=0.9, batch_size=128, capacity=10000, freq=100):
        self.n_lattice = n_lattice
        self.n_actions = n_actions
        self.norm = norm
        self.lr = lr
        self.soft = soft
        self.decay = decay
        self.batch_size = batch_size
        self.capacity = capacity
        self.freq = freq
        self.prediction_net = CNN()
        self.target_net = CNN()

        if loadpath is not None:
            net = torch.load(loadpath+'/CNN3.pth')
            self.prediction_net.load_state_dict(net.state_dict())
        
        self.target_net.load_state_dict(self.prediction_net.state_dict())

        self.clip = 1
        self.stepcnt = 0
        self.memorycnt = 0
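        # pre-allocated replay memory: one row per transition (state, action, reward, next state)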
        self.states = np.zeros((capacity, 16, 4, 4))
        self.actions = np.zeros((capacity, 1))
        self.rewards = np.zeros((capacity, 1))
        self.stateps = np.zeros((capacity, 16, 4, 4))
        self.optimizer = torch.optim.SGD(self.prediction_net.parameters(), lr=self.lr)
        from game2048.expectimax import board_to_move
        self.consultant = board_to_move
Example #3
def test(run):
    print('Testing ' + str(run))
    # getting the test data
    test_set = torchvision.datasets.FashionMNIST(root='./data/FashionMNIST',
                                                 train=False,
                                                 download=True,
                                                 transform=transforms.Compose(
                                                     [transforms.ToTensor()]))
    # load the dataset
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=100,
                                              shuffle=False)

    # loading the specified model
    cnn = CNN()
    cnn.load(run)

    test_loss = 0
    test_total_correct = 0
    test_accuracy = 0

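    # note: wrapping this loop in torch.no_grad() would avoid building autograd graphs during evaluation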
    for batch in test_loader:
        images, labels = batch
        preds = cnn(images)
        loss = F.cross_entropy(preds, labels)

        test_loss += loss.item()
        test_total_correct += F.softmax(
            preds, dim=1).argmax(dim=1).eq(labels).sum().item()

    test_loss /= len(test_loader)  # average of the per-batch mean losses
    test_accuracy = test_total_correct / len(test_set)

    print('Testing ended.. total_correct: {}, loss: {}, accuracy: {}'.format(
        test_total_correct, test_loss, test_accuracy))
Example #4
File: main.py Project: codepike/iceberg
def main(_):
    keep_prob = 0.5 if config.mode == 'train' else 1.0
    if config.mode == 'train':
        reader = TFReader(config.data_path, config.epoch, config.batch_size,
                          [75 * 75 * 2], [1])
        cnn_model = CNN(reader,
                        config.mode,
                        keep_prob=keep_prob,
                        learning_rate=config.learning_rate)
        # resnet = Resnet(reader, config.mode, keep_prob=0.5, learning_rate=config.learning_rate)
        train(cnn_model, config)
    elif config.mode == 'evaluate':
        reader = TFReader(config.data_path, config.epoch, config.batch_size,
                          [75 * 75 * 2], [1])
        cnn_model = CNN(reader,
                        config.mode,
                        keep_prob=keep_prob,
                        learning_rate=config.learning_rate)
        # resnet = Resnet(reader, config.mode, keep_prob=keep_prob)
        evaluate(cnn_model, config)
    elif config.mode == 'predict':
        reader = DefaultReader(None)
        cnn_model = Resnet(reader, config.mode, keep_prob=keep_prob)
        predict(cnn_model, config)
    elif config.mode == 'batch_predict':
        reader = TFReader(config.data_path,
                          1,
                          config.batch_size, [75 * 75 * 2], [1],
                          shuffle=False)
        # resnet = Resnet(reader, config.mode, keep_prob=keep_prob)
        cnn_model = CNN(reader, config.mode, keep_prob=keep_prob)
        batch_predict(cnn_model, config)
Example #5
def main():
    seed = 1
    learning_rate = 0.1
    batch_size = 64
    epochs = 5
    torch.manual_seed(seed)

    device = torch.device("cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        'data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               **kwargs)

    model = CNN().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=learning_rate)

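    # StepLR multiplies the learning rate by gamma=0.7 after every epoch (step_size=1)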
    scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
    for epoch in range(1, epochs + 1):
        train(model, device, train_loader, optimizer, epoch)
        scheduler.step()

    torch.save(model.state_dict(), "mnist_model.pt")
Example #6
def train(args):
    assert args.num_classes
    common.make_dir(args.checkout_dir)
    nnet = CNN(args.left_context + args.right_context + 1, args.feat_dim, num_maps, pooling_size,
            filter_size, conn_dim, args.num_classes)
    print(nnet)
    nnet.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = th.optim.Adam(nnet.parameters(), lr=args.learning_rate)

    train_dataset = THCHS30(root=args.data_dir, data_type='train', left_context=left_context,
            right_context=right_context, model_type='cnn')
    train_loader  = data.DataLoader(dataset=train_dataset, batch_size=args.min_batch,
                                    shuffle=True, num_workers=6)

    test_dataset = THCHS30(root=args.data_dir, data_type='test', left_context=left_context,
            right_context=right_context, model_type='cnn')
    test_loader  = data.DataLoader(dataset=test_dataset, batch_size=args.min_batch,
                                    shuffle=True, num_workers=6)

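    # baseline validation pass before any training (logged as epoch -1)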
    cross_validate(-1, nnet, test_dataset, test_loader) 
    for epoch in range(args.num_epochs):
        common.train_one_epoch(nnet, criterion, optimizer, train_loader)
        cross_validate(epoch, nnet, test_dataset, test_loader) 
        th.save(nnet, common.join_path(args.checkout_dir, 'cnn.{}.pkl'.format(epoch + 1)))
Example #7
def train_all(gv=global_var):
    for project_name, sources in gv.projects.items():
        for i in range(len(sources) - 1):
            print('begin training %s' % sources[i])
            md.train_and_test_cnn(project_name=project_name,
                                  train_name=sources[i],
                                  test_name=sources[i + 1])
            md.train_and_test_cnn_p(project_name=project_name,
                                    train_name=sources[i],
                                    test_name=sources[i + 1])
            pc.train_and_test_cnn(project_name=project_name,
                                  train_name=sources[i],
                                  test_name=sources[i + 1],
                                  dict_params=gv.plain_cnn_params)
            pc.train_and_test_cnn_p(project_name=project_name,
                                    train_name=sources[i],
                                    test_name=sources[i + 1],
                                    dict_params=gv.plain_cnn_params)
            dn.train_and_test_dbn(project_name=project_name,
                                  train_name=sources[i],
                                  test_name=sources[i + 1],
                                  dict_params=gv.dbn_params)
            dn.train_and_test_dbn_plus(project_name=project_name,
                                       train_name=sources[i],
                                       test_name=sources[i + 1],
                                       dict_params=gv.dbn_params)
Example #8
 def __init__(self,
              screen_height=0,
              screen_width=0,
              n_actions=0,
              gamma=0.999,
              epsilon_start=0.9,
              epsilon_end=0.05,
              epsilon_decay=200,
              memory_capacity=10000,
              batch_size=128,
              device="cpu"):
     self.actions_count = 0
     self.n_actions = n_actions  # total number of actions
     self.device = device  # device: "cpu" or "cuda"
     self.gamma = gamma
     # parameters of the epsilon-greedy exploration policy
     self.epsilon = 0
     self.epsilon_start = epsilon_start
     self.epsilon_end = epsilon_end
     self.epsilon_decay = epsilon_decay
     self.batch_size = batch_size
     self.policy_net = CNN(screen_height, screen_width,
                           n_actions).to(self.device)
     self.target_net = CNN(screen_height, screen_width,
                           n_actions).to(self.device)
     self.target_net.load_state_dict(
         self.policy_net.state_dict())  # start target_net as an exact copy of policy_net
     self.target_net.eval()  # eval mode: disables BatchNorm updates and Dropout
     self.optimizer = optim.RMSprop(self.policy_net.parameters(
     ))  # parameters() vs state_dict(): the former yields tensors with requires_grad=True
     self.loss = 0
     self.memory = ReplayBuffer(memory_capacity)
Example #9
    def __init__(
            self,
            loss_flag=False,
            checkpoint_name='./final_checkpoint/re3_final_checkpoint.pth'):

        self.device = device
        self.CNN = CNN(1, 1).to(self.device)
        self.RNN = RNN(CNN_OUTPUT_SIZE, 1, 1, True).to(self.device)
        if os.path.isfile(checkpoint_name):
            checkpoint = torch.load(checkpoint_name, map_location='cpu')
            self.CNN.load_state_dict(checkpoint['cnn_model_state_dict'])
            self.RNN.load_state_dict(checkpoint['rnn_model_state_dict'])

        else:
            print("Invalid/No Checkpoint. Aborting...!!")
            sys.exit()
        self.CNN = self.CNN.to(device)
        self.RNN = self.RNN.to(device)
        self.forward_count = -1
        self.previous_frame = None
        self.cropped_input = np.zeros((2, 3, CROP_SIZE, CROP_SIZE),
                                      dtype=np.float32)
        self.calculate_loss = loss_flag
        self.criterion = nn.MSELoss()
        self.MSE_loss = 0
Example #10
 def __init__(self, ds_path, lr, iterations, batch_size, hidden_layers_out, 
              print_freq, save_dir, momentum, dropout):
     
     self.train_data = torchvision.datasets.MNIST(ds_path, train=True, 
                                                    transform=transforms.ToTensor(), 
                                                    download=True)
     
     self.test_data = torchvision.datasets.MNIST(ds_path, train=False, 
                                                   transform=transforms.ToTensor(), 
                                                   download=True)
     
     self.train_loader = torch.utils.data.DataLoader(self.train_data, batch_size=batch_size, shuffle=True)
     self.test_loader = torch.utils.data.DataLoader(self.test_data, batch_size=batch_size)
     
     self.save_dir = save_dir
     self.is_momentum = (momentum != 0.0)
     
     # Set Model Hyperparameters
     self.learning_rate = lr
     self.iterations = iterations
     self.print_freq = print_freq
     self.model = CNN(hidden_layers_out, dropout=dropout)
     
     self.cuda = torch.cuda.is_available()
     
     if self.cuda:
         self.model = self.model.cuda()
Example #11
def test_update_parameters():
    ''' (5 points) update_parameters'''
    x = th.zeros(2,1,64,64) # a mini-batch of 2 gray-scale images (1 channel) of size 64 X 64
    y = th.zeros(2,1)
    m = CNN()
    m.conv1.bias.data = th.zeros(10)
    m.conv2.bias.data = th.zeros(20)
    m.conv3.bias.data = th.zeros(30)
    m.fc.bias.data = th.zeros(1)
    optimizer = th.optim.SGD(m.parameters(), lr=0.1)
    z=m(x)
    L = compute_L(z,y)
    assert np.allclose(L.data,0.6931,atol=0.01)
    L.backward()
    update_parameters(optimizer)
    assert np.allclose(m.fc.bias.data,[-0.05],atol=0.01)
    assert np.allclose(m.conv3.bias.data,np.zeros(30),atol=0.01)
    x = th.ones(4,1,64,64)
    y = th.tensor([[1.],[0.],[1.],[0.]])
    m.conv1.bias.data = th.zeros(10)
    m.conv2.bias.data = th.zeros(20)
    m.conv3.bias.data = th.zeros(30)
    m.fc.bias.data = th.zeros(1)
    optimizer = th.optim.SGD(m.parameters(), lr=1.)
    z=m(x)
    L = compute_L(z,y)
    L.backward()
    update_parameters(optimizer)
    assert not np.allclose(m.conv3.bias.data,np.zeros(30))
    th.save(m,"cnn.pt") # save the CNN for demo
Example #12
def webcam():
    model = CNN()
    model.load_state_dict(torch.load('outputs/model'))

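    # preprocessing for raw OpenCV frames (numpy arrays): convert to PIL, resize and crop to 150x200, then to tensor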
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToPILImage(),
        torchvision.transforms.Resize(150, interpolation=Image.BILINEAR),
        torchvision.transforms.CenterCrop((150, 200)),
        torchvision.transforms.ToTensor()
    ])

    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            img = transform(frame)
            img_batch = img.unsqueeze(0)
            outputs, _ = model(img_batch)
            bbxs = find_batch_bounding_boxes(outputs)[0]
            img = util.draw_bounding_boxes(img, bbxs[Label.ROBOT.value], (0, 0, 255))
            img = util.draw_bounding_boxes(img, bbxs[Label.BALL.value], (255, 0, 0))
            util.stream_image(img, wait=25, scale=4)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    cap.release()
    cv2.destroyAllWindows()
Example #13
def demo_cnn():
    X,y,w = load2d()
    y = flatten_except_1dim(y,ndim=3)
    w = flatten_except_1dim(w,ndim=2)
    print(X.shape)
    print(y.shape)
    print(w.shape)
    model = CNN()
    hist = model.fit(X, y, sample_weight=w, epochs=EPOCH,batch_size=128, validation_split=0.2)
    # plot_loss(hist.history,"CNN model",plt)
    # plt.legend()
    # plt.grid()
    # plt.yscale("log")
    # plt.xlabel("epoch")
    # plt.ylabel("loss")
    # plt.show()
    model.save('./models/cnn_weighted_model.h5')

    X_test,_,_ = load2d(test=True)
    # y_test = model.predict(X_test)
    # fig = plt.figure(figsize=(10, 7))
    # fig.subplots_adjust(
    #     left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

    # for i in range(16):
    #     axis = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
    #     plot_sample(X_test[i], y_test[i], axis)
    # plt.show()
    df_y_pred = predict_single(model, X_test)
    prepare_submission(df_y_pred,"cnn_weighted")
Example #14
def test_model(null_split):
    use_cuda = torch.cuda.is_available()
    torch.manual_seed(1)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    test_data = train_files('/home/hansencb/MNIST/Test')

    test_dataset = Test_Dataset(test_data)

    batch_size = 128
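    # note: this batch_size is unused; the loader below evaluates one sample at a time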
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, **kwargs)

    model = CNN(1,10).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

    model_file = 'models/saved_model_split_{}'.format(null_split)
    model.load_state_dict(torch.load(model_file))

    loss, accuracy, conf_matrix, correct, incorrect = test(model, device, test_loader)

    correct_file = 'correct_lists/list_correct_model_split_{}'.format(null_split)
    with open(correct_file, 'w') as f:
        for i in correct:
            line = '{} {} {}\n'.format(i[0], str(i[1]), str(i[2]))
            f.write(line)
        for i in incorrect:
            line = '{} {} {}\n'.format(i[0], str(i[1]), str(i[2]))
            f.write(line)

    return accuracy
Example #15
File: cv.py Project: yuzilan/DeepDTA-tf
def main(argv):
    conf = configparser.ConfigParser()
    print(conf.read(argv))
    max_smi_len = conf.getint('model', 'max_smi_len')
    max_seq_len = conf.getint('model', 'max_seq_len')

    data_path = conf.get('model', 'data_path')

    ligands = pd.read_csv(data_path + 'ligands.csv', header=None)
    proteins = pd.read_csv(data_path + 'proteins.csv', header=None)
    inter = pd.read_csv(data_path + 'inter.csv', header=None)
    inter = np.asarray(inter)
    print(ligands.shape, proteins.shape, inter.shape)

    char_smi_set = json.load(open(conf.get('model', 'char_smi')))
    char_seq_set = json.load(open(conf.get('model', 'char_seq')))

    cv_num = conf.getint('cv', 'cv_num', fallback=5)

    problem_type = conf.getint('cv', 'problem_type', fallback=1)
    if problem_type == 1:
        cv_train, cv_valid = new_pair_fold(inter, cv_num)
    elif problem_type == 2:
        cv_train, cv_valid = new_protein_fold(inter, cv_num)
    elif problem_type == 3:
        cv_train, cv_valid = new_ligand_fold(inter, cv_num)

    print(cv_train[0].shape, cv_valid[0].shape)

    sess = tf.InteractiveSession(config=tf.ConfigProto(
        allow_soft_placement=True))
    model = CNN(filter_num=conf.getint('model', 'filter_num'),
                smi_window_len=conf.getint('model', 'smi_window_len'),
                seq_window_len=conf.getint('model', 'seq_window_len'),
                max_smi_len=max_smi_len,
                max_seq_len=max_seq_len,
                char_smi_set_size=len(char_smi_set),
                char_seq_set_size=len(char_seq_set),
                embed_dim=conf.getint('model', 'embed_dim'))

    for cv_id in range(cv_num):
        print('start cv', cv_id)
        model_path = os.path.join(conf.get('model', 'path', fallback='tmp'),
                                  'cv-' + str(cv_id) + '.model')
        trainX, trainy = get_feature(ligands, proteins, inter, cv_train[cv_id],
                                     max_smi_len, char_smi_set, max_seq_len,
                                     char_seq_set)
        validX, validy = get_feature(ligands, proteins, inter, cv_valid[cv_id],
                                     max_smi_len, char_smi_set, max_seq_len,
                                     char_seq_set)
        print(trainX.shape, trainy.shape, validX.shape, validy.shape)
        model.train(sess,
                    trainX,
                    trainy,
                    validX,
                    validy,
                    nb_epoch=conf.getint('model', 'num_epoch'),
                    batch_size=conf.getint('model', 'batch_size'),
                    model_path=model_path)
Example #16
def load_model(lr):
    model = CNN()

    loss_fnc = torch.nn.CrossEntropyLoss()

    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    return model, loss_fnc, optimizer
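
A minimal usage sketch for the loader above (batch_x and batch_y are hypothetical placeholders for a batch of inputs and integer class labels):

    model, loss_fnc, optimizer = load_model(lr=0.01)
    logits = model(batch_x)
    loss = loss_fnc(logits, batch_y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()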
Example #17
def load_model():
    """
    Load the local model
    :return:
    """
    model = CNN()
    model.load_weights('./models/cnn3_best_weights.h5')
    return model
Example #18
    def __init__(self):
        self.epsilon = EPSILON_MAX
        self.epsilon_min = EPSILON_MIN
        self.epsilon_decay = EPSILON_DECAY
        self.memory = deque(maxlen=MEMORY_SIZE)

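        # two architecturally identical CNNs (4 stacked 84x84 frames, 6 actions);
        # an online/offline pair like this is the usual DQN target-network setup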
        self.offline = CNN((4, 84, 84), 6)
        self.online = CNN((4, 84, 84), 6)
Example #19
def train_model(null_split):
    use_cuda = torch.cuda.is_available()
    torch.manual_seed(1)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    train_data = train_files('/home/local/VANDERBILT/hansencb/MNIST/Train')
    test_data = train_files('/home/local/VANDERBILT/hansencb/MNIST/Validate')

    train_dataset = Train_Dataset(train_data, null_split=null_split)
    test_dataset = Test_Dataset(test_data)


    batch_size = 128
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, **kwargs)

    model = CNN(1,10).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

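    # truncate the four log files so each run starts from empty logs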
    train_loss_file = 'results/train_loss_split_{}.txt'.format(null_split)
    f = open(train_loss_file, 'w')
    f.close()
    validate_loss_file = 'results/validate_loss_split_{}.txt'.format(null_split)
    f = open(validate_loss_file, 'w')
    f.close()
    train_accuracy_file = 'results/train_accuracy_split_{}.txt'.format(null_split)
    f = open(train_accuracy_file, 'w')
    f.close()
    validate_accuracy_file = 'results/validate_accuracy_split_{}.txt'.format(null_split)
    f = open(validate_accuracy_file, 'w')
    f.close()

    model_file = 'models/saved_model_split_{}'.format(null_split)

    for epoch in range(1, 101):
        print('\nEpoch %d: ' % epoch)
        loss, accuracy = train(model, device, train_loader, optimizer)

        with open(train_loss_file, "a") as file:
            file.write(str(loss))
            file.write('\n')
        with open(train_accuracy_file, "a") as file:
            file.write(str(accuracy))
            file.write('\n')

        loss, accuracy, confusion, correct_data, incorrect_data = test(model, device, test_loader)

        with open(validate_loss_file, "a") as file:
            file.write(str(loss))
            file.write('\n')
        with open(validate_accuracy_file, "a") as file:
            file.write(str(accuracy))
            file.write('\n')

        if epoch % 5 == 0:
            with open(model_file, 'wb') as f:
                torch.save(model.state_dict(), f)
Example #20
def eval_cnn():
    model = CNN()
    config = ConfigManager(model).load()
    optimizer = optim.SGD(model.parameters(),
                          lr=float(config["LEARNING_RATE"]),
                          momentum=float(config["MOMENTUM"]))
    criterion = torch.nn.NLLLoss()
    trainer = Trainer(model, MNIST(batch_size=10), optimizer, criterion, True)
    trainer.evaluate()
Example #21
def run_train(args):
    # set the training type: provinces, area, or letters
    conf.set_train_params(args.type)

    # load the data
    train_loader, val_loader, train_size = load_train(conf)
    conf.train_size = train_size

    # instantiate the model, loss function, and optimizer
    model = CNN(conf).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    if conf.opt == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=conf.learning_rate)
    else:
        optimizer = torch.optim.Adadelta(model.parameters())

    # start training
    loss_count, acc_count = [], []
    for epoch in range(conf.n_epoch):
        print("=" * 15, "epoch: ", str(epoch), "=" * 20)
        for i, (x, y) in enumerate(train_loader):
            x = x.unsqueeze(1)
            batch_x = Variable(x)
            batch_y = Variable(y)

            output = model(batch_x)
            loss = criterion(output, batch_y)

            optimizer.zero_grad()
            loss.backward()

            optimizer.step()

            if i % conf.print_per_batch == 0:
                loss_count.append(loss.item())  # store a plain float, not the graph-attached tensor
                print('train step: {} [{}/{} ({:.0f}%)]'.format(
                    i, i * len(batch_x), conf.train_size,
                    100. * i / len(train_loader)))
                batch_acc = 100.0 * (output.argmax(1) == batch_y
                                     ).float().sum().item() / len(batch_x)
                print("\t minibatch loss: %.3f,\t acc: %.1f%%" %
                      (loss.item(), batch_acc))
                acc_count.append(batch_acc)
                print("\t validation accuracy: %.1f%%" %
                      (eval_in_batches(val_loader, model) * 100))

    if args.show:
        show_results(conf.train_type,
                     loss_count,
                     acc_count,
                     conf.result_fig_path,
                     show=False)

    # save the model
    if args.save:
        print("save model: %s" % conf.model_path)
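        # note: this pickles the whole module; saving model.state_dict() instead is the more portable convention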
        torch.save(model, conf.model_path)
Example #22
def load_cnn_model():
    """
    Load the CNN model
    :return:
    """
    from model import CNN
    model = CNN()
    model.load_weights('./models/cnn_best_weights.h5')
    return model
Example #23
def load_and_train_model(model_path, load_pretrained):
    pcg = PCG(model_path)

    if load_pretrained:
        pcg.load("/tmp")
    else:
        pcg.initialize_wav_data()

    cnn = CNN(pcg, epochs=100, dropout=0.5)
    cnn.train()
Example #24
def load_model(lr, optm, afunc, kernal_size, padding, pool, dilation):

    model = CNN(afunc, kernal_size, padding, pool, dilation)
    loss_fnc = torch.nn.CrossEntropyLoss()
    if optm == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), lr = lr, momentum=0.5)
    elif optm == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr = lr, eps=1e-8)#, weight_decay=0.001)

    return model, loss_fnc, optimizer
Example #25
def load_and_train_model(model_path, load_pretrained):
    pcg = PCG(model_path)

    if load_pretrained:
        pcg.load("/tmp")
    else:
        pcg.initialize_wav_data()

    cnn = CNN(pcg, epochs=200, dropout=0.5)
    cnn.train()
Example #26
def train(X_train, y_train, X_val, y_val, X_test, y_test):
    X_train = torch.from_numpy(X_train).float()
    y_train = torch.from_numpy(y_train).long()
    X_test = torch.from_numpy(X_test).float()
    y_test = torch.from_numpy(y_test).long()
    model = CNN()
    # loss_fun = F.cross_entropy
    loss_func = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    if GPU:
        model = model.cuda()
    print('model:', model)
    print(X_train.shape)
    print(y_train.shape)
    trainset = TensorDataset(X_train, y_train)
    # valset = TensorDataset(torch.from_numpy(X_val), torch.from_numpy(y_val))
    testset = TensorDataset(X_test, y_test)
    train_loader = DataLoaderX(trainset, batch_size=16, num_workers=0, pin_memory=True, shuffle=True)
    test_loader = DataLoaderX(testset, batch_size=16, num_workers=0, pin_memory=True)
    train_size = len(X_train) / 16
    for epoch in tqdm(range(EPOCH)):
        print('start epoch', epoch, ' / ', EPOCH)
        running_loss = 0.0
        e_st = time.time()
        for i, (x, y) in enumerate(tqdm(train_loader)):
            if i >= train_size - 1:
                continue
            x = x.cuda()
            y = y.cuda()
            y_hat = model(x)
            loss = loss_func(y_hat, y).cuda()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.data.item()
            if i % 100 == 99:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
        e_et = time.time()
        print('epoch:', epoch, ' use ', show_time(e_et-e_st))
    print("Finished Training")

    print("Beginning Testing")
    correct = 0
    total = 0
    for data in test_loader:
        x, y = data
        if GPU:
            x, y = x.cuda(), y.cuda()  # keep test inputs on the same device as the model
        y_hat = model(x.float())
        _, pred = torch.max(y_hat.data, 1)
        total += y.size(0)
        correct += (pred == y).sum().item()
    print('Accuracy of model on test set:%d %%' % (100 * correct / total))
Example #27
    def model_creator(layers):
        model = CNN(32, 3, args.channels, 10, layers, n_nodes=args.nodes)
        criterion = nn.CrossEntropyLoss()

        optim = torch.optim.SGD(model.parameters(),
                                0.025,
                                momentum=0.9,
                                weight_decay=3.0E-4)
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optim, args.epochs, eta_min=0.001)

        return model, criterion, optim, lr_scheduler
Example #28
def load_model(params):
    path = f"saved_models/{params['DATASET']}_static_{params['EPOCH']}.pt"

    try:
        model = CNN(**params)
        model.load_state_dict(torch.load(path))
        print(f"Model in {path} loaded successfully!")
        return model

    except FileNotFoundError:
        print(f"No available model such as {path}.")
        exit()
Example #29
def main(argv):
    conf = configparser.ConfigParser()
    print(conf.read(argv))
    max_smi_len = conf.getint('model', 'max_smi_len')
    max_seq_len = conf.getint('model', 'max_seq_len')

    data_path = conf.get('model', 'data_path')

    ligands = pd.read_csv(data_path + 'ligands.csv',
                          header=None,
                          names=['id', 'smi'])
    proteins = pd.read_csv(data_path + 'proteins.csv',
                           header=None,
                           names=['id', 'seq'])
    pairs = pd.read_csv(data_path + 'pairs.csv', header=None)

    print(ligands.shape, proteins.shape, pairs.shape)

    char_smi_set = json.load(open(conf.get('model', 'char_smi')))
    char_seq_set = json.load(open(conf.get('model', 'char_seq')))

    smi_feature, seq_feature = get_data(ligands, proteins, max_smi_len,
                                        max_seq_len, char_smi_set,
                                        char_seq_set)
    print(smi_feature.shape, seq_feature.shape)

    sess = tf.InteractiveSession(config=tf.ConfigProto(
        allow_soft_placement=True))
    model = CNN(filter_num=conf.getint('model', 'filter_num'),
                smi_window_len=conf.getint('model', 'smi_window_len'),
                seq_window_len=conf.getint('model', 'seq_window_len'),
                max_smi_len=max_smi_len,
                max_seq_len=max_seq_len,
                char_smi_set_size=len(char_smi_set),
                char_seq_set_size=len(char_seq_set),
                embed_dim=conf.getint('model', 'embed_dim'))

    trainy = np.asarray(pairs.iloc[:, 2]).reshape([-1, 1])
    trainX = []
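    # pair each interaction row with its ligand (SMILES) and protein (sequence) feature vectors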
    for idx, row in pairs.iterrows():
        ligand_index = ligands[ligands.id == row[0]].index.values[0]
        protein_index = proteins[proteins.id == row[1]].index.values[0]
        trainX.append([smi_feature[ligand_index], seq_feature[protein_index]])
    trainX = np.asarray(trainX)

    model_path = os.path.join(conf.get('model', 'path', fallback='tmp'),
                              'all.model')
    model.train(sess,
                trainX,
                trainy,
                nb_epoch=conf.getint('model', 'num_epoch'),
                batch_size=conf.getint('model', 'batch_size'),
                model_path=model_path)
Example #30
def main(argv):
    conf = configparser.ConfigParser()
    print(conf.read(argv))
    max_smi_len = conf.getint('model', 'max_smi_len')
    max_seq_len = conf.getint('model', 'max_seq_len')

    char_smi_set = json.load(open(conf.get('model', 'char_smi')))
    char_seq_set = json.load(open(conf.get('model', 'char_seq')))

    data_path = conf.get('data', 'path')
    data_predicted = conf.get('data', 'prediction').split(',')

    sess = tf.InteractiveSession(
        config=tf.ConfigProto(allow_soft_placement=True))

    ''' SMILES + seq '''
    model = CNN(filter_num=conf.getint('model', 'filter_num'),
                smi_window_len=conf.getint('model', 'smi_window_len'),
                seq_window_len=conf.getint('model', 'seq_window_len'),
                max_smi_len=max_smi_len,
                max_seq_len=max_seq_len,
                char_smi_set_size=len(char_smi_set),
                char_seq_set_size=len(char_seq_set),
                embed_dim=conf.getint('model', 'embed_dim'))
    ''' ECFP + seq '''
    # model = ECFPCNN(filter_num=conf.getint('model', 'filter_num'),
    #                 seq_window_len=conf.getint('model', 'seq_window_len'),
    #                 char_seq_set_size=len(char_seq_set),
    #                 embed_dim=conf.getint('model', 'embed_dim'),
    #                 max_smi_len=max_smi_len,
    #                 max_seq_len=max_seq_len)

    model_path = os.path.join(
        conf.get('model', 'path', fallback='tmp'), 'all.model')

    for data_name in data_predicted:
        path = data_path + data_name + '/'

        ligands = pd.read_csv(path + 'ligands.csv', header=None)
        proteins = pd.read_csv(path + 'proteins.csv', header=None)

        smi_feature, seq_feature = get_data(
            ligands, proteins, max_smi_len, max_seq_len, char_smi_set, char_seq_set)

        inputs = []
        for smif in smi_feature:
            inputs.append([smif, seq_feature[0]])
        res = model.predict(sess, np.asarray(inputs), batch_size=conf.getint(
            'model', 'batch_size'), model_path=model_path)
        names = [x.split('.')[0] for x in list(ligands.iloc[:, 0])]
        final_data = pd.DataFrame(np.asarray(list(zip(names, res))))
        final_data.to_csv(path + 'res.csv', index=None, header=None)
Example #31
    def __init__(self):
        self.epsilon = EPSILON_MAX
        self.epsilon_min = EPSILON_MIN
        self.epsilon_decay = EPSILON_DECAY
        self.memory = deque(maxlen=MEMORY_SIZE)

        self.offline = CNN((4, 84, 84), 6)
        self.online = CNN((4, 84, 84), 6)
        
        if PLAY_FROM_WEIGHTS:
            self.epsilon = 0.0
            self.offline.model.load_weights('online_model.h5')
            self.online.model.load_weights('online_model.h5')
Example #32
def train_pred(x_train, y_train, x_test, seed):
    print('Building the CNN ...')
    rng = np.random.RandomState(seed)
    model = CNN(rng, .1)
    model.load_params('./param')
    print('Training with early stop .. ')
    x_train, y_train, x_valid, y_valid = split_train_set(x_train, y_train)
    model.train(x_train, y_train, x_valid, y_valid)
    model.save_params('./param')
    pred = model.predict(x_test)
    write_data(pred)
Example #33
    # load vocab, vocab_size, max_length
    with open('vocab.json', 'r') as fp:
        vocab = json.load(fp)

    # load configuration
    with open('config.txt', 'r') as f:
        vocab_size = int(re.sub('\n', '', f.readline()))
        max_length = int(f.readline())

    # open session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # make model instance
    model = CNN(sess=sess, vocab_size=vocab_size, sequence_length=max_length, trainable=True)

    # load trained model
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(PATH))

    # inference
    while True:
        test = input("User >> ")
        if test == "exit":
            break
        speak = sentence_to_index_morphs([test], vocab, max_length)
        label, prob = model.predict(speak)
        if prob[0] < 0.6:
            response = '차분해 보이시네요 :)'
        else:
Example #34
    # save vocab, vocab_size, max_length
    with open('vocab.json', 'w') as fp:
        json.dump(vocab, fp)

    # save configuration
    with open('config.txt', 'w') as f:
        f.write(str(vocab_size) + '\n')
        f.write(str(max_length))

    # open session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # make model instance
    model = CNN(sess=sess, vocab_size=vocab_size, sequence_length=max_length, trainable=True)

    # assign pretrained embedding vectors
    model.embedding_assign(embedding)

    # make train batches
    batches = batch_iter(list(zip(x_input, y_input)), batch_size=64, num_epochs=5)

    # model saver
    saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=0.5)

    # train model
    print('모델 훈련을 시작합니다.')
    avgLoss = []
    for step, batch in enumerate(batches):
        x_train, y_train = zip(*batch)