Code Example #1
File: graph.py  Project: ds-ga-1007/assignment9
def merge_by_year(year):
    '''
    Merge the countries and income data sets for any given year. 
    '''
    income = load_data('income')
    countries = load_data('countries')
    income_yr = pd.DataFrame(income.loc[year])
    col_name = ['Country', 'Region', 'Income']
    newdf = pd.merge(countries, income_yr, left_on='Country', right_index=True)
    newdf.columns = col_name
    return newdf
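A hedged usage sketch (not part of the original project): assuming graph.py and its data files are in place, merge_by_year can be called directly to inspect the merged table for a single year.

# Usage sketch; merge_by_year is the function shown above.
from graph import merge_by_year

merged = merge_by_year(2000)
print(merged.head())                 # columns: Country, Region, Income
print(merged['Income'].describe())   # income distribution for that year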
Code Example #3
def test(args):
    with fluid.dygraph.guard(place):
        model = getattr(models, config.model_name)()
        model_dict, _ = fluid.load_dygraph(config.model_name + '_best')
        model.load_dict(model_dict)
        model.eval()
        test_loader = load_data('eval')
        data_loader = fluid.io.DataLoader.from_generator(capacity=5,
                                                         return_list=True)
        data_loader.set_batch_generator(test_loader, places=place)

        acc_set = []
        avg_loss_set = []
        for batch_id, data in enumerate(data_loader):
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            prediction, acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=prediction, label=label)
            avg_loss = fluid.layers.mean(loss)
            acc_set.append(float(acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        acc_val_mean = np.array(acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))
Code Example #4
def val(model):
    with fluid.dygraph.guard(place):
        model.eval()
        val_loader = load_data('valid')
        data_loader = fluid.io.DataLoader.from_generator(capacity=5,
                                                         return_list=True)
        data_loader.set_batch_generator(val_loader, places=place)

        acc_set = []
        avg_loss_set = []
        for batch_id, data in enumerate(data_loader):
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            prediction, acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=prediction, label=label)
            avg_loss = fluid.layers.mean(loss)
            acc_set.append(float(acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        acc_val_mean = np.array(acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))

        return acc_val_mean
Code Example #5
def main(args=None):

    parser = argparse.ArgumentParser(
        description='combine adversarial learning for ISIC-2017 Segmentation')
    parser.add_argument('--data_dir', type=str, help='Path to Dataset file')
    parser.add_argument('--model_dir', type=str, help='Path to save model')
    parser.add_argument('--tb_dir',
                        type=str,
                        help='Path to save TensorBoard log')
    parser.add_argument('--epochs',
                        help='Number of epochs',
                        type=int,
                        default=10000)
    parser.add_argument('--batch_size',
                        help='Batch size',
                        type=int,
                        default=10)

    parser = parser.parse_args(args)

    x_train, y_train, x_valid, y_valid, x_test, y_test = load_data()
    #    print (x_train.shape)
    #    print (y_train.shape)
    Net = Unet_combine_adver()
    Net.set_up_unet(parser.batch_size, IMAGE_SIZE, OUTPUT_SIZE, CLASS_NUM)
    Net.train(x_train, y_train, x_valid, y_valid, x_test, y_test,
              parser.batch_size, parser.model_dir, parser.tb_dir,
              parser.epochs)
Code Example #6
def main():
    config = Config()

    print('Prepare data for train and dev ... ')
    train_dev_split(config.original_file, config.train_file, config.dev_file,
                    config.vocab_file, config.split_ratio)
    print('Prepared data successfully!')

    lstm_config = Config()
    rnn_model = LSTM_Dynamic(lstm_config)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5,
                                allow_growth=True)  # each GPU process may use up to 50% of GPU memory
    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        if lstm_config.if_train:
            init=tf.global_variables_initializer()
            sess.run(init)
            (X_train, y_train) = load_data(lstm_config.train_file)

            if len(X_train) < lstm_config.batch_size:
                for i in range(0, lstm_config.batch_size - len(X_train)):
                    X_train.append([0])
                    y_train.append([0])

            seq_len_train = list(map(lambda x: len(x), X_train))

            rnn_model.train_epoch(sess, lstm_config.train_file, X_train,
                                  y_train, seq_len_train,
                                  lstm_config.model_path)

    print('Successfully prepared the data')
Code Example #7
def train_by_columns(data_file, voc_file, model_dir, glove_dir, epoch,
                     batch_size, train_split, eval_split, embedding_dim):
    print('training: data = {}, voc = {}, '
          'epochs = {}, batch size = {}, '
          'glove dir = {}, embedding dim = {} ---> models = {}'.format(
              data_file, voc_file, epoch, batch_size, glove_dir, embedding_dim,
              model_dir))

    train_x, train_y, test_x, test_y = data_process.load_data(
        data_file, train_split)
    vocab = data_process.load_vocab(voc_file)

    max_seq = train_x.shape[1]
    vocab_size = len(vocab) + 1
    print('training: sequence length = {}, vocabulary size = {}'.format(
        max_seq, vocab_size))

    embedding_layers = data_process.load_embeddings(vocab, max_seq, glove_dir,
                                                    embedding_dim)

    for i in range(0, 4):
        model_name = 'model_{}'.format(i)
        model = train_category(model_name, train_x, train_y.iloc[:, i], test_x,
                               test_y.iloc[:, i], embedding_layers, eval_split,
                               epoch, batch_size)

        model.save(
            os.path.join(
                model_dir,
                MODEL_FILE_NAME_TEMPLATE.format(max_seq, epoch, embedding_dim,
                                                i)))
Code Example #8
def main():
    config = Config()
    print('Prepare data for train and dev ... ')
    train_dev_split(config.original_file, config.train_file, config.dev_file,
                    config.vocab_file, config.split_ratio)
    print('Prepared data successfully!')
    lstm_config = Config()
    blstm_model = LSTM_Dynamic(lstm_config)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        if lstm_config.if_test:
            print('Start loading dev data ...')
            (X_test, y_test) = load_data(lstm_config.dev_file)
            print('Loading succeeded!')

            if len(X_test) < lstm_config.batch_size:
                for i in range(0, lstm_config.batch_size - len(X_test)):
                    X_test.append([0])
                    y_test.append([0])

            seq_len_test = list(map(lambda x: len(x), X_test))
            print('Targeting a specific model to test')
            test_model = os.path.join(lstm_config.model_path, "models_epoch38")
            print('Start predicting...')
            blstm_model.test(sess, test_model, X_test, y_test, seq_len_test,
                             lstm_config.vocab_file,
                             lstm_config.model_path + 'result/')
Code Example #9
File: main.py  Project: reniew/Korean_Text_Spacing
def main(self):

    inputs, labels, t2i, i2t, max_len, embedding_matrix = data.load_data()
    vocab_size = len(t2i)
    params = make_params(max_len, vocab_size, embedding_matrix)
    estimator = tf.estimator.Estimator(model_fn=model.model_fn,
                                       model_dir=DEFINES.check_point,
                                       params=params)
    estimator.train(lambda: data.train_input_fn(inputs, labels))
Code Example #10
File: graph.py  Project: ds-ga-1007/assignment9
def plot_income(year):
    fig, ax = plt.subplots(figsize=(10,10)) 
    income = load_data('income')
    income.loc[year].dropna().hist()
    ax.set_title('Histogram of income for year '+str(year))
    ax.set_xlabel('Income per person')
    ax.set_ylabel('Count')
    fig.savefig('Histogram of income for year '+str(year)+'.pdf')
    plt.show()
    plt.close() 
Code Example #12
    def load_data(self):
        """
        data
        :return:
        """
        x, y = data_process.load_data(filename=r'mulclass_clean.csv')
        x = self.train_comm_vocab(x)

        data_train, data_valid = self.split_data(x, y)
        self.train_args['num_classes'] = len(y[0])

        return data_train, data_valid
Code Example #13
def main(test_model):

    config = Config()
    config.batch_size = 1024
    config.hidden_size = 64
    config.vocab_size = 26
    config.embed_size = 320
    config.max_epochs = 100
    config.label_kinds = 2
    config.if_train = True
    config.if_test = True
    config.is_biLSTM = True
    config.max_seqlen = 20

    config.original_file = '../data/most_frequent_words_label.txt'
    config.train_file = '../data/most_frequent_words_label_train.txt'
    config.dev_file = '../data/most_frequent_words_label_dev'
    config.vocab_file = '../data/vocab.txt'
    config.model_path = 'models/Transformer_softmax_lstm/'

    config.split_ratio = 0.8

    print('Prepare data for train and dev ... ')
    train_dev_split(config.original_file, config.train_file, config.dev_file,
                    config.vocab_file, config.split_ratio)
    print('Prepared data successfully!')

    model = Transformer(config)
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=0.5, allow_growth=True
    )  # each GPU process may use up to 50% of GPU memory
    tf_config = tf.ConfigProto(gpu_options=gpu_options,
                               allow_soft_placement=True)
    with tf.Session(config=tf_config) as sess:
        if config.if_test:
            init = tf.global_variables_initializer()
            sess.run(init)
            (X_test, y_test) = load_data(config.dev_file)

            if len(X_test) < config.batch_size:
                for i in range(0, config.batch_size - len(X_test)):
                    X_test.append([0])
                    y_test.append([0])

            seq_len_test = list(map(lambda x: len(x), X_test))

            print('Targeting a specific model to test')
            print('Start predicting...')
            model.test(sess, test_model, X_test, y_test, seq_len_test,
                       config.vocab_file, config.model_path + 'result/')

    print('Successfully prepared the data')
Code Example #14
def train(args):
    print('Now starting training.......')
    with fluid.dygraph.guard(place):
        model = getattr(models, config.model_name)()
        train_loader = load_data('train', config.batch_size)
        data_loader = fluid.io.DataLoader.from_generator(capacity=5,
                                                         return_list=True)
        data_loader.set_batch_generator(train_loader, places=place)
        # train_loader = paddle.batch(paddle.dataset.mnist.train(), batch_size=config.batch_size)
        # optimizer = fluid.optimizer.Adam(learning_rate=config.lr)
        optimizer = fluid.optimizer.Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=[15630, 31260], values=[1e-3, 1e-4, 1e-5]),
            regularization=fluid.regularizer.L2Decay(
                regularization_coeff=1e-4))
        EPOCH_NUM = config.max_epoch
        best_acc = -1
        for epoch_id in range(EPOCH_NUM):
            model.train()
            for batch_id, data in enumerate(data_loader):
                # image_data = np.array([x[0] for x in data]).astype('float32').reshape(-1, 28, 28)
                # label_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)
                # image_data = np.expand_dims(image_data, axis=1)
                image_data, label_data = data
                # print("data shape => ", image_data.shape)
                # print("label shape => ", label_data.shape)
                image = fluid.dygraph.to_variable(image_data)
                label = fluid.dygraph.to_variable(label_data)

                predict, avg_acc = model(image, label)
                loss = fluid.layers.cross_entropy(predict, label)
                # print(loss)
                avg_loss = fluid.layers.mean(loss)
                if batch_id != 0 and batch_id % 200 == 0:
                    print(
                        "epoch: {}, batch: {}, loss is: {}, acc is {}".format(
                            epoch_id, batch_id, avg_loss.numpy(),
                            avg_acc.numpy()))

                avg_loss.backward()
                optimizer.minimize(avg_loss)
                model.clear_gradients()

            fluid.save_dygraph(model.state_dict(),
                               config.model_name + '_current')
            val_acc = val(model)
            if val_acc > best_acc:
                fluid.save_dygraph(model.state_dict(),
                                   config.model_name + '_best')

            best_acc = max(val_acc, best_acc)
Code Example #15
def main():
    # Specify which GPU to use
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    args = parse_args()

    # Print the parsed arguments
    print("args: {}".format(args))

    load_data_start_time = time.time()
    # Load the dataset
    train, valid, test = data_process.load_data()

    print("Loading data done. (%0.3f s)" %
          (time.time() - load_data_start_time))
    print("%d train examples." % len(train[0]))
    print("%d valid examples." % len(valid[0]))

    # Dataset statistics
    print("%d test examples." % len(test[0]))

    keep_probability = np.array(args.keep_probability)
    no_dropout = np.array(args.no_dropout)

    result_path = "./save/" + args.dataset

    # Build model
    with tf.Session(config=config) as sess:
        # Build the model
        model = CSRM(sess=sess,
                     n_items=args.n_items,
                     dim_proj=args.dim_proj,
                     hidden_units=args.hidden_units,
                     patience=args.patience,
                     memory_size=args.memory_size,
                     memory_dim=args.memory_dim,
                     shift_range=args.shift_range,
                     controller_layer_numbers=args.controller_layer_numbers,
                     batch_size=args.batch_size,
                     epoch=args.epoch,
                     lr=args.lr,
                     keep_probability=keep_probability,
                     no_dropout=no_dropout,
                     display_frequency=args.display_frequency)

        # Train
        model.train(train, valid, test, result_path)
Code Example #16
def train():
    # Load data
    logger.info("✔︎ Loading data...")

    x_train, y_train = dp.load_data(FLAGS.training_data_file)

    logger.info("✔︎ Finish building BOW.")

    model = SVR()

    logger.info("✔︎ Training model...")
    model.fit(x_train, y_train)

    logger.info("✔︎ Finish training. Saving model...")
    joblib.dump(model, FLAGS.model_file)
Code Example #17
File: main.py  Project: longshuicui/nmt-pytorch
def train(args):
    src_lang, tar_lang, pairs = load_data()
    vectors = text2tensor(src_lang, tar_lang, pairs)
    generator = gen_batch_data(vectors, args.batch_size, args.src_max_len,
                               args.tar_max_len)

    logging.info('=' * 10 + 'Initializing the model')
    encoder = Encoder(batch_size=args.batch_size,
                      seq_len=args.src_max_len,
                      embedding_size=args.embedding_size,
                      hidden_size=args.hidden_dim,
                      num_layers=args.num_layers,
                      src_vocab_size=src_lang.n_words)
    decoder = Decoder(batch_size=args.batch_size,
                      embedding_size=args.embedding_size,
                      hidden_size=args.hidden_dim,
                      num_layers=args.num_layers,
                      tar_vocab_size=tar_lang.n_words)
    criterion = nn.CrossEntropyLoss()
    model = Seq2Seq(encoder=encoder, decoder=decoder,
                    criterion=criterion).to(device)
    opt = torch.optim.Adam(model.parameters(), lr=args.lr)
    logging.info('=' * 10 + 'Starting training Model...')
    for iter in range(1, args.iters + 1):
        for i, batch_data in enumerate(generator):
            src_data = torch.tensor(batch_data[:, 0].tolist()).to(device)
            tar_data = torch.tensor(batch_data[:, 1].tolist()).to(device)

            # TODO: the loss stays at 0 and results are poor; try adding an attention mechanism
            loss, outputs = model(src_data, tar_data)
            logging.info('=' * 10 + "[iteration %d, batch %d] current loss: %.4f" %
                         (iter, i + 1, loss))
            for param_group in opt.param_groups:
                if iter <= 2:
                    param_group['lr'] = 0.1
                else:
                    param_group['lr'] = args.lr

            opt.zero_grad()
            loss.backward()
            opt.step()

    torch.save(
        {
            'model_state_dict': model.state_dict(),
            'src_lang': src_lang,
            'tar_lang': tar_lang
        }, args.save_path)
Code Example #18
def main(self):

    inputs, labels, t2i, i2t, max_len = data.load_data(DEFINES.data_path)
    encoder_inputs = inputs
    decoder_inputs, decoder_targets = prepare_dec(labels)

    embedding_matrix = data.get_embedding_matrix(DEFINES.data_path,
                                                 DEFINES.embedding_path, i2t)

    params = make_params(embedding_matrix, max_len)
    estimator = tf.estimator.Estimator(model_fn=model.model_fn,
                                       model_dir=DEFINES.check_point,
                                       params=params)

    estimator.train(lambda: data.train_input_fn(encoder_inputs, decoder_inputs,
                                                decoder_targets))
Code Example #19
def main():
    # Find the file from the right folder
    script_dir = os.path.dirname(__file__)  #<-- absolute dir the script is in
    rel_path = "data/winequality-white.csv"
    abs_file_path = os.path.join(script_dir, rel_path)
    file_name = abs_file_path

    # Get the pandas dict-object
    data = data_process.load_data(file_name)

    # Visualize the table
    print(data)

    # Is there linear correlation?
    # -1 and 1 high linear correlation, 0 no linear correlation
    #cor = data.corr()
    #sns.heatmap(cor)

    # Train Lasso regression to identify unnecessary features for linear regression
    #feature_selection.lasso_r(data)

    #-----------------------------------------------------------------------------------------------------------------------------------------------------------
    # Beneath all functions to create different Regression and Classification models

    # Train linear regression with cross validation
    #linear_reg.train_linear(data)

    # Train polynomial regression
    #polynomial_reg.train_poly(data, 2)

    # Train logical Regression
    #Logical_reg.train_logical(data)

    #Train k-means classifier
    #k_means.class_kmeans(data)

    #Train RandomForest Classifier
    #r_forest.rforest(data)

    #Train Naive Bayes Classifier
    #n_bayes.nbayes(data)

    #Train SVM Classifier
    #SVM.SVM_train(data)

    # Show plots
    plt.show()
Code Example #20
File: train.py  Project: kkokilep/GANTesting
def train():
    """ Training
    """

    ##
    # ARGUMENTS
    opt = Options().parse()
    ##
    # LOAD DATA
    dataloader = load_data(opt)
    ##
    # LOAD MODEL
    model = Ganomaly(opt, dataloader)
    ##
    # TRAIN MODEL
    model.train()
    model.test2()
Code Example #21
def test():
    logger.info("✔︎ Loading data...")

    x_test, y_test = dp.load_data(FLAGS.test_data_file)

    logger.info("✔︎ Loading model...")
    model = joblib.load(FLAGS.model_file)

    logger.info("✔︎ Predicting...")
    y_pred = model.predict(x_test)

    logger.info("✔︎ Calculate Metrics...")
    pcc, doa = dp.evaluation(y_test, y_pred)
    rmse = mean_squared_error(y_test, y_pred) ** 0.5

    logger.info("☛ Logistic: PCC {0:g}, DOA {1:g}, RSME {2:g}".format(pcc, doa, rmse))

    logger.info("✔︎ Done.")
Code Example #22
File: main.py  Project: maidousj/functional_mechanism
def test(filepath):
    K = 5  # 5-folds cross-validation
    cv_rep = 10
    eps = 1.0

    X, y = data_process.load_data(filepath, minmax=(-1, 1), bias_term=True)
    rkf = RepeatedKFold(n_splits=K, n_repeats=cv_rep)
    errSum = 0.
    for train_index, test_index in rkf.split(X):
        train_X, train_y = X[train_index, :], y[train_index]
        test_X, test_y = X[test_index, :-1], y[test_index]

        w, b = fm_logistic.fm_logistic(train_X, train_y, eps)
        errorRate = evaluate.rightNum(test_X, test_y, w, b) / len(test_y)
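        # Note: assuming evaluate.rightNum returns the number of correct predictions,
        # this ratio is an accuracy rather than an error rate, despite the variable name.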

        errSum += errorRate

    print(errSum / (K * cv_rep))
Code Example #23
File: cnn.py  Project: BD-MF/TextClassification-CNN
def load_data(data_source):
    assert data_source in ["keras_data_set",
                           "local_dir"], "Unknown data source"
    if data_source == "keras_data_set":
        (x_train, y_train), (x_test,
                             y_test) = imdb.load_data(num_words=max_words,
                                                      start_char=None,
                                                      oov_char=None,
                                                      index_from=None)

        x_train = sequence.pad_sequences(x_train,
                                         maxlen=sequence_length,
                                         padding="post",
                                         truncating="post")
        x_test = sequence.pad_sequences(x_test,
                                        maxlen=sequence_length,
                                        padding="post",
                                        truncating="post")

        vocabulary = imdb.get_word_index()
        vocabulary_inv = dict((v, k) for k, v in vocabulary.items())
        vocabulary_inv[0] = "<PAD/>"
    else:
        x, y, vocabulary, vocabulary_inv_list = data_process.load_data()
        vocabulary_inv = {
            key: value
            for key, value in enumerate(vocabulary_inv_list)
        }
        y = y.argmax(axis=1)

        # Shuffle data
        shuffle_indices = np.random.permutation(np.arange(len(y)))
        x = x[shuffle_indices]
        y = y[shuffle_indices]
        train_len = int(len(x) * 0.9)
        x_train = x[:train_len]
        y_train = y[:train_len]
        x_test = x[train_len:]
        y_test = y[train_len:]

    return x_train, y_train, x_test, y_test, vocabulary_inv
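A hedged usage sketch for the wrapper above: it depends on module-level names (the Keras imports, max_words, sequence_length) that are not shown in this excerpt, so the values below are assumptions for illustration only.

# Assumed module-level context (values are illustrative, not from the original cnn.py)
import numpy as np
from keras.datasets import imdb
from keras.preprocessing import sequence

max_words = 5000        # assumed vocabulary cap passed to imdb.load_data
sequence_length = 400   # assumed target length for sequence.pad_sequences

x_train, y_train, x_test, y_test, vocabulary_inv = load_data("keras_data_set")
print(x_train.shape, x_test.shape, len(vocabulary_inv))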
Code Example #24
def main(self):

    arg_length = len(sys.argv)

    if (arg_length < 2):
        raise Exception("You should put one sentences to predict")

    inputs = []
    for i in sys.argv[1:]:
        inputs.append(i)

    inputs = " ".join(inputs)

    _, _, t2i, i2t, max_len = data.load_data(DEFINES.data_path)

    encoder_inputs, decoder_inputs = prepare_pred_input(inputs, t2i, max_len)
    print(encoder_inputs, decoder_inputs)

    embedding_matrix = data.get_embedding_matrix(DEFINES.data_path,
                                                 DEFINES.embedding_path, i2t)

    params = make_params(embedding_matrix, max_len)

    estimator = tf.estimator.Estimator(model_fn=model.model_fn,
                                       model_dir=DEFINES.check_point,
                                       params=params)

    predict_input_fn = tf.estimator.inputs.numpy_input_fn(x={
        "encoer_inputs":
        encoder_inputs,
        "decoder_inputs":
        decoder_inputs
    },
                                                          num_epochs=1,
                                                          shuffle=False)
    predict = estimator.predict(input_fn=predict_input_fn)

    prediction = next(predict)['prediction']

    print(inputs)
    print(data.token2str(prediction, i2t))
Code Example #25
def test_voting(args):
    with fluid.dygraph.guard(place):
        model1 = ResNet50()
        model2 = ano_model()
        model_dict1, _ = fluid.load_dygraph('ResNet50' + '_best')
        model_dict2, _ = fluid.load_dygraph('ano_model' + '_best')
        model1.load_dict(model_dict1)
        model2.load_dict(model_dict2)
        model1.eval()
        model2.eval()

        test_loader = load_data('eval')
        data_loader = fluid.io.DataLoader.from_generator(capacity=5,
                                                         return_list=True)
        data_loader.set_batch_generator(test_loader, places=place)

        acc_set = []
        avg_loss_set = []
        for batch_id, data in enumerate(data_loader):
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            out1 = model1(img)  # [b, 10]
            out2 = model2(img)
            out = out1 + out2
            out = fluid.layers.softmax(input=out)
            acc = fluid.layers.accuracy(input=out, label=label)
            loss = fluid.layers.cross_entropy(input=out, label=label)
            avg_loss = fluid.layers.mean(loss)
            acc_set.append(float(acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy over all batches
        acc_val_mean = np.array(acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, acc_val_mean))
Code Example #26
File: Seq2seq.py  Project: liguoyu1/python
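# Assumption: the indented block below appears to be the body of get_train_X_Y(data),
# the helper referenced in the commented-out call near the end of this example.
def get_train_X_Y(data):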
	train_X = data[:-1]
	train_Y = data[1:]
	train_X = np.array(train_X, dtype=np.int32)
	train_Y = np.array(train_Y, dtype=np.int32)
	predict_X = train_Y[-1]
	return train_X, train_Y, predict_X


# import data_process
# query, response = data_process.loadData()
# train_query, train_query_response, train_response, q_max_len, r_max_len, index = data_process.pad_SentencesQR(query, response)
print("load dic...")
word_id, id_word, index = data_process.load_dic()
# word_id, id_word, index = data_process.load_dic("/home/robot/cy_work/xiaomi_dict")
print("load data ...")
query, response, q_max_len, r_max_len = data_process.load_data()
# query, response, q_max_len, r_max_len = data_process.load_data(file="/home/robot/cy_work/xiaomi_sent_2_sent")
train_query, train_query_response, train_response, q_max_len, r_max_len, index = data_process.pad_sentences_qr(query,
                                                                                                               response,
                                                                                                               q_max_len,
                                                                                                               r_max_len,
                                                                                                               index)
print("finished load data!!!")

# data = loadData()
# train_X, train_Y, predict_X = get_train_X_Y(data)
# train_XY = np.append(train_X, train_Y, axis=1)

print("input max length:{}, output max length:{}".format(q_max_len, r_max_len))
#
sp = SequencePattern(name=None, in_seq_len=q_max_len, out_seq_len=r_max_len, embding_size=index)
Code Example #27
def train():

    print("Loading data...")

    train_data, val_data, test_data = load_data(FLAGS.training,
                                                FLAGS.validation,
                                                FLAGS.testing)

    print("Train {}".format(np.array(train_data).shape))

    print("Initializing...")

    with tf.Graph().as_default():

        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)

        sess = tf.Session(config=session_conf)

        with sess.as_default():

            cnn = Trash_CNN(
                num_classes=FLAGS.num_classes,
                input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                filters=list(map(int, FLAGS.filter_sizes.split(","))),
                input_channel=FLAGS.num_filters)

            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(1e-3)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)

            train_op = optimizer.apply_gradients(grads_and_vars,
                                                 global_step=global_step)

            timestamp = str(int(time.time()))
            outdir = os.path.abspath(
                os.path.join(os.path.curdir, "runs", timestamp))
            print("Writing to {}\n".format(outdir))

            loss_summary = tf.summary.scalar("loss", cnn.loss)
            acc_summary = tf.summary.scalar("acc", cnn.accuracy)

            # Train Summaries
            train_summary_op = tf.summary.merge([loss_summary, acc_summary])
            train_summary_dir = os.path.join(outdir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(
                train_summary_dir, sess.graph)

            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(outdir, "summaries", "dev")
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir,
                                                       sess.graph)

            # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
            checkpoint_dir = os.path.abspath(
                os.path.join(outdir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.global_variables(),
                                   max_to_keep=FLAGS.num_checkpoints)

            sess.run(tf.global_variables_initializer())

            def train_step(batch_x, batch_y):
                '''
                    One single training step
                '''

                feed_dict = {cnn.input_x: batch_x, cnn.input_y: batch_y}

                _, step, summaries, loss, accuracy = sess.run(
                    [
                        train_op, global_step, train_summary_op, cnn.loss,
                        cnn.accuracy
                    ],
                    feed_dict=feed_dict)

                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc: {:g}".format(
                    time_str, step, loss, accuracy))
                train_summary_writer.add_summary(summaries, step)

            def dev_step(batch_x, batch_y, writer=None):
                '''
                    Evaluate model
                '''

                feed_dict = {cnn.input_x: batch_x, cnn.input_y: batch_y}

                step, summaries, loss, accuracy = sess.run(
                    [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict=feed_dict)

                time_str = datetime.datetime.now().isoformat()
                print("-------------- Step summary ---------------")
                print("{}: step {}, loss {:g}, acc: {:g}".format(
                    time_str, step, loss, accuracy))

                if writer:
                    writer.add_summary(summaries, step)

            batches = batch_iter(list(zip(train_data[0], train_data[1])),
                                 FLAGS.batch_size, FLAGS.num_epochs)

            for batch in batches:

                print("Batch {}".format(batch.shape))
                x_batch, y_batch = zip(*batch)

                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)

                if current_step % FLAGS.evaluate_every == 0:
                    print(
                        "\n======================= Evaluation: ======================="
                    )
                    dev_step(val_data[0],
                             val_data[1],
                             writer=dev_summary_writer)
                    print("")

                if current_step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess,
                                      checkpoint_prefix,
                                      global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))
Code Example #28
import data_process
from data_process import MyDataSet
import numpy as np
from baseline import SVR_baseline, evaluate, HA_baseline
from MyLSTM import MyLSTM
from tqdm import tqdm
from torch.autograd import Variable
from torch.optim import lr_scheduler
import torch.nn as nn
import torch
import torch.utils.data as Data

adj, flow = data_process.load_data()
flow = np.mat(flow, dtype=np.float32)
# Read in the adjacency matrix
adj_norm = data_process.cal_adj_norm(adj)

x_train, y_train, x_test, y_test = data_process.train_test_spilt(
    flow, 4, 20, 0.3)
y_pred = HA_baseline(x_test, y_test)
evaluate(y_test, y_pred)

# dataset
BATCH_SIZE = 32
train_dataset = MyDataSet(x_train, y_train, type='train')
test_dataset = MyDataSet(x_test, y_test, type='test')
train_loader = Data.DataLoader(dataset=train_dataset,
                               batch_size=BATCH_SIZE,
                               shuffle=False)
test_loader = Data.DataLoader(dataset=test_dataset,
                              batch_size=BATCH_SIZE,
Code Example #29
File: train.py  Project: lssxfy123/PythonStudy
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process train parameters')
    parser.add_argument('data_directory', metavar='d', type=str, nargs='+', help='data directory')
    parser.add_argument('--save_dir', type=str, nargs='?', help='save directory')
    parser.add_argument('--learning_rate', type=float, nargs='?', help='learning rate')
    parser.add_argument('--hidden_units', type=int, nargs='+', help='hidden units')
    parser.add_argument('--epochs', type=int, nargs='?', help='epoch numbers')
    parser.add_argument('--arch', type=str, nargs='?', help='model select')
    parser.add_argument('--gpu', action='store_true', help='use gpu')
    args = parser.parse_args()
    
    ## Data path
    data_directory = os.path.abspath(args.data_directory[0])
    
    ## Load the data
    class_to_idx, dataloaders = data_process.load_data(data_directory)
    
    save_directory = "."
    if args.save_dir:
        save_directory = args.save_dir
    if not os.path.isabs(save_directory):
        save_directory = os.path.abspath(save_directory)
    if not os.path.exists(save_directory):
        raise Exception("Save directory not exist")
    ## Model name
    model_name = 'vgg16'
    if args.arch:
        model_name = args.arch
    
    ## Create the classifier
    input_size = 25088
Code Example #30
                    default='onehot',
                    help='the feature type for the atoms in modulars')
parser.add_argument(
    '--train_type',
    type=str,
    default='se',
    help=
    'training type of the model, each batch contains fixed edges or a side effect graph'
)

args = parser.parse_args()
# print the model parameters.
print(args)

# output contains the modular data like [node_attribute, edge_index, edge_weight, batch]
output, edges, edges_attr, se_name = load_data(args.modular_file,
                                               args.ddi_file, 'onehot')
print(len(list(output.keys())))
args.num_edge_features = edges_attr.size(1)
args.device = 'cpu'

# split data into train val test.
num_edges = edges_attr.size(0) // 2
train_num = int(num_edges * args.train_ratio)
val_num = int(num_edges * args.val_ratio)
test_num = int(num_edges * args.test_ratio)
nums = [train_num, val_num, test_num]

# change the input to the side effect name
train_edges, train_edges_attr, val_edges, val_edges_attr, test_edges, test_edges_attr \
    = split_data(edges, se_name, nums)
# print(train_edges_attr)
Code Example #31
File: test.py  Project: ds-ga-1007/assignment9
'''
Test module for the program

Created on Dec 3, 2016
@author: Zhiqi Guo(zg475)
@email: [email protected]
'''
import unittest
import pandas as pd
from graph import merge_by_year
from data_process import load_data

countries = load_data('countries')
income = load_data('income')


class Test(unittest.TestCase):
    '''
    Run the test in the project's root directory
    with the following command:
        $ python -m unittest discover
    '''
    def test_merge(self):
        '''
        Test the merge function exhaustively for every country for the year 2000,
        and repeat the test 10 times.
        '''
        for count in range(10):
            year = 2000
            income_yr = pd.DataFrame(income.loc[year]).astype(float)
            merged = merge_by_year(year)
Code Example #32
from data_process import load_data
from model import build_model, History
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator

v = int(28709 * 0.2)
r = int(28709 * 0.8 * 0.2) + v
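# Note: 28709 is presumably the number of rows in train.csv; v reserves the first 20%
# as a validation split, and r marks off a further 20% of the remaining 80% (data1),
# leaving the rest as data2.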

if __name__ == '__main__':
    data_x, data_y = load_data('train.csv', 'train')
    valid_x, valid_y = data_x[:v], data_y[:v]
    data1_x, data1_y = data_x[v:r], data_y[v:r]
    data2_x, data2_y = data_x[r:], data_y[r:]

    datagen = ImageDataGenerator(
        #rescale = 1 / 255,
        #zca_whitening = True,
        rotation_range=3,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True)
    datagen.fit(data1_x)

    history1 = History()

    model = build_model()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit_generator(
Code Example #33
File: main.py  Project: zbn123/Text-Classification-1
def main(args):
    # Optionally write the log to a file instead:
    # logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
    #                     filename=args.model_name + '.log',
    #                     filemode='w',
    #                     level=logging.INFO)
    logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
                        level=logging.INFO)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    set_seed(args.seed, n_gpu)

    logging.info("The active device is: {}, gpu_num: {}".format(device, n_gpu))

    args.ckpt_path = os.path.join(args.ckpt_path, args.model_name)
    try:
        os.makedirs(args.ckpt_path)
    except:
        pass

    extra_token_dict = {"unk_token": "<UNK>", "pad_token": "<PAD>"}

    train_dev_sentences, test_sentences, train_dev_labels, test_labels, word2id, id2word, embeddings = \
        load_data(args.raw_path, args.embedding_path, args.train_path, args.test_path, args.max_seq_len, extra_token_dict)

    f1_list = [0] * 5
    if args.do_train:
        kf = StratifiedKFold(n_splits=5,
                             shuffle=True).split(train_dev_sentences,
                                                 train_dev_labels)
        for cv_i, (train_index, dev_index) in enumerate(kf):
            logging.info(
                "******************Train CV_{}******************".format(cv_i))
            # Prepare the model
            if args.model_name == "TextRNN":
                config = TextRNN_Config(embeddings, args.num_label)
                model = TextRNN(config)
            if args.model_name == "TextCNN":
                config = TextCNN_Config(embeddings, args.num_label)
                model = TextCNN(config)
            if args.model_name == "TextRCNN":
                config = TextRCNN_Config(embeddings, args.max_seq_len,
                                         args.num_label)
                model = TextRCNN(config)
            if args.model_name == "TextCNN_Highway":
                config = TextCNN_Highway_Config(embeddings, args.num_label)
                model = TextCNN_Highway(config)
            if args.model_name == "TextRNN_Attention":
                config = TextRNN_Attention_Config(embeddings, args.num_label)
                model = TextRNN_Attention(config)
            logging.info("Already load the model: {},".format(args.model_name))
            model.to(device)

            train_sentences = [train_dev_sentences[i] for i in train_index]
            train_labels = [train_dev_labels[i] for i in train_index]
            dev_sentences = [train_dev_sentences[i] for i in dev_index]
            dev_labels = [train_dev_labels[i] for i in dev_index]

            logging.info("Prepare dataloader...")
            train_tensor, train_sent_len, train_labels_tensor = convert2feature(
                train_sentences, train_labels, word2id, args.max_seq_len)
            train_data = TensorDataset(train_tensor, train_sent_len,
                                       train_labels_tensor)
            train_sampler = RandomSampler(train_data)
            train_dataloader = DataLoader(train_data,
                                          sampler=train_sampler,
                                          batch_size=args.batch_size)
            train_dataloader = cycle(train_dataloader)

            dev_tensor, dev_sent_len, dev_labels_tensor = convert2feature(
                dev_sentences, dev_labels, word2id, args.max_seq_len)
            dev_data = TensorDataset(dev_tensor, dev_sent_len,
                                     dev_labels_tensor)
            dev_sampler = SequentialSampler(dev_data)
            dev_dataloader = DataLoader(dev_data,
                                        sampler=dev_sampler,
                                        batch_size=args.dev_batch_size)

            logging.info("Begin to train...")
            f1_list[cv_i] = train_eval(train_dataloader, dev_dataloader, model,
                                       args.ckpt_path, args.train_steps,
                                       args.check_step, args.eval_step,
                                       args.lr, args.warmup_steps, cv_i)
            if not args.do_cv:
                break
        if args.do_cv:
            cv_f1 = np.mean(np.array(f1_list))
            logging.info("CV F1_list: {}, Mean_F1: {:.4f}".format(
                f1_list, cv_f1))

    if args.do_test:
        logging.info("******************Test******************")
        logging.info("Begin to test {}...".format(args.model_name))
        test_tensor, test_sent_len, test_labels_tensor = convert2feature(
            test_sentences, test_labels, word2id, args.max_seq_len)
        test_data = TensorDataset(test_tensor, test_sent_len,
                                  test_labels_tensor)
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data,
                                     sampler=test_sampler,
                                     batch_size=args.dev_batch_size)

        final_results = np.zeros((len(test_labels), args.num_label))
        test_labels = test_labels_tensor.to('cpu').numpy()
        for cv_i in range(5):
            ckpt_path = os.path.join(args.ckpt_path,
                                     "pytorch_model_{}.pkl".format(cv_i))
            if args.model_name == "TextRNN":
                config = TextRNN_Config(embeddings, args.num_label)
                model = TextRNN(config)
                model.load_state_dict(torch.load(ckpt_path))
                model.to(device)
            if args.model_name == "TextCNN":
                config = TextCNN_Config(embeddings, args.num_label)
                model = TextCNN(config)
                model.load_state_dict(torch.load(ckpt_path))
                model.to(device)
            if args.model_name == "TextRCNN":
                config = TextRCNN_Config(embeddings, args.max_seq_len,
                                         args.num_label)
                model = TextRCNN(config)
                model.load_state_dict(torch.load(ckpt_path))
                model.to(device)
            if args.model_name == "TextRNN_Attention":
                config = TextRNN_Attention_Config(embeddings, args.num_label)
                model = TextRNN_Attention(config)
                model.load_state_dict(torch.load(ckpt_path))
                model.to(device)
            output_labels, test_f1_score = test(test_dataloader, model, device,
                                                args.dev_batch_size)
            final_results = final_results + output_labels
            logging.info(
                "The cv_{} result of {} on test data: F1: {:.4f}".format(
                    cv_i, args.model_name, test_f1_score))
            if not args.do_cv:
                break

        test_f1_score = round(macro_f1(final_results, test_labels), 4)
        logging.info("The final result of {} on test data: F1: {:.4f}".format(
            args.model_name, test_f1_score))
Code Example #34
def main():
    #X_train contains paths to the images
    (X_train, Y_train), (X_test, Y_test) = load_data()
    Y_train = to_categorical(Y_train, 3)
    Y_test = to_categorical(Y_test, 3)

    model, modelName = getModel('naive', IMG_SIZE)
    #Print out info about the model (layer structure etc.)
    model.summary()
    #Adam is an optimization algorithm that can be used instead of classical stochastic gradient descent to update network weights iteratively based on the training data
    #Most commonly used; can be swapped out for SGD or anything similar
    optimizer = Adam(learning_rate=LEARNING_RATE)

    #Best to use categorical loss for classification problems
    loss_function = 'categorical_crossentropy'
    loss = [loss_function]

    #Metrics to measure during training, we are only interested in the prediction accuracy for now
    metrics = {'prediction': 'accuracy'}
    #set the optimizer, loss function, and the metrics to print when training
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    #List of Utilities called at certain points during model training
    callbacks = [
        LearningRateScheduler(schedular),  #schedule the learning rate
        ModelCheckpoint(
            os.path.join(CHECKPOINT_DIRECTORY,
                         modelName + '{epoch:02d}-{val_loss:.2f}.hdf5'),
            monitor='val_loss',  #monitor validation loss
            verbose=1,  #print fancy progressbar
            save_best_only=True,  #self explanatory
            mode='auto',  #the decision to overwrite current save file
            save_weights_only=True,  #save only the weights, not full model
            save_freq='epoch'),  #save after every epoch
        TensorBoard(log_dir=TENSORBOARD_DIRECTORY,
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True)
    ]

    p = getAugmentorPipeline()
    #stack operations...
    p.random_brightness(probability=0.25, min_factor=0.75, max_factor=1.25)
    p.flip_left_right(probability=0.25)
    p.rotate(probability=0.25, max_left_rotation=10, max_right_rotation=10)
    p.shear(probability=0.25, max_shear_left=10, max_shear_right=10)
    #p.zoom(probability=0.25, min_factor=1.1, max_factor=1.5)
    p.random_erasing(probability=0.25, rectangle_area=0.25)

    training_generator = TrainingGenerator(
        augmentor_pipeline=p,
        images=X_train,
        labels=Y_train,
        batch_size=BATCH_SIZE,
        img_size=IMG_SIZE,
        normalize=True,
        data_aug=True if USE_DATA_AUGMENTATION else False)

    validation_generator = TrainingGenerator(augmentor_pipeline=p,
                                             images=X_test,
                                             labels=Y_test,
                                             batch_size=BATCH_SIZE,
                                             img_size=IMG_SIZE,
                                             normalize=True,
                                             data_aug=False)
    print('Training model...')
    history = model.fit_generator(generator=training_generator,
                                  steps_per_epoch=len(X_train) // BATCH_SIZE,
                                  validation_data=validation_generator,
                                  validation_steps=len(X_test) // BATCH_SIZE,
                                  epochs=EPOCHS,
                                  verbose=1,
                                  callbacks=callbacks,
                                  workers=6,
                                  use_multiprocessing=False,
                                  shuffle=True,
                                  initial_epoch=0,
                                  max_queue_size=6)
    #Confusion Matrix and Classification Report
    Y_pred = model.predict(x=validation_generator,
                           steps=len(X_test) // BATCH_SIZE + 1)
    y_pred = np.argmax(Y_pred, axis=1)
    y_test = np.argmax(Y_test, axis=1)
    print('Saving confusion matrix as {}.png'.format(modelName))
    #covid 0, normal 1, pneumonia 2
    target_names = ['normal', 'corona', 'pneumonia']

    c_matrix = confusion_matrix(y_test, y_pred)
    df_cm = pd.DataFrame(c_matrix,
                         index=[i for i in target_names],
                         columns=[i for i in target_names])
    plt.figure(figsize=(10, 10))
    plot = sns.heatmap(df_cm, annot=True)
    plot.figure.savefig(
        os.path.join(CONFUSION_MATRIX_DIRECTIORY, '{}.png'.format(modelName)))

    print(f'Saving classification report as {modelName}.txt')
    with open(os.path.join(METRICS_DIRECTORY, '{}.txt'.format(modelName)),
              'w') as f:
        f.write(
            classification_report(y_test, y_pred, target_names=target_names))

    print('Saving history and model...')
    #Save the history
    with open(os.path.join(HISTORY_DIRECTORY, modelName + '.h5'), 'wb') as f:
        pickle.dump(history.history, f)

    #Save the whole model
    model.save(os.path.join(MODEL_DIRECTORY, modelName + '.h5'))