Example #1
 def creat_model(self):
     self.embed = embed(vocab_size=self.vocab_size,embed_size=self.emb_dim)
     self.cnn_x1 = cnn_model(name="cnn_x1", out_size=self.conv_out)  # input tensor: [batch, length, embed_size], kernel_size: [filter_size, len_size]
     self.cnn_x2 = cnn_model(name="cnn_x2",out_size=self.conv_out)
     self.rnn_x1 = rnn_model(name="rnn_x1", cell_size=self.lstm_cell) #input tensor: [batch, length, embed_size]
     self.rnn_x2 = rnn_model(name="rnn_x2", cell_size=self.lstm_cell)
     self.birnn_x1 = birnn_model(name="birnn_x1")
     self.birnn_x2 = birnn_model(name="birnn_x2")
     self.Attention_1 = Attention_1(size=32)  # input tensor: [batch, length-h, embed_size]; self-attention
     self.Attention_2 = Attention_1(name="Attention_2", size=32)
     self.att_mat = Attention_2(name="att_mat")  # input tensor 1: [batch, x_1], input tensor 2: [batch, x_2]; outside attention before conv
     self.Att_mat_2 = Attention_2(name="att_mat_2")  # outside attention before the fully connected layer
     self.Att_mat_3 = Attention_2(name="att_mat_3")
     self.fc_layer = FC_model(name="fc_layer")
Example #2
 def __init__(self):
     self.sess = tf.Session()
     self.X, self.y, self.train_op, self.cost, self.output, self.prob = cnn_model()
     # restore model
     saver = tf.train.Saver()
     saver.restore(self.sess, "project/models/model/model.ckpt")
Example #3
def train(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    print("Loading data...")
    train_set, vald_set = sentiment_dataset(batch_size=FLAGS.batch_size,
                                            max_sequence_length=FLAGS.sequence_length)
    encoder = tfds.features.text.TokenTextEncoder.load_from_file('encoder')


    if FLAGS.eval_only:
        model = tf.keras.models.load_model('model.h5')
        print('Evaluating on the training set...')
        model.evaluate(train_set, verbose=1)
        print('Evaluating on the validation set...')
        model.evaluate(vald_set, verbose=1)
        return

    print('Building model...')
    model = cnn_model(encoder.vocab_size,
                      FLAGS.embedding_dim,
                      FLAGS.sequence_length,
                      FLAGS.dropout_rate,
                      FLAGS.num_filters,
                      FLAGS.hidden_units)

    model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
                  optimizer=tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate),
                  metrics=[tf.keras.metrics.BinaryAccuracy(),
                           tf.keras.metrics.AUC()])

    model.fit(train_set,
              epochs=FLAGS.num_epochs,
              validation_data=vald_set,
              verbose=1)
    tf.keras.models.save_model(model, 'model.h5')
Example #4
def train_neural_network(x_train, y_train, x_val, y_val, learning_rate = 0.05, drop_rate = 0.7, epochs = 10, batch_size = 1):
    x_input = tf.placeholder(tf.float32, shape=[None, None, None, None, 1], name = 'input')
    y_input = tf.placeholder(tf.float32, shape=[None, n_class], name = 'output')
    drop_prob = tf.placeholder(tf.float32, shape = None)
    with tf.name_scope("cross_entropy"):
        prediction = cnn_model(x_input, drop_prob, seed = 42)
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y_input))
                              
    with tf.name_scope("training"):
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    
    predicted_label = tf.argmax(prediction, 1, name = 'predicted_label')    
    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_input, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name = 'accuracy')
    
    iterations = int(len(x_train)/batch_size)
    
    # to save model
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        start_time = datetime.datetime.now()

        iterations = int(len(x_train)/batch_size) + 1
        # run epochs

        maxAcc = 0
        for epoch in range(epochs):
            start_time_epoch = datetime.datetime.now()
            print('Epoch: ', epoch)
            epoch_loss = 0
            # mini batch
            for itr in range(iterations):
                mini_batch_x = x_train[itr * batch_size: min((itr + 1)*batch_size, len(x_train))]
                mini_batch_y = y_train[itr * batch_size: min((itr + 1)*batch_size, len(y_train))]
                if len(mini_batch_x) == 0:
                    continue
                _optimizer, _cost = sess.run([optimizer, cost], feed_dict={x_input: mini_batch_x, y_input: mini_batch_y, drop_prob: drop_rate})
                epoch_loss += _cost

            #  using mini batch in case not enough memory
            acc = 0
            numValBatches = int(len(x_val)/batch_size) + 1
            for itr in range(numValBatches):
                mini_batch_x_val = x_val[itr * batch_size: min((itr + 1) * batch_size, len(x_val))]
                mini_batch_y_val = y_val[itr * batch_size: min((itr + 1) * batch_size, len(y_val))]
                if len(mini_batch_x_val) == 0:
                    continue
                acc += sess.run(accuracy, feed_dict={x_input: mini_batch_x_val, y_input: mini_batch_y_val})
            valAcc = round(acc / numValBatches, 5)
            end_time_epoch = datetime.datetime.now()
            print(' Validation Set Accuracy:', valAcc, ' Time elapsed: ', str(end_time_epoch - start_time_epoch))
            if valAcc > maxAcc:
                # save model when better performance
                saver.save(sess, join(saveModel_dir, 'acc_' + str(valAcc)))
                maxAcc = valAcc 

        end_time = datetime.datetime.now()
        print('Time elapsed: ', str(end_time - start_time))
Example #5
def get_age(input):
    image_list = input.image_list

    image_list = ast.literal_eval(image_list)
    image_list = np.array(image_list, dtype="float32")
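    # reshape into a batch of one 48x48 single-channel image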
    image_list = image_list.reshape(1, 48, 48, 1)

    age = model.cnn_model(image_list)
    return age
Example #6
def inference(left_img, right_img, pre_trained=False):
    with tf.variable_scope('feature_generator', reuse=tf.AUTO_REUSE) as sc:
        if not pre_trained:
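            # the two cnn_model calls share weights via AUTO_REUSE on the enclosing scope (Siamese-style feature extractor)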
            from model import cnn_model
            left_features = cnn_model(tf.layers.batch_normalization(left_img))
            right_features = cnn_model(
                tf.layers.batch_normalization(right_img))
            merged_features = tf.abs(tf.subtract(left_features,
                                                 right_features))
        else:
            from model import pre_trained_model
            merged_features, left_features, right_features = pre_trained_model(
                left_img, right_img)
    logits = tf.layers.dense(
        merged_features,
        1,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.1))
    logits = tf.reshape(logits, [-1])
    return logits, left_features, right_features
Example #7
def train(name):

    acc_test_d1 = []
    acc_test_d2 = []
    acc_test_d3 = []
    t1_x, t1_y = task[0].train.images[:1000], task[0].train.labels[:1000]
    t2_x, t2_y = task[1].train.images[:1000], task[1].train.labels[:1000]
    t3_x, t3_y = task[2].train.images[:1000], task[2].train.labels[:1000]
    global first_model
    if first_model is None:
        first_model = cnn_model()
        first_model.val_data(task[0].validation.images,
                             task[0].validation.labels)
        first_model.fit(t1_x, t1_y)

    from copy import deepcopy
    model = deepcopy(first_model)

    test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    if name == 'kal':
        model.transfer(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.transfer(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_pre':
        model.use_pre(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_pre(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_cur':
        model.use_cur(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_cur(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'nor':
        model.fit(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.fit(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    model.save(name)
    model.plot('res', name)

    return acc_test_d1, acc_test_d2, acc_test_d3
Example #8
def train(name):

    acc_test_d1 = []
    acc_test_d2 = []
    acc_test_d3 = []

    model = cnn_model()
    model.val_data(X_test[:TEST_NUM], y_test[:TEST_NUM])
    model.fit(X_train[:TRAIN_NUM], y_train[:TRAIN_NUM])

    test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    if name == 'kal':
        model.transfer(X_train[TRAIN_NUM:TRAIN_NUM * 2],
                       y_train[TRAIN_NUM:TRAIN_NUM * 2])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.transfer(X_train[TRAIN_NUM * 2:TRAIN_NUM * 3],
                       y_train[TRAIN_NUM * 2:TRAIN_NUM * 3])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_pre':
        model.use_pre(X_train[TRAIN_NUM:TRAIN_NUM * 2],
                      y_train[TRAIN_NUM:TRAIN_NUM * 2])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_pre(X_train[TRAIN_NUM * 2:TRAIN_NUM * 3],
                      y_train[TRAIN_NUM * 2:TRAIN_NUM * 3])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_cur':
        model.use_cur(X_train[TRAIN_NUM:TRAIN_NUM * 2],
                      y_train[TRAIN_NUM:TRAIN_NUM * 2])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_cur(X_train[TRAIN_NUM * 2:TRAIN_NUM * 3],
                      y_train[TRAIN_NUM * 2:TRAIN_NUM * 3])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'nor':
        model.fit(X_train[TRAIN_NUM:TRAIN_NUM * 2],
                  y_train[TRAIN_NUM:TRAIN_NUM * 2])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.fit(X_train[TRAIN_NUM * 2:TRAIN_NUM * 3],
                  y_train[TRAIN_NUM * 2:TRAIN_NUM * 3])
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    model.save(name)
    model.plot('res', name)

    return acc_test_d1, acc_test_d2, acc_test_d3
Example #9
def train(name):

    acc_test_d1 = []
    acc_test_d2 = []
    acc_test_d3 = []
    t1_x, t1_y = train_set[0][0], train_set[0][1]
    t2_x, t2_y = train_set[1][0], train_set[1][1]
    t3_x, t3_y = train_set[2][0], train_set[2][1]
    global first_model
    if first_model is None:
        first_model = cnn_model()
        first_model.val_data(vali_set[0][0], vali_set[0][1])
        first_model.fit(t1_x, t1_y)

    from copy import deepcopy
    model = deepcopy(first_model)

    test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    if name == 'kal':
        model.transfer(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.transfer(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_pre':
        model.use_pre(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_pre(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_cur':
        model.use_cur(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_cur(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'nor':
        model.fit(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.fit(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    model.save(name)
    model.plot('res', name)

    return acc_test_d1, acc_test_d2, acc_test_d3
Example #10
def train(argv=None):
    print("Loading data...")
    x_train, y_train, x_test, y_test, vocabulary_inv = load_data(
        FLAGS.max_words, FLAGS.sequence_length)

    print('Building model...')
    if FLAGS.model_type == 'cnn':
        model = cnn_model(len(vocabulary_inv), FLAGS.embedding_dim,
                          FLAGS.sequence_length, FLAGS.dropout_rate,
                          FLAGS.num_filters, FLAGS.hidden_units)
    elif FLAGS.model_type == 'lstm':
        model = lstm_model(vocab_length=len(vocabulary_inv),
                           embedding_dim=FLAGS.embedding_dim,
                           sequence_length=FLAGS.sequence_length,
                           dropout_rate=FLAGS.dropout_rate,
                           lstm_units=FLAGS.num_filters,
                           hidden_units=FLAGS.hidden_units)
    else:
        raise ValueError(
            'Unrecognized value `{}` for argument model_type'.format(
                FLAGS.model_type))

    if FLAGS.sequence_length != x_test.shape[1]:
        print("Adjusting sequence length for actual size")
        FLAGS.sequence_length = x_test.shape[1]

    print("x_train shape:", x_train.shape)
    print("x_test shape:", x_test.shape)
    print("Vocabulary Size: {:d}".format(len(vocabulary_inv)))

    model.compile(
        loss=tf.keras.losses.BinaryCrossentropy(),
        optimizer=tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate),
        metrics=[tf.keras.metrics.BinaryAccuracy(),
                 tf.keras.metrics.AUC()])

    model.fit(x_train,
              y_train,
              batch_size=FLAGS.batch_size,
              epochs=FLAGS.num_epochs,
              validation_data=(x_test, y_test),
              verbose=1)
    tf.keras.models.save_model(model, '{}.h5'.format(FLAGS.model_type))
Example #11
def train(heuristic_model_iteration=None):
    if heuristic_model_iteration is not None:
        data_path = 'data/iteration-{:02d}-data.json'.format(
            heuristic_model_iteration)
        model_path = 'save/iteration-{:02d}-weights.hdf5'.format(
            heuristic_model_iteration)
    else:
        data_path = 'data.json'
        model_path = 'weights.hdf5'

    with open(data_path) as f:
        df = pd.read_json(f)

    X = np.array(df['data'].values.tolist()).reshape(-1, 12, 12, 1)
    y = to_categorical(df['label'], num_classes=2)
    print("data count by class:", np.sum(y, axis=0))

    train_validate_split = 0.8
    s = int(len(df) * train_validate_split)
    X_train, X_test, y_train, y_test = X[:s], X[s:], y[:s], y[s:]

    class_count = np.sum(y_train, axis=0)
    class_weight = {0: class_count[0], 1: class_count[1]}

    model = cnn_model()
    if heuristic_model_iteration is not None and heuristic_model_iteration >= 1:  # new model trained based on the previous iteration's model
        model.load_weights('save/iteration-{:02d}-weights.hdf5'.format(
            heuristic_model_iteration - 1))

    mc = ModelCheckpoint(model_path,
                         save_best_only=True,
                         monitor='val_loss',
                         mode='min',
                         save_weights_only=True)

    history = model.fit(X_train,
                        y_train,
                        class_weight=class_weight,
                        validation_data=(X_test, y_test),
                        epochs=5,
                        batch_size=512,
                        callbacks=[mc])
Example #12
def interpret(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    print('Reading data...')
    x_train, y_train, x_test, y_test = mitbih_dataset()
    print('Dataset shape: {}'.format(x_train.shape))
    print('Loading model...')
    original_model = tf.keras.models.load_model('model.h5')

    interpret_model = cnn_model(for_interpretation=True)
    interpret_model.load_weights('model.h5', by_name=True)

    y_pred = original_model.predict(x_test)
    y_pred_max = np.argmax(y_pred, axis=-1)

    explainer = PathExplainerTF(interpret_model)

    for c in range(5):
        print('Interpreting class {}'.format(c))
        class_mask = np.logical_and(y_test == c, y_pred_max == y_test)
        class_indices = np.where(class_mask)[0][:FLAGS.num_examples]

        batch_samples = x_test[class_indices]

        attributions = explainer.attributions(inputs=batch_samples,
                                              baseline=x_train,
                                              batch_size=FLAGS.batch_size,
                                              num_samples=FLAGS.num_samples,
                                              use_expectation=True,
                                              output_indices=c,
                                              verbose=True)
        np.save('attributions_{}.npy'.format(c), attributions)

        interactions = explainer.interactions(inputs=batch_samples,
                                              baseline=x_train,
                                              batch_size=FLAGS.batch_size,
                                              num_samples=FLAGS.num_samples,
                                              use_expectation=True,
                                              output_indices=c,
                                              verbose=True)
        np.save('interactions_{}.npy'.format(c), interactions)
Example #13
def test(model_path, test_count=100):

    model = cnn_model()
    model.load_weights(model_path)

    result_statistic = {'model_win': 0, 'model_loss': 0, 'draw': 0}   # with respect to BLACK
    simulation_data = []

    for _ in tqdm(range(test_count)):
        go = GoSimulate(12)

        next_move = True
        color = BLACK
        move_count = 0
        game_steps = []

        while next_move:
            next_move = go.play_one_move(model, color)
            color = next_color(color)
            game_steps.append(go.get_board_2d())
            move_count += 1
            if move_count == 10000:
                raise RecursionError('Max simulation steps reached')

        score = go.score()
        if score > 0:
            result_statistic['model_win'] += 1
            result = 1
        elif score < 0:
            result_statistic['model_loss'] += 1
            result = 2
        elif score == 0:
            result_statistic['draw'] += 1
            result = 0

        for s in game_steps:
            simulation_data.append({'data': s, 'label': result})

    print ("Test result (model player vs random player):")
    print (result_satistic)
Example #14
def train(name):

    acc_test_d1 = []
    acc_test_d2 = []
    acc_test_d3 = []
    t1_x, t1_y = task[0].train.images, task[0].train.labels
    t2_x, t2_y = task[1].train.images, task[1].train.labels
    t3_x, t3_y = task[2].train.images, task[2].train.labels
    model = cnn_model()
    model.val_data(task[0].validation.images, task[0].validation.labels)
    model.fit(t1_x, t1_y)

    test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    if name == 'kal':
        model.transfer(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.transfer(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_pre':
        model.use_pre(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_pre(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'kal_cur':
        model.use_cur(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.use_cur(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
    if name == 'nor':
        model.fit(t2_x, t2_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)
        model.fit(t3_x, t3_y)
        test_acc(model, acc_test_d1, acc_test_d2, acc_test_d3)

    model.save(name)
    model.plot('res', name)

    return acc_test_d1, acc_test_d2, acc_test_d3
Example #15
def train(argv=None):
    set_up_environment(visible_devices=FLAGS.visible_devices)

    print('Reading data...')
    x_train, y_train, x_test, y_test = mitbih_dataset()
    print('Dataset shape: {}'.format(x_train.shape))

    if FLAGS.evaluate:
        model = tf.keras.models.load_model('model.h5')
        print('Evaluating on the training data...')
        model.evaluate(x_train, y_train, verbose=2)
        print('Evaluating on the test data...')
        model.evaluate(x_test, y_test, verbose=2)
        return

    print('Building model...')
    model = cnn_model()

    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=FLAGS.learning_rate,
        decay_steps=int(x_train.shape[0] / FLAGS.batch_size),
        decay_rate=FLAGS.decay_rate,
        staircase=True)
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                         beta_1=FLAGS.beta_1,
                                         beta_2=FLAGS.beta_2)
    loss = tf.keras.losses.SparseCategoricalCrossentropy()
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    print('Training model...')
    model.fit(x_train,
              y_train,
              epochs=FLAGS.epochs,
              batch_size=FLAGS.batch_size,
              verbose=1,
              validation_data=(x_test, y_test))

    tf.keras.models.save_model(model, 'model.h5')
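Example #16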
train_generator = inout.image_generator(X_train_paths,
                                        y_train,
                                        batch_size,
                                        resize_dims=resize_dims,
                                        randomly_augment=add_random_augmentations)
valid_generator = inout.image_generator(X_valid_paths, y_valid,
                                        batch_size, resize_dims=resize_dims,
                                        rand_order=False)



############ Training Section
from keras.models import Sequential, Model
from keras import optimizers

inputs, outs = mod.cnn_model(first_conv_shapes, conv_shapes, conv_depths, dense_shapes, image_shape, n_labels)

model = Model(inputs=inputs,outputs=outs)

model.load_weights('./models/gpu_model_update.h5')
learning_rate = .0001
for i in range(20):
    if i > 4:
        learning_rate = .00001 # Anneals the learning rate
    adam_opt = optimizers.Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=adam_opt, metrics=['accuracy'])
    history = model.fit_generator(train_generator, train_steps_per_epoch, epochs=1,
                        validation_data=valid_generator,validation_steps=valid_steps_per_epoch, max_q_size=1)
    model.save('./models/gpu_model_update.h5')
Example #17
        X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    print('X_train shape:', X_train.shape)
    # np_utils.to_categorical converts integer labels to one-hot encoding;
    # here the label vectors become a matrix
    Y_train = np_utils.to_categorical(y_train, 40)
    Y_val = np_utils.to_categorical(y_val, 40)
    Y_test = np_utils.to_categorical(y_test, 40)

    model = model.cnn_model()
    # train the model
    train.train_model(model, X_train, Y_train, X_val, Y_val, epochs)
    # evaluate the model
    score = test.test_model(model, X_test, Y_test)
    print(score)
    # load the trained weights
    model.load_weights('model_weights.h5')
    # compute the predicted classes
    classes = model.predict_classes(X_test, verbose=0)
    # compute the accuracy
    test_accuracy = np.mean(np.equal(y_test, classes))
    print("final accuracy:", test_accuracy)
    error_num = 0
    for i in range(0, 40):
        if y_test[i] != classes[i]:
Example #18
    model_wrapper = PyTorchModelWrapper(model, tokenizer)
    print(evaluate(model_wrapper.model,
                   test_dataloader))  # for checking whether loading is correct
    return model_wrapper


if __name__ == "__main__":
    # 3 tasks: train, evaluate, pre-generate
    task = "train"  # Todo: change this
    args = get_args()

    # define model and tokenizer
    if args.model_short_name == "lstm":
        model_wrapper = lstm_model(args)
    else:
        model_wrapper = cnn_model(args)
    model = model_wrapper.model
    tokenizer = model_wrapper.tokenizer

    # prepare dataset and dataloader
    train_dataset, validation_dataset, test_dataset = return_dataset(
        args.dataset)
    train_text, train_labels = prepare_dataset_for_training(train_dataset)
    eval_text, eval_labels = prepare_dataset_for_training(validation_dataset)
    test_text, test_labels = prepare_dataset_for_training(test_dataset)

    train_dataloader = _make_dataloader(tokenizer, train_text, train_labels,
                                        args.batch_size)
    eval_dataloader = _make_dataloader(tokenizer, eval_text, eval_labels,
                                       args.batch_size)
    test_dataloader = _make_dataloader(tokenizer, test_text, test_labels,
                                       args.batch_size)
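Example #19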
############### User Defined Variables
#
# data_path = '/Volumes/WhiteElephant/cervical_cancer/test'
# model_path = 'model_update.h5'

data_path = './test'
model_path = 'gpu_model_update.h5'

resize_dims = (256, 256, 3)
test_divisions = 20  # Used for segmenting image evaluation in threading
batch_size = 100  # Batch size used for keras predict function

############## Create Model
from keras.models import Sequential, Model

ins, outs = mod.cnn_model()
model = Model(inputs=ins, outputs=outs)
model.load_weights(model_path)

############# Read in Data
test_paths, test_labels, _ = inout.read_paths(data_path, no_labels=True)
print(str(len(test_paths)) + ' testing images')

############# Make Predictions
predictions = []
pool = ThreadPool(processes=1)
portion = len(
    test_paths) // test_divisions + 1  # Number of images to read in per pool

async_result = pool.apply_async(inout.convert_images,
                                (test_paths[0 * portion:portion * (0 + 1)],
Example #20
def interpret(argv=None):
    print('Setting up environment...')
    utils.set_up_environment(visible_devices=FLAGS.visible_devices)

    print("Loading data...")
    train_set, vald_set = sentiment_dataset(
        batch_size=FLAGS.batch_size, max_sequence_length=FLAGS.sequence_length)
    encoder = tfds.features.text.TokenTextEncoder.load_from_file('encoder')

    print('Loading model...')
    interpret_model = cnn_model(encoder.vocab_size,
                                FLAGS.embedding_dim,
                                FLAGS.sequence_length,
                                FLAGS.dropout_rate,
                                FLAGS.num_filters,
                                FLAGS.hidden_units,
                                for_interpretation=True)

    model = tf.keras.models.load_model('model.h5')
    embedding_model = tf.keras.models.Model(model.input,
                                            model.layers[1].output)

    interpret_model.load_weights('model.h5', by_name=True)

    explainer = PathExplainerTF(interpret_model)

    if use_custom_sentences:
        custom_sentences = ['This movie was good', 'This movie was not good']

    num_accumulated = 0
    accumulated_inputs = []
    accumulated_embeddings = []
    for i, (batch_input, batch_label) in enumerate(vald_set):
        batch_embedding = embedding_model(batch_input)

        batch_pred = model(batch_input)
        batch_pred_max = (batch_pred[:, 0].numpy() > 0.5).astype(int)

        correct_mask = batch_pred_max == batch_label

        accumulated_inputs.append(batch_input[correct_mask])
        accumulated_embeddings.append(batch_embedding[correct_mask])
        num_accumulated += np.sum(correct_mask)
        if num_accumulated >= FLAGS.num_sentences:
            break

    accumulated_inputs = tf.concat(accumulated_inputs, axis=0)
    accumulated_embeddings = tf.concat(accumulated_embeddings, axis=0)
    np.save('accumulated_inputs.npy', accumulated_inputs.numpy())
    np.save('accumulated_embeddings.npy', accumulated_embeddings.numpy())

    baseline_input = np.zeros(accumulated_inputs[0:1].shape)
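    # the baseline is an all-zeros (all-padding) token sequence, embedded below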
    baseline_embedding = embedding_model(baseline_input)

    print('Getting attributions...')
    # Get word-level attributions
    embedding_attributions = explainer.attributions(
        accumulated_embeddings,
        baseline_embedding,
        batch_size=FLAGS.batch_size,
        num_samples=FLAGS.num_samples,
        use_expectation=False,
        output_indices=0,
        verbose=True)
    np.save('embedding_attributions.npy', embedding_attributions)

    print('Getting interactions...')
    # Get pairwise word interactions
    max_indices = np.max(np.sum(accumulated_inputs != 0, axis=-1))
    interaction_matrix = np.zeros(
        (accumulated_embeddings.shape[0], max_indices, FLAGS.embedding_dim,
         FLAGS.sequence_length, FLAGS.embedding_dim))

    indices = np.indices((max_indices, FLAGS.embedding_dim))
    indices = indices.reshape(2, -1)
    indices = indices.swapaxes(0, 1)
    for interaction_index in tqdm(indices):
        embedding_interactions = explainer.interactions(
            accumulated_embeddings,
            baseline_embedding,
            batch_size=FLAGS.batch_size,
            num_samples=FLAGS.num_samples,
            use_expectation=False,
            output_indices=0,
            verbose=False,
            interaction_index=interaction_index)
        interaction_matrix[:, interaction_index[0],
                           interaction_index[1], :, :] = embedding_interactions
    np.save('interaction_matrix.npy', interaction_matrix)
Example #21
x_train, y_train = load_train_dataset()
y_train = to_categorical(y_train)
x_train, x_validation, y_train, y_validation = train_test_split(x_train,
                                                                y_train,
                                                                test_size=0.2)

train_generator = ImageDataGenerator(rotation_range=20,
                                     horizontal_flip=True,
                                     height_shift_range=0.2,
                                     width_shift_range=0.2,
                                     zoom_range=0.2,
                                     channel_shift_range=0.2)
validation_generator = ImageDataGenerator()

model = cnn_model()

callbacks = [
    ModelCheckpoint(filepath="./models/model_{epoch:02d}.h5"),
    TensorBoard(log_dir="./logs"),
]

model_history = model.fit_generator(
    train_generator.flow(x_train, y_train, batch_size),
    epochs=epochs,
    callbacks=callbacks,
    validation_data=validation_generator.flow(x_validation, y_validation,
                                              batch_size))

model.save("./models/model_final.h5")
history = model_history.history
Example #22
physical_devices = tf.config.experimental.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)

batch_size = 64
epochs = 200
samples = 20000
size = 64
input_size = (size, size, 3)

x_train, y_train = load_regulized_train_dataset(samples, size)
y_train = to_categorical(y_train)
x_train, x_validation, y_train, y_validation = train_test_split(
    x_train, y_train, test_size=0.2, stratify=y_train)

model = cnn_model(input_size)

callbacks = [
    ModelCheckpoint(filepath="./models/model_{epoch:02d}.h5"),
    TensorBoard(log_dir="./logs")
]

model_history = model.fit(x_train,
                          y_train,
                          epochs=epochs,
                          callbacks=callbacks,
                          validation_data=(x_validation, y_validation))

model.save("./models/model_final.h5")
history = model_history.history
Example #23
labels = list(
    np.genfromtxt(label_csv_path,
                  delimiter=',',
                  skip_header=1,
                  usecols=5,
                  dtype=None))
# X = tf.placeholder(tf.float32, shape=[None,64,64,3])
# Y = tf.placeholder(tf.float32, shape=[None,25])

# X = inputs['images']
# Y = inputs['labels']

X = tf.placeholder(name='ip', dtype=tf.float32, shape=(None, 64, 64, 1))
Y = tf.placeholder(tf.int32, [None, 1])
# network = model.model(X_train)
network = model.cnn_model(X)
[optimizer, cost] = training.trainer(network, Y)
print(optimizer)

# Initialization
sess = tf.Session()
sess.run(tf.global_variables_initializer())  # initialize variables once, before the training loop
num_points = len(filenames)
# Run training
for epoch in range(100):
    for jj in range(int(math.floor((num_points // batch_size) - 1))):
        # Get the data
        inputs = input_fn.input_fn(filenames, labels, batch_size)
        sess.run(inputs['iterator_init_op'])
        train_X = sess.run(inputs['images'])
        train_Y = sess.run(inputs['labels'])
Example #24
def main():

    batch_size = 128
    num_classes = 10
    epochs = 50

    # load data
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    x_train = np.reshape(
        x_train, (len(x_train), 28, 28,
                  1))  # adapt this if using `channels_first` image data format
    x_test = np.reshape(
        x_test, (len(x_test), 28, 28,
                 1))  # adapt this if using `channels_first` image data format

    noise_factor = 0.5
    x_train_noisy = x_train + noise_factor * np.random.normal(
        loc=0.0, scale=1.0, size=x_train.shape)
    x_test_noisy = x_test + noise_factor * np.random.normal(
        loc=0.0, scale=1.0, size=x_test.shape)

    x_train_noisy = np.clip(x_train_noisy, 0., 1.)
    x_test_noisy = np.clip(x_test_noisy, 0., 1.)

    # callbacks
    tb = TensorBoard(log_dir='./graphs/encoded')

    # pretrain an autoencoder
    autoencoder = autoencoder_model()
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    autoencoder.fit(x_train_noisy,
                    x_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_data=(x_test, x_test))

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)

    # first model
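    # classification head built on top of the pretrained autoencoder's encoder output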
    x = autoencoder.get_layer('max_pooling2d_3').output
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    output = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=autoencoder.input, outputs=output)
    model.compile(optimizer='adadelta',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # saving model
    model.save('my_model.h5')

    model.fit(x_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              shuffle=True,
              validation_data=(x_test, y_test),
              callbacks=[tb])

    # second model
    model_2 = load_model('my_model.h5')

    # Freeze the layers except the last 3 layers
    for layer in model_2.layers[:-3]:
        layer.trainable = False

    model_2.compile(optimizer='adadelta',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

    # callbacks
    tb2 = TensorBoard(log_dir='./graphs/freezed')

    model_2.fit(x_train,
                y_train,
                epochs=epochs,
                batch_size=batch_size,
                shuffle=True,
                validation_data=(x_test, y_test),
                callbacks=[tb2])

    # third model
    model_3 = cnn_model(num_classes)
    model_3.compile(optimizer='adadelta',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

    # callbacks
    tb3 = TensorBoard(log_dir='./graphs/conv')

    model_3.fit(x_train,
                y_train,
                epochs=epochs,
                batch_size=batch_size,
                shuffle=True,
                validation_data=(x_test, y_test),
                callbacks=[tb3])

    # models evaluation
    score = model.evaluate(x_test, y_test, verbose=0)
    print('\nModel 1')
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    score = model_2.evaluate(x_test, y_test, verbose=0)
    print('\nModel 2')
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    score = model_3.evaluate(x_test, y_test, verbose=0)
    print('\nModel 3')
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # evaluate noisy data
    score = model.evaluate(x_test_noisy, y_test, verbose=0)
    print('\nModel 1')
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    score = model_2.evaluate(x_test_noisy, y_test, verbose=0)
    print('\nModel 2')
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    score = model_3.evaluate(x_test_noisy, y_test, verbose=0)
    print('\nModel 3')
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
Example #25
import keras
from model import cnn_model

model = cnn_model((64, 64, 3))
keras.utils.plot_model(model, to_file="./model.png")
Example #26
def interpret(argv=None):
    print('Setting up environment...')
    utils.set_up_environment(visible_devices=FLAGS.visible_devices)

    print('Loading data...')
    x_train, y_train, x_test, y_test, vocabulary_inv = load_data(
        FLAGS.max_words, FLAGS.sequence_length)

    lengths = np.sum(x_test != 0, axis=1)
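    # token id 0 is padding; sorting by length below picks the shortest test sentences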
    min_indices = np.argsort(lengths)

    print('Loading model...')
    if FLAGS.model_type == 'cnn':
        interpret_model = cnn_model(len(vocabulary_inv),
                                    FLAGS.embedding_dim,
                                    FLAGS.sequence_length,
                                    FLAGS.dropout_rate,
                                    FLAGS.num_filters,
                                    FLAGS.hidden_units,
                                    for_interpretation=True)
    elif FLAGS.model_type == 'lstm':
        interpret_model = lstm_model(vocab_length=len(vocabulary_inv),
                                     embedding_dim=FLAGS.embedding_dim,
                                     sequence_length=FLAGS.sequence_length,
                                     dropout_rate=FLAGS.dropout_rate,
                                     lstm_units=FLAGS.num_filters,
                                     hidden_units=FLAGS.hidden_units,
                                     for_interpretation=True)
    else:
        raise ValueError(
            'Unrecognized value `{}` for argument model_type'.format(
                FLAGS.model_type))

    model = tf.keras.models.load_model('{}.h5'.format(FLAGS.model_type))
    embedding_model = tf.keras.models.Model(model.input,
                                            model.layers[1].output)

    interpret_model.load_weights('{}.h5'.format(FLAGS.model_type),
                                 by_name=True)

    explainer = PathExplainerTF(interpret_model)

    batch_input = x_test[min_indices[:FLAGS.num_sentences]]
    batch_embedding = embedding_model(batch_input)
    batch_pred = model(batch_input)

    baseline_input = np.zeros(x_test[0:1].shape)
    baseline_embedding = embedding_model(baseline_input)

    print('Getting attributions...')
    # Get word-level attributions
    embedding_attributions = explainer.attributions(
        batch_embedding,
        baseline_embedding,
        batch_size=FLAGS.batch_size,
        num_samples=FLAGS.num_samples,
        use_expectation=False,
        output_indices=0,
        verbose=True)
    np.save('embedding_attributions_{}.npy'.format(FLAGS.model_type),
            embedding_attributions)

    print('Getting interactions...')
    # Get pairwise word interactions
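    # sentences were selected shortest-first, so the last one in the batch sets the largest non-padding length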
    max_indices = np.sum(batch_input[-1] != 0)
    interaction_matrix = np.zeros(
        (FLAGS.num_sentences, max_indices, FLAGS.embedding_dim,
         FLAGS.sequence_length, FLAGS.embedding_dim))

    indices = np.indices((max_indices, FLAGS.embedding_dim))
    indices = indices.reshape(2, -1)
    indices = indices.swapaxes(0, 1)
    for interaction_index in tqdm(indices):
        embedding_interactions = explainer.interactions(
            batch_embedding,
            baseline_embedding,
            batch_size=FLAGS.batch_size,
            num_samples=FLAGS.num_samples,
            use_expectation=False,
            output_indices=0,
            verbose=False,
            interaction_index=interaction_index)
        interaction_matrix[:, interaction_index[0],
                           interaction_index[1], :, :] = embedding_interactions
    np.save('interaction_matrix_{}.npy'.format(FLAGS.model_type),
            interaction_matrix)
Example #27
# moving_avg(train_data, test_data, window, overlap, ch_height = 6, avg_width = 1)

# fft(train_data, test_data, ch_height = 6, fre_width = 1)

# stft(train_data, test_data, sampling_freq ,window, overlap, freq_height = 1, time_width = 1)

# validation(train_data, train_label, num_classes = 4, val_percent = 0.9):

Pick one of the above to use
"""
#train_image, test_image, height, width = preprocessing.moving_avg(traindata, testdata, 12, 6, 1, 1)
train_image, test_image, height, width = preprocessing.stft(
    traindata, testdata, 100, 6, 2, 1)

#train_image, train_label, val_img, val_label = preprocessing.validation(train_image, trainlabel, train_percent = 0.9)
"""
# cnn_model(input_shape, learning_rate = 0.00001, dropout = 0.5, num_classes = 5)
def model_fit(cnn_model, traindata, trainlabel, model_file_name, val_img , val_label, mode, epoch = 200, batch_size = 50)

model_pred loads the saved model and evaluates it on the test data
# model_pred(model_file_name, testdata, testlabel, num_classes = 4)
"""
save_name = 'MAV10_8_500_323264_6_1'
train_model = model.cnn_model((height, width, 1))
train_model = model.model_fit(train_model,
                              train_image,
                              train_label,
                              save_name,
                              mode='save',
                              epoch=300)
model.model_pred(save_name, test_image, test_label)
Example #28
from matplotlib import pyplot as plt
import seaborn as sns
from tensorflow.keras.models import model_from_json, Model
import json
from model import cnn_model, base_model, VGG16Model
import numpy as np
import cv2
import h5py


def train_model(model_instance: base_model,
                model_json_file,
                weights_file,
                grayscale=True):
    h5f = h5py.File('./Data/SVHN_multi_digit_norm_grayscale.h5', 'r')

    # Load the training, test and validation set
    X_train = h5f['X_train'][:]
    y_train = h5f['y_train'][:]
    X_test = h5f['X_test'][:]
    y_test = h5f['y_test'][:]
    X_val = h5f['X_val'][:]
    y_val = h5f['y_val'][:]
    h5f.close()
    model = model_instance
    model.fit_model(X_train, y_train, X_val, y_val)
    model.save_model(model_json_file, weights_file)


train_model(cnn_model(), "cnn_model.json", "cnn_w.h5")
Example #29
def train_model(
    # specify model
    model_type="cnn",  # 'cnn' or 'features'
    # training kwargs
    epochs=4,
    steps_per_epoch=50000,  # batches per epoch
    fit_verbose=2,
    callbacks=None,
    # early stopping
    min_delta=5e-4,  # minimum relative improvement in validation metric to continue training
    patience=5,  # epochs allowed to satisfy min_delta improvement
    # save/checkpoint
    prefix="trained_model",
    # save stdout/stderr to file?
    save_std_to_file=True,
):

    assert model_type in ["cnn", "features"]

    folder = prefix + datetime.datetime.now().strftime("_%Y%m%d_%H%M%S")

    model_dir = utilities.model_dir / folder
    model_dir.mkdir(parents=True)

    if save_std_to_file:
        # capture stdout/stderr and send to files
        stdout_file = model_dir / "stdout.txt"
        stderr_file = model_dir / "stderr.txt"
        sys.stdout = open(stdout_file, "w")
        sys.stderr = open(stderr_file, "w")

    print("Inputs and defaults")
    signature = inspect.signature(train_model)
    local_vars = locals()
    input_vars = {}
    for key, param in signature.parameters.items():
        input_vars[key] = local_vars[key]
        print(f"  {key}  input {input_vars[key]}  (default {param.default})")
    pickle_file = model_dir / "inputs.pickle"
    with pickle_file.open("wb") as f:
        pickle.dump(input_vars, f)

    # get data
    elm_data = data_v2.Data(
        fraction_validate=config.fraction_validate,
        fraction_test=config.fraction_test,
        kfold=False,
        smoothen_transition=False,
    )
    all_data, datasets = elm_data.get_datasets()
    _, _, test_data = all_data
    train_dataset, validation_dataset, test_dataset = datasets

    test_file = model_dir / "test_data.pickle"
    with test_file.open("wb") as f:
        pickle.dump(
            {
                "signals": np.array(test_data[0]),
                "labels": np.array(test_data[1]),
                "sample_indices": test_data[2],
                "window_start_indices": test_data[3],
                "signal_window_size": config.signal_window_size,
                "label_look_ahead": config.label_look_ahead,
            },
            f,
        )

    # kwargs for all models
    model_kwargs = dict(
        signal_window_size=config.signal_window_size,
        dense_layers=config.dense_layers,
        dropout_rate=config.dropout_rate,
        l2_factor=config.l2_factor,
        relu_negative_slope=config.relu_negative_slope,
    )

    # define model
    if model_type == "cnn":
        m = model.cnn_model(
            # kwargs for all models
            **model_kwargs,
            # kwargs for CNN models
            conv_size=config.conv_size,
            cnn_layers=config.cnn_layers,
        )
    elif model_type == "features":
        m = model.feature_model(
            # kwargs for all models
            **model_kwargs,
            # kwargs for feature models
            maxpool_size=config.maxpool_size,
            filters=config.filters,
        )
    else:
        raise ValueError(
            'Unknown model type is passed. Use either "cnn" or "features".'
        )

    # optimizer
    steps_per_halving = steps_per_epoch * config.epochs_per_halving
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=utilities.Exp_Learning_Rate_Schedule(
            initial_learning_rate=config.initial_learning_rate,
            minimum_learning_rate_factor=config.minimum_learning_rate_factor,
            steps_per_halving=steps_per_halving,
        ),
        momentum=config.momentum,
        nesterov=True,
    )

    m.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    )

    sample_output = m.evaluate(
        x=test_dataset,
        steps=1,
        verbose=0,
        return_dict=True,
    )
    print("Sample evaluation:")
    for key, value in sample_output.items():
        print(f"  {key}, {value:.4f}")

    if not callbacks:

        # default callbacks when not specified by input

        # Tensorboard logs
        # log_dir = model_dir / 'tensorboard-logs'
        # log_dir.mkdir(parents=True)
        # print(f'Tensorboard log dir: {log_dir.as_posix()}')

        checkpoint_dir = model_dir / "checkpoints"
        checkpoint_dir.mkdir(parents=True)
        print(f"Checkpoint dir: {checkpoint_dir.as_posix()}")

        callbacks = [
            # tf.keras.callbacks.TensorBoard(
            #     log_dir=log_dir.as_posix(),
            #     histogram_freq=1,
            #     update_freq=5000,
            #     ),
            tf.keras.callbacks.EarlyStopping(
                min_delta=min_delta,
                patience=patience,
                verbose=1,
            ),
            tf.keras.callbacks.ModelCheckpoint(
                filepath=checkpoint_dir,
                monitor="val_loss",
            ),
        ]

    history = m.fit(
        x=train_dataset,
        verbose=fit_verbose,
        epochs=epochs,
        validation_data=validation_dataset,
        workers=2,
        use_multiprocessing=True,
        callbacks=callbacks,
        steps_per_epoch=steps_per_epoch,  # batches per epoch
        validation_steps=steps_per_epoch // 4,
    )

    print("Final validation metrics")
    for key, value in history.history.items():
        print(f"  {key}, {value[-1]:.4f}")

    # evaluate validation and test datasets
    for ds_name, dataset in zip(
        ["Validation", "Test"],
        [validation_dataset, test_dataset],
    ):
        print(f"{ds_name} metrics")
        result = m.evaluate(
            x=dataset,
            verbose=0,
            use_multiprocessing=True,
            workers=2,
            return_dict=True,
        )
        for key, value in result.items():
            print(f"  {key}, {value:.4f}")

    save_file = model_dir / "saved_model.tf"
    print(f"Saving model: {save_file.as_posix()}")
    m.save(save_file)

    if save_std_to_file:
        # release stdout/stderr capture
        sys.stdout.close()
        sys.stderr.close()
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    return history, result
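Example #30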
        x1_p = self.pred(senti_x1)
        x2_p = self.pred(senti_x2)
        x1_r = self.pred(self.x1_labels)
        x2_r = self.pred(self.x2_labels)
        self.acc_x1 = self.acc(x1_r, x1_p)
        self.acc_x2 = self.acc(x2_r, x2_p)
        self.Summary()
    def optimizer(self):
        self.opt = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(self.loss)

    def acc(self,r,p):
        accuracy = tf.reduce_mean(tf.cast(tf.equal(r,p),dtype=tf.float32))
        return accuracy

    def pred(self,x):
        p = tf.argmax(x,axis=1)
        return p

    def Summary(self):
        tf.summary.scalar("source_acc", self.acc_x1)
        tf.summary.scalar("source_acc", self.acc_x2)
        tf.summary.scalar("loss", self.loss)
        self.summary = tf.summary.merge_all()

if __name__ == '__main__':
    m = cnn_model(300,100,5000,2,32,32,2000,1)
    m.build()
    pass