Example #1
# Assumes NumPy, TensorFlow 1.x, and the project's own data-generation
# module `gen` (not shown).
import numpy as np
import tensorflow as tf
import gen

def softMax(N, D, K):
    # Start a session
    sess = tf.InteractiveSession()
    # Generate data, total no. of data = N*K; data is [N*K, D]; label is [N*K, 1]
    [data, initLabel] = gen.genData(N, D, K)
    # Reshape labels: each class is represented by a one-hot vector
    label = np.zeros((N*K,K))
    for i in range(N*K):
        label[i][initLabel[i]] = 1
    # Build the computation graph by creating nodes for the input and target output classes
    x = tf.placeholder("float", shape=[None, D])
    y_ = tf.placeholder("float", shape=[None, K])
    # Define weights and Bias
    W = tf.Variable(tf.zeros([D,K]))
    b = tf.Variable(tf.zeros([K]))
    # Initialize all variables
    sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
    # Define prediction function y
    y = tf.nn.softmax(tf.matmul(x,W) + b)
    # Define cost function
    cross_entropy = -tf.reduce_sum(y_*tf.log(y))
    # Train model
    train_step = tf.train.GradientDescentOptimizer(0.0001).minimize(cross_entropy)
    for i in range(5000):
        train_step.run(feed_dict={x: data, y_: label})
    # Evaluation
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(accuracy.eval(feed_dict={x: data, y_: label}))
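The gen module is project-specific and not shown. A minimal stand-in that
matches the genData(N, D, K) call is sketched below; it is purely an
assumption (it just draws Gaussian clusters) so the snippet runs end to end:

import numpy as np

def genData(N, D, K):
    # Hypothetical stand-in for gen.genData: N points per class, D features,
    # K classes, drawn as Gaussian blobs around random class centers.
    data = np.zeros((N * K, D), dtype=np.float32)
    labels = np.zeros(N * K, dtype=np.int32)
    for k in range(K):
        center = 5.0 * np.random.randn(D)
        data[k * N:(k + 1) * N] = center + np.random.randn(N, D)
        labels[k * N:(k + 1) * N] = k
    return data, labels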
Example #2
# Assumes Keras' ImageDataGenerator and the module's config constants
# (FILE_PATH_TRAIN, FILE_PATH_TEST, IMG_SIZE, BATCH_SIZE) defined elsewhere.
from keras.preprocessing.image import ImageDataGenerator

def all_test():
    train_data = ImageDataGenerator(rescale=1. / 255)
    test_data = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_data.flow_from_directory(directory=FILE_PATH_TRAIN,
                                                     target_size=(IMG_SIZE[0],
                                                                  IMG_SIZE[1]),
                                                     batch_size=BATCH_SIZE,
                                                     class_mode='categorical')

    test_generator = test_data.flow_from_directory(directory=FILE_PATH_TEST,
                                                   target_size=(IMG_SIZE[0],
                                                                IMG_SIZE[1]),
                                                   batch_size=BATCH_SIZE,
                                                   class_mode='categorical')

    images, labels = next(train_generator)
    print(images)

    train = genData("D:/data/Dog-cat/train/")
    inputs, labels = next(train)
    print(inputs['the_input'])
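genData here is not Keras' ImageDataGenerator but a project-specific
generator. Judging only from the call sites (it takes a directory and the
first yielded element is a dict keyed by 'the_input'), a compatible sketch
might look like the following; every name and default below is an assumption:

import os

import numpy as np
from keras.preprocessing.image import img_to_array, load_img

def genData(path, batch_size=32, img_size=(128, 128), n_class=2):
    # Hypothetical sketch: yields ({'the_input': images}, one_hot_labels)
    # batches forever, assuming `path` holds one subdirectory per class.
    classes = sorted(os.listdir(path))
    files = [(os.path.join(path, c, f), i)
             for i, c in enumerate(classes)
             for f in os.listdir(os.path.join(path, c))]
    while True:
        np.random.shuffle(files)
        for start in range(0, len(files) - batch_size + 1, batch_size):
            batch = files[start:start + batch_size]
            x = np.stack([img_to_array(load_img(fp, target_size=img_size)) / 255.0
                          for fp, _ in batch])
            y = np.eye(n_class)[[label for _, label in batch]]
            yield {'the_input': x}, y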
Example #3
# Assumes TensorFlow 1.x, the project's gen module, and the
# inference/loss/training helpers and FLAGS defined elsewhere.
import tensorflow as tf

def evaluation(logits, labels):
  """Evaluate how well the logits predict the labels.

  Returns:
    A scalar int32 tensor with the number of examples (out of batch_size)
    that were predicted correctly.
  """
  # For a classifier model, we can use the in_top_k Op.
  # It returns a bool tensor with shape [batch_size] that is true for
  # the examples where the label was in the top k (here k=1)
  # of all logits for that example.
  correct = tf.nn.in_top_k(logits, labels, 1)
  # Return the number of true entries.
  return tf.reduce_sum(tf.cast(correct, tf.int32))

# number of points per class
N = 100
[data, labels] = gen.genData(N, DIMENSION, NUM_CLASSES)
[test_data, test_labels] = gen.genData(N, DIMENSION, NUM_CLASSES)

# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    data_placeholder = tf.placeholder(tf.float32, shape=(None, DIMENSION))
    labels_placeholder = tf.placeholder(tf.int32, shape=(None,))  # (None) is not a tuple; (None,) gives a 1-D tensor
    # Build a Graph that computes predictions from the inference model.
    logits = inference(data_placeholder, FLAGS.hidden1, FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation; bind the result to a
    # new name so the loss() function itself is not shadowed.
    loss_op = loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = training(loss_op, FLAGS.learning_rate)
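The example stops after wiring up train_op. A minimal loop to drive it,
placed inside the same `with tf.Graph().as_default():` block, might look
like this; the step count and print interval are assumptions, everything
else reuses names defined above:

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(2000):
            _, loss_value = sess.run(
                [train_op, loss_op],
                feed_dict={data_placeholder: data,
                           labels_placeholder: labels})
            if step % 100 == 0:
                print("step %d, loss %.4f" % (step, loss_value))
        # Count correct test predictions with the evaluation() helper above.
        eval_op = evaluation(logits, labels_placeholder)
        true_count = sess.run(eval_op,
                              feed_dict={data_placeholder: test_data,
                                         labels_placeholder: test_labels})
        print("test accuracy: %.3f" % (true_count / float(len(test_data))))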
Example #4
import os

import numpy as np
from keras.callbacks import (LearningRateScheduler, ModelCheckpoint,
                             TensorBoard)
from keras.layers import Input
from keras.models import Model

# cnn_layer, MobileNet, resnet34, genData, sofrmaxLoss and the upper-case
# config constants are project-specific and assumed to be defined elsewhere.
def train(model):
    """
    选择训练方式

    :param model: 0,1,2。0用自己写的cnn来训练;1用MobileNetV2训练;2用ResNet34训练

    :return: 训练结果的历史记录
    """

    _input = Input(shape=IMG_SIZE, name='the_input')

    #y_pred = cnn_layer(inputs=_input, n_class=N_CALSS)
    if model == 0:
        y_pred = cnn_layer(inputs=_input,
                           n_class=N_CALSS,
                           last_layer_activation=LAST_LAYER_ACTIVATION)
    elif model == 1:
        network = MobileNet(IMG_SIZE, N_CALSS)
        y_pred = network.build(_input=_input,
                               last_layer_activation=LAST_LAYER_ACTIVATION)
    elif model == 2:
        y_pred = resnet34(inputs=_input,
                          n_class=N_CALSS,
                          last_layer_activation=LAST_LAYER_ACTIVATION)
    else:
        raise ValueError("The param 'model' must be in [0, 1, 2]!")

    model = Model(inputs=_input, outputs=y_pred)

    #opt = SGD(lr=0.001, momentum=0.5, decay=1e-6)
    #opt = Adam(lr=0.001, decay=1e-6)
    #opt = RMSprop(lr=0.0001, decay=1e-6)
    # Load pre-trained weights if a checkpoint exists
    if os.path.exists(PRE_MODEL_PATH):
        print("Loading model weights...")
        basemodel = Model(inputs=_input, outputs=y_pred)
        basemodel.summary()
        # load_weights() returns None, so don't rebind basemodel to it
        basemodel.load_weights(PRE_MODEL_PATH)
        print("Done!")
    # The following built-in losses are available; see the official docs for
    # how each is computed. A custom loss function can also be used:
    # binary_crossentropy (suited to a single output label), categorical_crossentropy,
    # sparse_categorical_crossentropy, poisson, kl_divergence,
    # mean_squared_error, mean_absolute_error, mean_absolute_percentage_error,
    # mean_squared_logarithmic_error, cosine_similarity, huber, log_cosh,
    # hinge, squared_hinge, categorical_hinge
    model.compile(loss=sofrmaxLoss, optimizer='adam', metrics=['accuracy'])
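    # For example, compiling with a built-in loss instead of the project's
    # custom sofrmaxLoss would look like:
    # model.compile(loss='categorical_crossentropy', optimizer='adam',
    #               metrics=['accuracy'])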
    """
    train_data = ImageDataGenerator(rescale=1. / 255)
    test_data = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_data.flow_from_directory(
        directory=FILE_PATH_TRAIN,
        target_size=(IMG_SIZE[0], IMG_SIZE[1]),
        batch_size=BATCH_SIZE,
        class_mode='categorical')

    test_generator = test_data.flow_from_directory(
        directory=FILE_PATH_TEST,
        target_size=(IMG_SIZE[0], IMG_SIZE[1]),
        batch_size=BATCH_SIZE,
        class_mode='categorical')
    """
    train_generator = genData("D:/data/Dog-cat/train")
    test_generator = genData("D:/data/Dog-cat/test")
    save_dir = SAVE_DIR
    if not os.path.exists(save_dir):
        print("Making Dir:", save_dir)
        os.makedirs(save_dir)  # creates missing parent directories too

    checkpoint = ModelCheckpoint(filepath=save_dir +
                                 '/test-{epoch:02d}-{val_loss:.2f}.h5',
                                 monitor='val_loss',
                                 save_best_only=False,
                                 save_weights_only=True)
    # Exponential decay schedule: lr = 0.0005 * 0.4**epoch
    lr_schedule = lambda epoch: 0.0005 * 0.4**epoch
    learning_rate = np.array([lr_schedule(i) for i in range(EPOCH)])
    changelr = LearningRateScheduler(lambda epoch: float(learning_rate[epoch]))
    tensorboard = TensorBoard(log_dir=save_dir + '/logs', write_graph=True)

    print("---------------start training---------------")
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=24000 // BATCH_SIZE,
        epochs=EPOCH,
        initial_epoch=0,
        validation_data=test_generator,
        validation_steps=1000 // BATCH_SIZE,
        callbacks=[checkpoint, changelr, tensorboard])
    return history.history
    #output = Model(inputs=_input, outputs=model.get_layer(name='conv1_1').output)
    #print(output.predict(_input))
    """