Example #1
def run_svm():
    # assumes numpy imported as np and the project's data-loading module as ld

    F = ld.Flower()
    flowers, Image, Label, Label_onehot = F.read_img()
    Train_img, Train_label, Validation_img, Validation_label, Test_img, Test_label = F.split_data(
        flowers, Image, Label, Label_onehot, returnwhat=1)

    shape = np.shape(Train_img)
    N = shape[0]
    dim = shape[1] * shape[2] * shape[3]
    Train_img_flatten = np.reshape(Train_img, (N, dim))

    shape = np.shape(Validation_img)
    N = shape[0]
    dim = shape[1] * shape[2] * shape[3]
    Validation_img_flatten = np.reshape(Validation_img, (N, dim))

    shape = np.shape(Test_img)
    N = shape[0]
    dim = shape[1] * shape[2] * shape[3]
    Test_img_flatten = np.reshape(Test_img, (N, dim))

    learning_rates = [1.4e-4, 1.5e-4, 1.6e-4]
    regularization_strengths = [(1 + i * 0.1) * 1e-4 for i in range(-3, 3)] + [
        (2 + 0.1 * i) * 1e-4 for i in range(-3, 3)
    ]

    all_W = []
    all_acc = []
    for i in range(len(learning_rates)):
        for j in range(len(regularization_strengths)):
            W = train(Train_img_flatten,
                      Train_label,
                      learning_rates[i],
                      regularization_strengths[j],
                      num_iters=200,
                      batch_size=200)
            all_W.append(W)
            predict_label = predict(Validation_img_flatten, W)  # assuming predict takes the trained weights (see sketch below)
            acc = accuracy(Validation_label, predict_label)
            all_acc.append(acc)
            print("learning_rates=%f,regularization_strengths=%f,accuracy=%f" %
                  (learning_rates[i], regularization_strengths[j], acc))

    index = np.argmax(all_acc)
    W = all_W[index]
    i = index // len(regularization_strengths)
    j = index % len(regularization_strengths)
    print("best hyperparameters: learning_rate=%f, regularization_strength=%f" %
          (learning_rates[i], regularization_strengths[j]))

    Test_predict = predict(Test_img_flatten, W)  # use the best weights found above
    acc = accuracy(Test_label, Test_predict)
    print("The accuracy of test set is %f" % acc)
Example #2
def run_resnet50(batch_size=16, epochs=20, lr=0.0001):
    # assumes tensorflow as tf, plus Sequential, ResNet50, Flatten,
    # BatchNormalization and Dense imported from tf.keras at module level
    resnet_weights_path = '/home/songyu/AICA/codes/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

    image_size = 224
    num_classes = 5

    F = ld.Flower()
    flowers, Image, Label, Label_onehot = F.read_img(image_size)
    X_train, y_train, X_val, y_val, X_test, y_test = F.split_data(flowers,
                                                                  Image,
                                                                  Label,
                                                                  Label_onehot,
                                                                  returnwhat=2)
    X_train = np.vstack([X_train, X_val])
    y_train = np.vstack([y_train, y_val])

    model = Sequential()

    model.add(
        ResNet50(include_top=False, pooling='avg',
                 weights=resnet_weights_path))
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(2048, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(1024, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(num_classes, activation='softmax'))

    model.layers[0].trainable = False

    model.compile(optimizer=tf.keras.optimizers.Adam(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(X_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              validation_split=0.05)
    score = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('test loss and accuracy:', score)
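
The notop weight file above is loaded from a hard-coded local path. If that file is not at hand, Keras can download and cache the same ImageNet weights itself; a minimal sketch of that variant:

from tensorflow.keras.applications import ResNet50

# weights='imagenet' makes Keras fetch the notop weights automatically
base = ResNet50(include_top=False, pooling='avg', weights='imagenet')
base.trainable = False  # freeze the backbone, as the example does via model.layers[0]
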
Example #3
def run_knn():

    F = ld.Flower()
    flowers, Image, Label, Label_onehot = F.read_img()
    Train_img, Train_label, Validation_img, Validation_label, Test_img, Test_label = F.split_data(
        flowers, Image, Label, Label_onehot, returnwhat=1)

    shape = np.shape(Train_img)
    N = shape[0]
    dim = shape[1] * shape[2] * shape[3]
    Train_img_flatten = np.reshape(Train_img, (N, dim))

    shape = np.shape(Validation_img)
    N = shape[0]
    dim = shape[1] * shape[2] * shape[3]
    Validation_img_flatten = np.reshape(Validation_img, (N, dim))

    shape = np.shape(Test_img)
    N = shape[0]
    dim = shape[1] * shape[2] * shape[3]
    Test_img_flatten = np.reshape(Test_img, (N, dim))

    all_acc = []
    dists = compute_distances(Validation_img_flatten, Train_img_flatten)
    for k in range(1, 11):
        validation_pred = predict_labels(dists, k, Train_label)
        acc = accuracy(Validation_label, validation_pred)
        all_acc.append(acc)
        print("k = %d , accuracy is %f" % (k, acc))

    index = np.argmax(all_acc)
    k = index + 1
    dists = compute_distances(Test_img_flatten, Train_img_flatten)
    test_predict = predict_labels(dists, k, Train_label)
    acc = accuracy(Test_label, test_predict)
    print("k = %d , The accuracy of test set is %f" % (k, acc))
Example #4
def run_fc(lr, epochs, batch_size, reg_rate):
    # NOTE: there is a problem with the data
    F = ld.Flower()
    flowers, Image, Label, Label_onehot = F.read_img()
    Train_img, Train_label, Validation_img, Validation_label, Test_img, Test_label = F.split_data(
        flowers, Image, Label, Label_onehot, returnwhat=1)
    # Train_img = np.random.randn(200,128,128,3)
    # Train_label = np.random.uniform(0,5,200).astype(np.int32)

    N = Train_img.shape[0]
    M = Validation_img.shape[0]
    T = Test_img.shape[0]
    index = list(range(N))

    model = FC()
    img = tf.placeholder(dtype=tf.float32, shape=(None, 128, 128, 3))
    true = tf.placeholder(dtype=tf.int64, shape=(None,))
    is_training = tf.placeholder(tf.bool)
    predicted_onehot, reg = model.fcn(img, reg_rate, is_training)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=true, logits=predicted_onehot)) + reg
    predicted = tf.argmax(predicted_onehot, axis=-1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, true), tf.float32))
    optim = tf.train.AdamOptimizer(lr).minimize(loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)

    start = time.time()
    for i in range(epochs):
        batch_id = random.sample(index, batch_size)
        train_img_batch = Train_img[batch_id]
        train_label_batch = Train_label[batch_id]

        _, loss_, accuracy_ = sess.run([optim, loss, accuracy],
                                       feed_dict={
                                           img: train_img_batch,
                                           true: train_label_batch,
                                           is_training: True
                                       })
        if (i + 1) % 1000 == 0:
            val_accuracy = []
            # print('step:',i+1,'loss:',loss_,'accuracy:',accuracy_)
            s = 0
            while s < M:
                e = min(s + batch_size, M)
                val_acc = sess.run(
                    accuracy,
                    feed_dict={
                        img: Validation_img[s:e],
                        true: Validation_label[s:e],
                        is_training: False
                    })
                val_accuracy.append(val_acc * (e - s))
                s = e
            val_acc = sum(val_accuracy) / M
            end = time.time()
            duration = end - start
            start = time.time()
            print(
                'step {:d} \t loss = {:.3f} \t train_accuracy =  {:.3f} \t val_accuracy = {:.3f} \t ({:.3f} sec/1000_step)'
                .format(i + 1, loss_, accuracy_, val_acc, duration))

    t = 0
    test_accuracy = []
    while t < T:
        e = min(t + batch_size, T)
        test_acc = sess.run(accuracy,
                            feed_dict={
                                img: Test_img[t:e],
                                true: Test_label[t:e],
                                is_training: False
                            })
        test_accuracy.append(test_acc * (e - t))
        t = e
    print('test accuracy is', sum(test_accuracy) / T)
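
The FC class (model.fcn) is defined elsewhere; only its call signature is visible here. A minimal TF1-style sketch consistent with that call, returning logits plus an L2 regularization term; the layer sizes are illustrative assumptions:

class FC:
    def fcn(self, img, reg_rate, is_training):
        x = tf.reshape(img, (-1, 128 * 128 * 3))
        x = tf.layers.dense(x, 512, activation=tf.nn.relu)
        x = tf.layers.dropout(x, rate=0.5, training=is_training)
        logits = tf.layers.dense(x, 5)  # 5 flower classes
        reg = reg_rate * tf.add_n(
            [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
        return logits, reg
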
Example #5
def run_resnet(lr, epochs, batch_size, reg_rate):
    '''
    lr: learning rate
    epochs: number of training steps (one mini-batch per step)
    batch_size: number of samples in a batch
    reg_rate: regularization strength
    '''
    F = ld.Flower()
    flowers, Image, Label, Label_onehot = F.read_img()
    Train_img, Train_label, Validation_img, Validation_label, Test_img, Test_label = F.split_data(
        flowers, Image, Label, Label_onehot, 1, train=0.85, val=0.9)


    N = Train_img.shape[0]
    index = list(range(N))
    M = Validation_img.shape[0]
    T = Test_img.shape[0]

    model = Resnet()
    img = tf.placeholder(dtype=tf.float32, shape=(None, 128, 128, 3))
    true = tf.placeholder(dtype=tf.int64, shape=(None,))
    is_training = tf.placeholder(tf.bool)

    # steps = tf.Variable(0, name='global_step', trainable=False)
    # lr = tf.train.exponential_decay(lr, steps, 100, 0.95, staircase=True, name='learning_rate')
    # update = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

    predicted_onehot, reg = model.resnet(img, is_training, reg_rate)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=true, logits=predicted_onehot)) + reg
    predicted = tf.argmax(tf.nn.softmax(predicted_onehot), axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, true), tf.float32))
    optim = tf.train.AdamOptimizer(lr).minimize(loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)

    start = time.time()
    for i in range(epochs):
        batch_id = random.sample(index, batch_size)
        train_img_batch = Train_img[batch_id]
        train_label_batch = Train_label[batch_id]

        # sess.run(update)
        _, loss_, accuracy_ = sess.run([optim, loss, accuracy],
                                       feed_dict={
                                           img: train_img_batch,
                                           true: train_label_batch,
                                           is_training: True
                                       })
        if (i + 1) % 100 == 0:
            val_accuracy = []
            s = 0
            while s < M:
                e = min(s + batch_size, M)
                val_acc = sess.run(accuracy,
                                   feed_dict={
                                       img: Validation_img[s:e],
                                       true: Validation_label[s:e],
                                       is_training: False
                                   })
                val_accuracy.append(val_acc * (e - s))
                s = e
            val_acc = sum(val_accuracy) / M
            end = time.time()
            duration = end - start
            start = time.time()
            print(
                'step {:d} \t loss = {:.3f} \t train_accuracy =  {:.3f} \t val_accuracy = {:.3f} \t ({:.3f} sec/100_step)'
                .format(i + 1, loss_, accuracy_, val_acc, duration))
            # print(pred)
    t = 0
    test_accuracy = []
    while t < T:
        e = min(t + batch_size, T)
        test_acc = sess.run(accuracy,
                            feed_dict={
                                img: Test_img[t:e],
                                true: Test_label[t:e],
                                is_training: False
                            })
        test_accuracy.append(test_acc * (e - t))
        t = e
    print('test accuracy is', sum(test_accuracy) / T)
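
The commented-out update line above points at the batch-norm moving-average updates. In graph-mode TensorFlow these update ops do not run automatically; the usual pattern, sketched here, is to make the train op depend on them:

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    optim = tf.train.AdamOptimizer(lr).minimize(loss)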