Example #1
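# Trains a VGG classifier with TensorFlow 1.x: builds the graph, then loops
# over random batches, periodically saving .npy checkpoints and writing
# TensorBoard summaries.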
# Reconstructed imports (assumed): `model`, `read_data`, and `Config` are
# project-local modules; the module path `config` for Config is a guess.
import os
from datetime import datetime

import tensorflow as tf

import model
import read_data
from config import Config


def main():
    config = Config()

    modeler = model.VGG(config)

    # reader for the training data ("data/train")
    train_reader = read_data.VGGReader(config)

    modeler.inference()
    loss = modeler.loss
    train_op = modeler.train_op(loss)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=100)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
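    # allow_growth makes TF allocate GPU memory on demand instead of
    # reserving the whole device up front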

    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        #saver.restore(sess, config.param_dir + config.load_filename)
        #print("restore params " + str(config.steps))

        merged = tf.summary.merge_all()
        logdir = os.path.join(config.log_dir,
                              datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        train_writer = tf.summary.FileWriter(logdir, sess.graph)

        # start training
        print('start training')
        for step in range(config.max_step):
            #start_time = time.time()

            # assemble the next batch on the CPU
            with tf.device('/cpu:0'):
                images_train, labels_train, filesname_train = train_reader.get_random_batch(
                    True)

            feed_dict = {
                modeler.image_holder: images_train,
                modeler.label_holder: labels_train,
                modeler.is_train: True
            }

            # run one optimizer step on the GPU
            with tf.device('/gpu:0'):
                _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            with tf.device('/cpu:0'):
                # periodically snapshot the weights to an .npy file
                if (step + 1) % config.checkpointer_iter == 0:
                    modeler.save_npy(
                        sess, config.param_dir + config.save_filename +
                        str(modeler.global_step.eval()) + '.npy')

                if (step + 1) % config.summary_iter == 0:
                    summary = sess.run(merged, feed_dict=feed_dict)
                    train_writer.add_summary(summary,
                                             modeler.global_step.eval())

            if step % 10 == 0:
                print('step %d, loss = %.3f' % (step, loss_value))
Example #2
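# Trains a Keras VGG16 with a custom fully-connected head via train_on_batch,
# restoring previously saved .h5 weights and checkpointing every
# config.save_iter steps.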
# Reconstructed imports (assumed module paths)
from keras.layers import Dense, Flatten, Input
from keras.models import Model
from keras.optimizers import SGD
import tensorflow as tf

import model
import read_data
from config import Config


def main():
    config = Config()
    #modeler = model.VGG(config)
    modeler = model.VGG16(include_top=False)
    #modeler = model.ResNet50(include_top=False, weights='imagenet', input_shape=[config.img_height,config.img_width, 3], pooling='max')
    inputs = Input(shape=[config.img_height, config.img_width, 3])
    y = modeler(inputs)
    
    # fine-tune the model (alternative ResNet50 head, kept for reference):
    '''
    y = Dense(config.type_size, activation='softmax', name='fc17')(y)
    modeler = Model(inputs, y, name='resnet50')
    '''
    y = Flatten()(y)
    y = Dense(4096, activation='relu', name='fc1')(y)
    y = Dense(4096, activation='relu', name='fc2')(y)
    y = Dense(config.type_size, activation='softmax', name='predictions')(y)
    modeler = Model(inputs, y, name='vgg16')

    print "restore params" + str(config.restore_steps)
    modeler.load_weights(config.params_dir + config.net_name + "-weather-params-" + str(config.restore_steps) + ".h5")

    modeler.compile(loss='categorical_crossentropy',
                    optimizer=SGD(lr=config.learning_rate, momentum=0.9,
                                  nesterov=True))

    # reader for the training data ("data/train")
    train_reader = read_data.VGGReader(config)

    init = tf.global_variables_initializer()
    
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True

    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        print('start training')
        for step in range(config.max_step):

            with tf.device('/cpu:0'):
                images_train, labels_train, filesname_train = train_reader.get_random_batch(False)

            #images_train = preprocess_input(images_train) 
            if step == 0:
                print "images_train[0]: ", images_train[0]
            modeler.train_on_batch(images_train, labels_train)
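            # re-evaluate the same batch in inference mode to get a loss to log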
            loss_value = modeler.test_on_batch(images_train, labels_train)

            if step % 10 == 0:
                print('step %d, loss = %.3f' % (step, loss_value))
                #print prediction
            if (step + 1) % config.save_iter == 0:
                # save weights
                modeler.save_weights(config.params_dir + config.net_name +
                                     "-weather-params-" +
                                     str(step + 1 + config.restore_steps) + ".h5")

                pre = modeler.predict(images_train)
                for i in range(config.batch_size):
                    print(labels_train[i])
                    print(pre[i])
Example #3
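# Evaluates the TF VGG model on the validation set, accumulating a 20-class
# confusion matrix and an overall accuracy figure.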
# Reconstructed imports (assumed module paths)
import numpy as np
import tensorflow as tf

import model
import read_data
from config import Config


def main():
    config = Config()

    modeler = model.VGG(config)

    # reader for the validation data ("data/val")
    val_reader = read_data.VGGReader(config)

    modeler.inference()
    accuracy = modeler.accuracy()

    init = tf.global_variables_initializer()

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True

    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        #saver.restore(sess, config.param_dir + config.load_filename)
        print('restore params ' + str(config.steps))

        # testing: accumulate a confusion matrix over the 20 classes
        confmat = np.zeros((20, 20))
        count = 0
        num_iter = config.test_size // config.batch_size
        for i in range(num_iter):
            with tf.device('/cpu:0'):
                images_val, labels_val, filenames_val = val_reader.batch()

            with tf.device("/gpu:0"):

                predict = sess.run(modeler.pred,
                                   feed_dict={
                                       modeler.image_holder: images_val,
                                       modeler.is_train: False
                                   })
                #print predict, labels_val
                if labels_val[0][predict[0]] > 0:
                    count += 1
                    confmat[predict[0]][predict[0]] += 1
                else:
                    for j in range(20):
                        if labels_val[0][j] > 0:
                            confmat[j][predict[0]] += 1

        # dataset-specific normalization (20 classes / 5823 validation images, assumed)
        confmat = confmat * 20 / 5823

        print('AP: ', count * 1.0 / config.test_size)
        print('confusion matrix: ')
        print(confmat)
Example #4
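# Searches per-class probability thresholds for the Keras VGG16 model and
# writes the best thresholds found to a text file.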
# Reconstructed imports (assumed module paths; find_threshold1/find_threshold2
# are project-local helpers whose home module is a guess)
import numpy as np
from keras.layers import Dense, Flatten, Input
from keras.models import Model

import model
import read_data
from config import Config
from thresholds import find_threshold1, find_threshold2  # assumed module


def main():
    config = Config()
    #modeler = model.VGG(config)
    modeler = model.VGG16(include_top=False)
    #modeler = model.ResNet50(include_top=False, input_shape=[config.img_height,config.img_width, 3], pooling='max')
    inputs = Input(shape=[config.img_height, config.img_width, 3])
    y = modeler(inputs)

    y = Flatten()(y)
    y = Dense(4096, activation='relu', name='fc1')(y)
    y = Dense(4096, activation='relu', name='fc2')(y)
    y = Dense(config.type_size, activation='softmax', name='predictions')(y)
    modeler = Model(inputs, y, name='vgg16')
    '''
    y = Dense(config.type_size, activation='softmax', name='fc17')(y)
    modeler = Model(inputs, y, name='resnet50')
    '''

    modeler.load_weights(config.params_dir + config.net_name + "-params-" +
                         str(config.test_num) + ".h5")

    # reader for the test data ("data/train")
    val_reader = read_data.VGGReader(config)

    print "finding thresholds..."
    for step in range(config.max_step):
        if step % (config.test_size // 10) == 0:
            print 100 * step // config.test_size, "%"

        images_val, labels_val, filesname_val = val_reader.get_batch()
        probs = modeler.predict(images_val)

        thresholds, scores = find_threshold1(probs, labels_val)
        i = np.argmax(scores)
        best_threshold, best_score = thresholds[i], scores[i]

        best_thresholds, best_scores = find_threshold2(probs,
                                                       labels_val,
                                                       num_iters=500,
                                                       seed=best_threshold)
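        # NOTE: best_thresholds is overwritten on every iteration, so only the
        # thresholds fitted on the final batch are written out below.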

    with open(
            "./thresholds/" + config.net_name + "-weather-thresholds-" +
            str(config.test_num) + ".txt", 'w') as fr:
        for i in range(len(best_thresholds)):
            fr.write(str(best_thresholds[i]) + " ")
Example #5
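# Same per-class threshold search as Example #4, but driven through the TF1
# session API of the project's own VGG model.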
# Reconstructed imports (assumed module paths; find_threshold1/find_threshold2
# are project-local helpers whose home module is a guess)
import numpy as np
import tensorflow as tf

import model
import read_data
from config import Config
from thresholds import find_threshold1, find_threshold2  # assumed module


def main():
    config = Config()
    modeler = model.VGG(config)
    modeler.inference()

    # reader for the test data ("data/train")
    val_reader = read_data.VGGReader(config)

    init = tf.global_variables_initializer()

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True

    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        print('restore params ' + str(config.test_num))

        print "finding thresholds..."
        for step in range(config.max_step):
            if step % (config.test_size // 10) == 0:
                print 100 * step // config.test_size, "%"
            with tf.device("/cpu:0"):
                images_val, labels_val, filesname_val = val_reader.batch()

            with tf.device("/gpu:0"):
                _, probs = sess.run([modeler.pred, modeler.prob],
                                    feed_dict={
                                        modeler.image_holder: images_val,
                                        modeler.is_train: False
                                    })

            thresholds, scores = find_threshold1(probs, labels_val)
            i = np.argmax(scores)
            best_threshold, best_score = thresholds[i], scores[i]

            best_thresholds, best_scores = find_threshold2(probs,
                                                           labels_val,
                                                           num_iters=500,
                                                           seed=best_threshold)
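            # NOTE: as in Example #4, only the final batch's best_thresholds
            # survive the loop and are written out below.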

        with open(
                "./thresholds/" + config.net_name + "-thresholds-" +
                str(config.test_num) + ".txt", 'w') as fr:
            for i in range(len(best_thresholds)):
                fr.write(str(best_thresholds[i]) + " ")
Example #6
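# Runs the Keras ResNet50 model over the test set, applies the saved
# per-class thresholds, reports an F-beta (beta=2) score, and optionally
# writes a results CSV.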
# Reconstructed imports (assumed module paths)
import csv
import re

from keras.layers import Dense, Flatten, Input
from keras.models import Model
from sklearn.metrics import fbeta_score

import model
import read_data
from config import Config


def main():
    config = Config()
    #modeler = model.VGG(config)
    #modeler = model.VGG16(include_top=False)
    modeler = model.ResNet50(include_top=False,
                             input_shape=[config.img_height, config.img_width, 3],
                             pooling='max')
    inputs = Input(shape=[config.img_height, config.img_width, 3])
    y = modeler(inputs)

    '''
    y = Flatten()(y)
    y = Dense(4096, activation='relu', name='fc1')(y)
    y = Dense(4096, activation='relu', name='fc2')(y)
    y = Dense(config.type_size, activation='softmax', name='predictions')(y)
    modeler = Model(inputs, y, name='vgg16')

    '''
    y = Dense(config.type_size, activation='softmax', name='fc17')(y)
    modeler = Model(inputs, y, name='resnet50')
    modeler.load_weights(config.params_dir + config.net_name + "-params-" +
                         str(config.test_num) + ".h5")

    #modeler.compile(loss='categorical_crossentropy', optimizer=SGD(lr=config.learning_rate, momentum=0.9,nesterov=True))

    # reader for the test data ("data/train")
    test_reader = read_data.VGGReader(config)

    # load the per-class probability thresholds written by the threshold search
    pre_prob = list()
    with open("./thresholds/" + config.net_name + "-thresholds-" +
              str(config.test_num) + ".txt", 'r') as fr:
        for line in fr:
            tmp = re.split(' ', line.strip())
            for i in range(config.type_size):
                pre_prob.append(float(tmp[i]))
    print("thresholds: ", pre_prob)

    test_labels = list()
    pre_labels = list()
    val_labels = list()
    print "start testing..."
    for step in range(config.test_size):
        if step % (config.test_size // 10) == 0:
            print 100 * step // config.test_size, "%"

        images_test, labels_test, filesname_test = test_reader.batch()
        prob = modeler.predict(images_test)
        # collect the classes whose probability clears the per-class threshold
        test_index = list()
        for i in range(config.type_size):
            val_labels.append(labels_test[0][i])

            if prob[0][i] > pre_prob[i]:
                test_index.append(i)
    
        s = filesname_test[0]
        for n in range(config.type_size):
            if n in test_index:
                s += " 1.0"
                pre_labels.append(1.0)
            else:
                s += " 0.0"
                pre_labels.append(0.0)

        test_labels.append(s)

    print "scores: ", fbeta_score(val_labels, pre_labels, beta=2)
   
    if not config.val_mode:
        with open("./labels/test_results.csv", 'w') as fr:
            fcsv = csv.writer(fr)
            for i in range(len(test_labels)):
                fcsv.writerow([test_labels[i]])
Example #7
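# Multi-label evaluation of the TF VGG model using hand-tuned per-class
# thresholds; writes the predicted label strings to a CSV.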
# Reconstructed imports (assumed module paths)
import csv

import tensorflow as tf

import model
import read_data
from config import Config


def main():
    config = Config()

    modeler = model.VGG(config)

    # reader for the validation data ("data/val")
    val_reader = read_data.VGGReader(config)

    modeler.inference()
    accuracy = modeler.accuracy()

    init = tf.global_variables_initializer()
    #saver = tf.train.Saver(max_to_keep=200)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True

    label_indexs = list()
    test_labels = list()

    with tf.Session(config=sess_config) as sess:
        sess.run(init)
        #saver.restore(sess, Config.vgg_path)
        print('restore params ' + str(config.steps))

        # testing
        count = 0
        num_iter = config.test_size // config.batch_size
        max_false_probs = [0.0] * config.type_size
        max_true_probs = [0.0] * config.type_size
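        # hand-tuned per-class probability cutoffs (class index 7 gets a
        # higher threshold than the rest)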
        pre_prob = [
            0.13, 0.13, 0.13, 0.13, 0.13, 0.13, 0.13, 0.6, 0.13, 0.13, 0.13,
            0.13, 0.13, 0.13, 0.13, 0.13, 0.13
        ]
        for i in range(num_iter):
            label_index = list()
            with tf.device('/cpu:0'):
                images_val, labels_val, filenames_val = val_reader.batch()

            with tf.device("/gpu:0"):

                predict, prob = sess.run([modeler.pred, modeler.prob],
                                         feed_dict={
                                             modeler.image_holder: images_val,
                                             modeler.is_train: False
                                         })
                if i < 20:
                    print(predict, labels_val)
                    print(prob)
                if i % (num_iter // 10) == 0:
                    print(100 * i // num_iter, "%")
                for j in range(config.type_size):
                    if (labels_val[0][j] == 1.0
                            and prob[0][j] > max_true_probs[j]):
                        max_true_probs[j] = prob[0][j]

                    if (labels_val[0][j] == 0.0
                            and prob[0][j] > max_false_probs[j]):
                        max_false_probs[j] = prob[0][j]

                    if prob[0][j] > pre_prob[j]:
                        label_index.append(j)

                label_indexs.append(label_index)

                is_correct = True
                for k in range(len(label_index)):
                    if labels_val[0][label_index[k]] == 0.0:
                        is_correct = False
                        break

                count_in = 0
                count_in1 = 0
                for k in range(len(labels_val[0])):
                    if labels_val[0][k] > 0.0:
                        count_in1 += 1
                        for n in range(len(label_index)):
                            if label_index[n] == k:
                                count_in += 1
                                break
                if count_in != count_in1:
                    is_correct = False

                s = filenames_val[0]
                for n in range(config.type_size):  # was a hard-coded 17
                    if n in label_index:
                        s += " 1.0"
                    else:
                        s += " 0.0"
                test_labels.append(s)

                if is_correct:
                    count += 1
        with open("./labels/test_labels_pro.csv", 'w') as fr:
            fcsv = csv.writer(fr)
            for i in range(len(test_labels)):
                fcsv.writerow([test_labels[i]])