Example #1
File: value_save.py Project: wwbld/Gomoku
def main():
    training_data, training_target = util.read_value_csv(TRAINING)
    testing_data, testing_target = util.read_value_csv(TESTING)

    training = util.DataSet(training_data, training_target)
    test = util.DataSet(testing_data, testing_target)

    x = tf.placeholder(tf.float32, [None, 128], name="x")
    y_ = tf.placeholder(tf.float32, [None, 2], name="y_")

    y_conv, keep_prob = deepnn(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)

    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1, name="output"),
                                      tf.argmax(y_, 1, name="target"))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction, name="predict_op")

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = training.next_batch(100)
            if i % 100 == 0:
                training_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                })
                print('step %d, training accuracy %g' % (i, training_accuracy))
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5
            })
            #best = sess.run([y_conv], feed_dict={x:batch[0], y_:batch[1], keep_prob:1.0})
            #print(best)

        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: test._images,
            y_: test._labels,
            keep_prob: 1.0
        }))

        saver.save(sess, "value_model/value_model")
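
Every example on this page assumes a util.DataSet helper exposing a next_batch method plus _images and _labels attributes. The helper itself is never shown and differs per project; below is a minimal sketch of that assumed interface, not any project's actual code:

import numpy as np

class DataSet:
    """Minimal sketch of the interface the examples rely on (assumed)."""

    def __init__(self, images, labels):
        self._images = np.asarray(images)
        self._labels = np.asarray(labels)
        self._index = 0

    def next_batch(self, batch_size):
        # Reshuffle whenever the current pass through the data is exhausted.
        if self._index + batch_size > len(self._images):
            perm = np.random.permutation(len(self._images))
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            self._index = 0
        start = self._index
        self._index += batch_size
        return self._images[start:self._index], self._labels[start:self._index]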
Example #2
def main(unused_argv):
    training_data, training_target = util.read_csv(TRAINING)
    testing_data, testing_target = util.read_csv(TESTING)

    training = util.DataSet(training_data, training_target)
    test = util.DataSet(testing_data, testing_target)

    x = tf.placeholder(tf.float32, [None, 9], name="x")
    y_ = tf.placeholder(tf.float32, [None, 1], name="y_")

    y_conv, keep_prob = deepnn(x)

    with tf.name_scope('loss'):
        # Squared error (reduced to MSE below), despite the variable name.
        cross_entropy = tf.square(y_conv - y_)

    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.cast(y_conv, tf.int32, name="output"),
                                      tf.cast(y_, tf.int32, name="target"))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction, name="predict_op")

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(50000):
            batch = training.next_batch(50)
            if i % 1000 == 0:
                training_loss = cross_entropy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                })
                print('step %d, loss %g' % (i, training_loss))
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5
            })

        print('test loss %g' % cross_entropy.eval(feed_dict={
            x: test._images,
            y_: test._labels,
            keep_prob: 1.0
        }))
        saver.save(sess, "model_1")
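
Unlike the softmax classifier in Example #1, this model is a regression: one output unit, a squared-error loss, and an "accuracy" that counts exact matches after truncating both tensors to int32. A standalone TF1 illustration of that truncation semantics (not project code):

import tensorflow as tf

pred = tf.constant([[0.9], [1.2], [2.0]])
target = tf.constant([[1.0], [1.0], [2.0]])
match = tf.equal(tf.cast(pred, tf.int32), tf.cast(target, tf.int32))
accuracy = tf.reduce_mean(tf.cast(match, tf.float32))

with tf.Session() as sess:
    # 0.9 truncates to 0 and misses its target of 1, so accuracy is 2/3.
    print(sess.run(accuracy))  # 0.6666667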
Example #3
def main(unused_argv):
    training_data, training_target, testing_data, testing_target = util.read_csv(
        FILE)

    training = util.DataSet(training_data, training_target)
    test = util.DataSet(testing_data, testing_target)

    x = tf.placeholder(tf.float32, [None, 64 * 64 * 3], name="x")
    y_ = tf.placeholder(tf.float32, [None, 20], name="y_")

    y_conv, keep_prob = build_model(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)

    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1, name="output"),
                                      tf.argmax(y_, 1, name="target"))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction, name="predict_op")

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = training.next_batch(10)
            arr = convertLabels(batch[1])
            if i % 1 == 0:  # i % 1 is always 0, so accuracy is logged every step
                training_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: arr,
                    keep_prob: 1.0
                })
                print('step %d, training accuracy %g' % (i, training_accuracy))
            train_step.run(feed_dict={x: batch[0], y_: arr, keep_prob: 0.5})

        arr = convertLabels(test._labels)
        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: test._images,
            y_: arr,
            keep_prob: 1.0
        }))
        saver.save(sess, "model_1")
Example #4
File: test.py Project: wwbld/CSC570
def main():
    testing_data, testing_target = util.read_csv(TESTING)
    test = util.DataSet(testing_data, testing_target)

    graph = util.ImportGraph('./', 'model_1')
    
    for i in range(20):
        print("output: {0}, target: {1}".format(
            graph.get_predict(test._images[i]), test._labels[i]))
Example #5
File: value_test.py Project: wwbld/Gomoku
def main():
    testing_data, testing_target = util.read_value_csv(TESTING)
    test = util.DataSet(testing_data, testing_target)

    with tf.Session() as sess:
        saver = tf.train.import_meta_graph("value_model/value_model.meta")
        saver.restore(sess, tf.train.latest_checkpoint("value_model/"))
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name("x:0")
        y_ = graph.get_tensor_by_name("y_:0")
        output = graph.get_tensor_by_name("accuracy/output:0")
        target = graph.get_tensor_by_name("accuracy/target:0")
        keep_prob = graph.get_tensor_by_name("dropout/keep_prob:0")
        feed_dict = {x: testing_data, y_: testing_target, keep_prob: 1.0}
        predict_op = graph.get_tensor_by_name("predict_op:0")
        print("accuracy is %g" % sess.run(predict_op, feed_dict))
Example #6
parser.add_argument('--gpu_ids',
                    default='2',
                    type=str,
                    help='id of gpu ex) "0" or "0,1"')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--savepic', action='store_true')

args = parser.parse_args(
    '--name test --ep best --whichset train_val --batch_id 0 --shuffle'.split())
# args = parser.parse_args()

os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

dataset = util.DataSet('./dataset/' + args.whichset, args.whichset)
dataloader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        num_workers=args.num_workers,
                        shuffle=args.shuffle)

exp_path, train_path, val_path, infer_path, ckpt_path = util.make_path(args)

model = torch.load(ckpt_path + '/' + 'model.archi').to(device)
# model = nn.DataParallel(model).to(device)
model.load_state_dict(
    torch.load(ckpt_path + '/' + 'weight_' + args.ep + '.pth')['state_dict'])

dataiter = iter(dataloader)
for _ in range(args.batch_id + 1):
    data2show = next(dataiter)
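
The final loop advances the iterator args.batch_id + 1 times so that data2show holds the batch at index args.batch_id. An equivalent one-liner, shown only as a sketch:

from itertools import islice

# Skip the first args.batch_id batches, then take the next one.
data2show = next(islice(iter(dataloader), args.batch_id, None))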
Example #7
    X_train_transformed = np.zeros_like(X_train)
    y_train_transformed = np.zeros_like(y_train)
    for i in range(X_train_transformed.shape[0]):
        # Augment each training image; the arguments to transform_image are
        # assumed to be rotation, shear, and translation ranges.
        X_train_transformed[i] = util.transform_image(X_train[i], 20, 10, 5)
        y_train_transformed[i] = y_train[i]

    X_train = np.vstack((X_train, X_train_transformed))
    y_train = np.hstack((y_train, y_train_transformed))
    y_train = y_train.astype(int)

    X_train_centered = util.min_max_normalization(X_train)
    X_test_centered = util.min_max_normalization(X_test)
    y_train, y_test = util.one_hot_encoding(y_train, y_test)
    train_features, dev_features, train_labels, dev_labels = util.train_dev_split(
        X_train_centered, y_train, 0.1)

    training_dataset = util.DataSet(train_features, train_labels)
    dev_dataset = util.DataSet(dev_features, dev_labels)
    testing_dataset = util.DataSet(X_test_centered, y_test)

    saver = tf.train.Saver()
    best_dev_acc = 1e-10

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
        steps_per_epoch = len(train_features) // BATCH_SIZE
        num_examples = steps_per_epoch * BATCH_SIZE

        training_accuracies = []
        dev_accuracies = []

        training_losses = []
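
The snippet is truncated before the training loop. Given the saver and best_dev_acc tracker above, a typical continuation would checkpoint only when dev accuracy improves. The sketch below assumes the ops x, y_, keep_prob, train_step, and accuracy plus an EPOCHS constant are defined earlier in the file, much as in Examples #1 and #3; it is not the original code:

        for epoch in range(EPOCHS):  # EPOCHS is an assumed constant
            for _ in range(steps_per_epoch):
                batch_x, batch_y = training_dataset.next_batch(BATCH_SIZE)
                train_step.run(feed_dict={x: batch_x, y_: batch_y, keep_prob: 0.5})
            dev_acc = accuracy.eval(feed_dict={x: dev_dataset._images,
                                               y_: dev_dataset._labels,
                                               keep_prob: 1.0})
            dev_accuracies.append(dev_acc)
            if dev_acc > best_dev_acc:
                best_dev_acc = dev_acc
                saver.save(sess, "best_model")  # hypothetical checkpoint path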
Example #8
def main():
    testing_data, testing_target = util.read_csv(TESTING)
    test = util.DataSet(testing_data, testing_target)

    graph = util.ImportGraph('./', 'model_1')
    print(graph.get_predict(test._images[0]))
Example #9
                    help='number of layer of backbone CNN to update weight')

args = parser.parse_args(
    '--name 1stexp --optimizer sgd --lr 0.1 --backbone mobilenet_v2'.split())
# args = parser.parse_args()

exp_path, train_path, val_path, infer_path, ckpt_path = util.make_path(args)

f = open(ckpt_path + '/' + 'exp_config.txt', 'w')
json.dump(args.__dict__, f, indent=2)
f.close()

os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

trainset = util.DataSet('./dataset/train', 'train')
valset = util.DataSet('./dataset/val', 'train_val')

train_loader = DataLoader(trainset,
                          shuffle=True,
                          batch_size=args.batch_size,
                          num_workers=args.num_workers)
val_loader = DataLoader(valset,
                        shuffle=False,
                        batch_size=args.batch_size,
                        num_workers=args.num_workers)

backbone = (ResNetBackbone(args.backbone)
            if args.backbone.startswith('resnet')
            else MobileNetBackbone(args.backbone))

total_layer_ct = sum(1 for _ in backbone.parameters())
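
The layer count is presumably consumed by the truncated argument whose help string ('number of layer of backbone CNN to update weight') opens this example. A sketch of that freezing logic, where args.num_layer_to_update is a hypothetical attribute name (the real flag is cut off above):

# Hypothetical: freeze every backbone parameter except the last N tensors,
# where N comes from the truncated command-line flag.
num_update = args.num_layer_to_update  # assumed attribute name
for i, param in enumerate(backbone.parameters()):
    param.requires_grad = i >= total_layer_ct - num_update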