Example #1
File: play.py  Project: yardal/rlc
import logging
import os


def main():
    # '3' silences TensorFlow's C++ log output (INFO, WARNING and ERROR).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # Log to a file and mirror every record to the console.
    logging.basicConfig(filename='log.txt', level=logging.DEBUG)
    logging.getLogger().addHandler(logging.StreamHandler())

    # Network and self_play are defined elsewhere in the project.
    net = Network("model")
    self_play(net)
Example #2
import numpy as np


def main():
    ValidSampler = Sampler(utils.valid_file)
    TestSampler = Sampler(utils.test_file)
    networks = []
    weights = []
    for i in range(5):
        if i == 0:
            TrainSampler = Sampler(utils.train_file)
            prev_ys = np.copy(TrainSampler.labels)
        else:
            TrainSampler = Sampler(utils.train_file, prev_ys)

        network = Network()
        network.train(TrainSampler)

        # Least-squares fit of this round's predictions to the current
        # residual: w minimizes ||prev_ys - w * cur_ys||^2.
        cur_ys = network.predict(TrainSampler)
        b1 = np.sum(np.multiply(cur_ys, prev_ys))
        b2 = np.sum(np.multiply(cur_ys, cur_ys))
        w = float(b1) / b2
        # Whatever is left unexplained becomes the next round's target.
        prev_ys = np.subtract(prev_ys, w * cur_ys)

        print(i, 'done with weight', w)
        network.save('network_' + str(i) + '.ckpt')
        weights.append(w)
        networks.append(network)

        validate_boost(ValidSampler, networks, weights)
    validate_boost(TestSampler, networks, weights)

    np.save('weights.npy', weights)
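Each iteration above fits the new network to the residual left by the previous ones, so the ensemble is a boosting-style weighted sum. validate_boost is not shown in the excerpt; a minimal sketch of how such a weighted ensemble would be evaluated, assuming every network exposes the same predict interface (boosted_predict is a hypothetical helper):

import numpy as np

def boosted_predict(sampler, networks, weights):
    # Hypothetical helper: weighted sum of per-network predictions.
    total = np.zeros_like(networks[0].predict(sampler))
    for network, w in zip(networks, weights):
        total = total + w * network.predict(sampler)
    return total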
Example #3
 def __init__(self, min_action_set, hist_len, checkpoint_policy):
     self.minimal_action_set = min_action_set
     print("hist len", hist_len)
     # One output per action in the minimal action set; hist_len frames of input.
     self.network = Network(len(self.minimal_action_set), hist_len)
     # The checkpoint is a dict holding the weights under 'state_dict'.
     self.network.load_state_dict(
         torch.load(checkpoint_policy)['state_dict'])
     if torch.cuda.is_available():
         print("Initializing Cuda Nets...")
         self.network.cuda()
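The checkpoint loaded above is expected to be a dict with the weights stored under 'state_dict'. A minimal sketch of the matching save side, using standard PyTorch calls (the module and file name are illustrative):

import torch

network = torch.nn.Linear(4, 2)  # stand-in for the project's Network
torch.save({'state_dict': network.state_dict()}, 'policy.ckpt')

# The corresponding load, as in the example above:
network.load_state_dict(torch.load('policy.ckpt')['state_dict'])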
Example #4
File: play.py  Project: yardal/rlc
def test(a, b):
    white = Network(a)
    black = Network(b)
    print("training")
    white.train()

    print("playing")
    board = chess.Board()
    for i in range(10):
        print(i)
        board.reset()
        while not board.is_game_over():
            move = play(board, white, True)
            board.push(move)
            # Re-check after white's move so black never plays
            # on a finished game.
            if board.is_game_over():
                break
            move = play(board, black, False)
            board.push(move)
        print(board.result())
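play is not shown in the excerpt. A purely hypothetical stand-in that keeps the loop executable, using python-chess's legal_moves (the real project presumably scores moves with the network):

import random
import chess

def play(board, network, is_white):
    # Hypothetical stand-in: ignore the network, pick a random legal move.
    return random.choice(list(board.legal_moves))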
Example #5
 def __init__(self, min_action_set, learning_rate, alpha, checkpoint_dir,
              hist_len, l2_penalty):
     self.minimal_action_set = min_action_set
     print("hist len", hist_len)
     self.network = Network(len(self.minimal_action_set), hist_len)
     if torch.cuda.is_available():
         print("Initializing Cuda Nets...")
         self.network.cuda()
     # weight_decay applies the L2 penalty inside the Adam update.
     # (alpha is accepted but unused in this excerpt.)
     self.optimizer = optim.Adam(self.network.parameters(),
                                 lr=learning_rate,
                                 weight_decay=l2_penalty)
     self.checkpoint_directory = checkpoint_dir
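With the optimizer configured this way, a training step follows the standard PyTorch pattern. A self-contained sketch on a toy module (names and sizes are illustrative):

import torch
import torch.optim as optim

net = torch.nn.Linear(4, 2)
optimizer = optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)

x = torch.randn(8, 4)
target = torch.randn(8, 2)

optimizer.zero_grad()                                # clear old gradients
loss = torch.nn.functional.mse_loss(net(x), target)
loss.backward()                                      # backpropagate
optimizer.step()                                     # Adam update; weight_decay is the L2 penalty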
Example #6
        Iterator/Generator: get a batch of data
        This function is an iterator/generator that yields only chunk_size
        items at a time; use it in a for loop, just like the range() function.
        """
        if len(samples) != len(labels):
            raise Exception('Length of samples and labels must equal')
        stepStart = 0  # initial step
        i = 0
        while stepStart < len(samples):
            stepEnd = stepStart + chunk_size
            # <= so a chunk ending exactly at len(samples) is still yielded;
            # a trailing partial chunk is skipped.
            if stepEnd <= len(samples):
                yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
                i += 1
            stepStart = stepEnd

    net = Network(train_batch_size=64, test_batch_size=500, pooling_scale=2)
    net.define_inputs(
        train_samples_shape=(64, image_size, image_size, num_channels),
        train_labels_shape=(64, num_labels),
        test_samples_shape=(500, image_size, image_size, num_channels)
    )
    net.add_conv(patch_size=3, in_depth=num_channels, out_depth=16, activation='relu', pooling=False, name='conv1')
    net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=True, name='conv2')
    net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=False, name='conv3')
    net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=True, name='conv4')

    # 4 = two rounds of pooling, each halving the spatial size
    # 16 = conv4 out_depth
    net.add_fc(in_num_nodes=(image_size // 4) * (image_size // 4) * 16, out_num_nodes=16, activation='relu', name='fc1')
    net.add_fc(in_num_nodes=16, out_num_nodes=10, activation='relu', name='fc2')
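The chunked iterator at the top of this example is a generic pattern. A self-contained sketch of the same idea on plain NumPy arrays, showing how it is consumed in a for loop:

import numpy as np

def data_iterator(samples, labels, chunk_size):
    # Yield (index, sample_chunk, label_chunk) for each full chunk.
    if len(samples) != len(labels):
        raise ValueError('Length of samples and labels must be equal')
    step_start, i = 0, 0
    while step_start < len(samples):
        step_end = step_start + chunk_size
        if step_end <= len(samples):
            yield i, samples[step_start:step_end], labels[step_start:step_end]
            i += 1
        step_start = step_end

samples = np.arange(10).reshape(10, 1)
labels = np.arange(10)
for i, x, y in data_iterator(samples, labels, chunk_size=4):
    print(i, x.ravel(), y)  # two full chunks of 4; the trailing 2 items are skipped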
Example #7
        Use it in a for loop, just like the range() function.
        """
        if len(samples) != len(labels):
            raise Exception('Length of samples and labels must equal')
        stepStart = 0  # initial step
        i = 0
        while stepStart < len(samples):
            stepEnd = stepStart + chunkSize
            # <= so a chunk ending exactly at len(samples) is still yielded.
            if stepEnd <= len(samples):
                yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
                i += 1
            stepStart = stepEnd

    net = Network(train_batch_size=64,
                  test_batch_size=500,
                  pooling_scale=2,
                  dropout_rate=0.9,
                  base_learning_rate=0.001,
                  decay_rate=0.99)
    net.define_inputs(
        train_samples_shape=(64, image_size, image_size, num_channels),
        train_labels_shape=(64, num_labels),
        test_samples_shape=(500, image_size, image_size, num_channels),
    )
    #
    net.add_conv(patch_size=3,
                 in_depth=num_channels,
                 out_depth=32,
                 activation='relu',
                 pooling=False,
                 name='conv1')
    net.add_conv(patch_size=3,
Example #8
File: train.py  Project: drecker/moles
import os

import numpy as np
import sklearn.metrics
import sklearn.model_selection
from sklearn.tree import DecisionTreeClassifier as Model  # imported but unused below

import preprocessing  # project-local helpers used throughout
from cnn import Network

DATA_DIR = 'data'

# Each entry pairs a transform with a flag: apply it to the validation split too?
pipeline = [
    (preprocessing.extract_rgb, True),
    # (preprocessing.enrich_mirror, False),
]

data_file = os.path.join(DATA_DIR, 'data_train.dat')
targets_file = os.path.join(DATA_DIR, 'targets_train.dat')
data, targets = preprocessing.load_data(data_file), preprocessing.load_data(targets_file)

data_train, data_validation, targets_train, targets_validation = sklearn.model_selection.train_test_split(
    data, targets, test_size=0.25, random_state=42, shuffle=True, stratify=targets
)

for func, apply_test in pipeline:
    data_train, targets_train = func(data_train, targets_train)
    if apply_test:
        data_validation, targets_validation = func(data_validation, targets_validation)


print(data_train.shape[1:])
model = Network(input_shape=data_train.shape[1:])
model.fit(data_train, targets_train, data_validation, targets_validation)
predict = model.predict(data_validation)
print(sklearn.metrics.accuracy_score(targets_validation, predict))
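Each pipeline entry pairs a transform with a flag saying whether it should also run on the validation split; augmentations such as the commented-out enrich_mirror would normally be train-only. A hypothetical transform matching that (data, targets) -> (data, targets) contract:

import numpy as np

def enrich_mirror(data, targets):
    # Hypothetical augmentation: append horizontally mirrored copies of
    # every image, assuming a (N, height, width, channels) layout.
    mirrored = data[:, :, ::-1, :]
    return (np.concatenate([data, mirrored]),
            np.concatenate([targets, targets]))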
Example #9
        description='Farsi digit Detection Network')

    parser.add_argument("--cfg",
                        dest='cfgfile',
                        help="Config file",
                        default="cfg/architecture.cfg",
                        type=str)

    return parser.parse_args()


args = arg_parse()

#Set up the neural network
print("Preparing network .....")
network = Network(args.cfgfile)
network.compile()

print("Loading input .....")
dataset = Dataset()
x_train, y_train, x_test, y_test = dataset.loadData(
    network.net_info.input_shape)

# One-hot encode the integer labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

print("Training network .....")
network.fit(x_train, y_train, x_test, y_test)

print("evaluation: ")
Example #10
def main():
    current_time = datetime.now().strftime('%Y%m%d-%H%M')
    checkpoint_dir = 'checkpoints'
    if FLAGS.checkpoint is not None:
        # str.lstrip removes a set of characters, not a prefix, so strip
        # the leading 'checkpoints/' explicitly.
        checkpoint_name = FLAGS.checkpoint
        if checkpoint_name.startswith('checkpoints/'):
            checkpoint_name = checkpoint_name[len('checkpoints/'):]
        checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
    else:
        checkpoint_path = os.path.join(checkpoint_dir,
                                       '{}'.format(current_time))
        try:
            os.makedirs(checkpoint_path)
        except OSError:
            print('Unable to make checkpoints directory: %s' % checkpoint_path)
    model_save_path = os.path.join(checkpoint_path, 'model.ckpt')

    nn = Network()

    saver = tf.train.Saver()
    print('Build session.')
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)

    if FLAGS.checkpoint is not None:
        print('Restore from pre-trained model.')
        checkpoint = tf.train.get_checkpoint_state(checkpoint_path)
        meta_graph_path = checkpoint.model_checkpoint_path + '.meta'
        restore = tf.train.import_meta_graph(meta_graph_path)
        restore.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
        # Recover the global step from a path like '.../model.ckpt-<step>.meta'.
        step = int(meta_graph_path.split('-')[2].split('.')[0])
    else:
        print('Initialize.')
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        step = 0

    loss_list = []
    train_accuracy_list = []
    val_accuracy_list = []
    test_accuracy_list = []

    train_writer = tf.summary.FileWriter('logs/train@' + current_time,
                                         sess.graph)
    val_writer = tf.summary.FileWriter('logs/valid@' + current_time,
                                       sess.graph)
    summary_op = tf.summary.merge_all()

    print('Start training:')
    train_len = len(y_train)
    for epoch in range(config.num_epochs):
        permutation = np.random.permutation(train_len)
        X_train_data = X_train[permutation]
        y_train_data = y_train[permutation]
        data_idx = 0
        while data_idx < train_len:
            # End the slice at train_len so the final sample is not dropped.
            batch_end = min(data_idx + config.batch_size, train_len)
            X_train_batch = X_train_data[data_idx:batch_end]
            y_train_batch = y_train_data[data_idx:batch_end]
            data_idx += config.batch_size

            loss, _, train_accuracy, summary, lr = sess.run(
                [
                    nn.loss, nn.optimizer, nn.accuracy, summary_op,
                    nn.learning_rate
                ], {
                    nn.X_inputs: X_train_batch,
                    nn.y_inputs: y_train_batch,
                    nn.keep_prob: config.keep_prob,
                    nn.training: True
                })
            loss_list.append(loss)
            train_accuracy_list.append(train_accuracy)
            print(
                '>> At step %i: loss = %.2f, train accuracy = %.3f%%, learning rate = %.7f'
                % (step, loss, train_accuracy * 100, lr))
            train_writer.add_summary(summary, step)
            step += 1

        accuracy, summary = sess.run(
            [nn.accuracy, summary_op], {
                nn.X_inputs: X_val,
                nn.y_inputs: y_val,
                nn.keep_prob: 1.0,
                nn.training: False
            })
        val_accuracy_list.append(accuracy)
        print('For epoch %i: valid accuracy = %.2f%%\n' %
              (epoch, accuracy * 100))
        val_writer.add_summary(summary, epoch)

    test_len = len(y_test)
    data_idx = 0
    while data_idx < test_len:
        # Same clipping as the training loop: end the slice at test_len.
        batch_end = min(data_idx + config.batch_size, test_len)
        X_test_batch = X_test[data_idx:batch_end]
        y_test_batch = y_test[data_idx:batch_end]
        data_idx += config.batch_size

        test_accuracy = sess.run(
            nn.accuracy, {
                nn.X_inputs: X_test_batch,
                nn.y_inputs: y_test_batch,
                nn.keep_prob: 1.0,
                nn.training: False
            })
        test_accuracy_list.append(test_accuracy)

    save_path = saver.save(sess, model_save_path, global_step=step)
    print('Model saved in file: %s' % save_path)
    sess.close()
    train_writer.close()
    val_writer.close()
    print('Test accuracy = %.2f%%\n' % (np.mean(test_accuracy_list) * 100))
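The checkpoint saved at the end can later be restored for inference with the same TF1-style API. A minimal sketch, assuming a run directory written by the script above (the path is hypothetical):

import tensorflow as tf

checkpoint_path = 'checkpoints/20180101-1200'  # hypothetical run directory
checkpoint = tf.train.get_checkpoint_state(checkpoint_path)

with tf.Session() as sess:
    restorer = tf.train.import_meta_graph(
        checkpoint.model_checkpoint_path + '.meta')
    restorer.restore(sess, checkpoint.model_checkpoint_path)
    # Tensors can then be fetched from the restored graph by name, e.g.:
    # graph = tf.get_default_graph()
    # accuracy = graph.get_tensor_by_name('accuracy:0')  # name is illustrative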