Example #1
    def _decoder(self, h, reuse=False):

        with tf.variable_scope('decoder', reuse=reuse):

            hidden_layer = mlp(h,
                               self.decoder_layer_sizes,
                               dropout_pct=self.dropout_pct,
                               activation_fn=self.activation_fn,
                               training=self.is_training,
                               reuse=reuse)

            mu = tf.layers.dense(hidden_layer,
                                 self.n_outputs,
                                 activation=None,
                                 name='mu',
                                 reuse=reuse)

            logvar = tf.layers.dense(hidden_layer,
                                     self.n_outputs,
                                     activation=None,
                                     name='logvar',
                                     reuse=reuse)

        self.t_pred = tf.exp(mu)

        self.decoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                              scope='decoder')

        return mu, logvar
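
For context: Examples #1, #4, and #6 all call an mlp(...) helper with a list of layer sizes, a dropout percentage, an activation, and (sometimes) training/reuse flags. The sketch below is a hypothetical reconstruction of that helper in TF 1.x style, not the project's actual implementation:

def mlp(inputs, layer_sizes, dropout_pct=0., activation_fn=tf.nn.relu,
        training=False, reuse=False):
    # hypothetical helper: a stack of dense layers with optional dropout
    hidden = inputs
    for i, size in enumerate(layer_sizes):
        hidden = tf.layers.dense(hidden, size, activation=activation_fn,
                                 name='dense_%d' % i, reuse=reuse)
        if dropout_pct > 0.:
            hidden = tf.layers.dropout(hidden, rate=dropout_pct,
                                       training=training)
    return hidden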
Example #2
 def __init__(self, state_size, action_size):
     self.state_size = state_size
     self.action_size = action_size
     self.memory = deque(maxlen=2000)
     self.gamma = 0.95  # discount rate
     self.epsilon = 1.0  # exploration rate
     self.epsilon_min = 0.01
     self.epsilon_decay = 0.995
     self.model = mlp(state_size, action_size)
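
The exploration fields above (epsilon, epsilon_min, epsilon_decay) are the usual epsilon-greedy knobs of a DQN agent. The sketch below shows how they are typically consumed; the method names, the numpy (np) dependency, and the Keras-style model.predict call are illustrative assumptions, not code from the original source:

 def act(self, state):
     # explore with probability epsilon, otherwise act greedily on predicted Q-values
     if np.random.rand() <= self.epsilon:
         return np.random.randint(self.action_size)
     q_values = self.model.predict(state)
     return int(np.argmax(q_values[0]))

 def decay_epsilon(self):
     # anneal exploration after each replay step, never below epsilon_min
     if self.epsilon > self.epsilon_min:
         self.epsilon *= self.epsilon_decay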
Example #3
 def __init__(self, state_size, action_size, invest_range=(0, 10)):
     self.state_size = state_size
     self.action_size = action_size
     self.memory = deque(maxlen=2000)
     self.gamma = 0.95  # discount rate
     self.epsilon = 1.0  # exploration rate
     self.epsilon_min = 0.01
     self.epsilon_decay = 0.995
     self.invest_range = invest_range
     self.invest_num = invest_range[1] - invest_range[0] + 1
     self.model = mlp(state_size,
                      self.invest_num * action_size[0] * action_size[1])
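
Example #3 folds a three-dimensional action space (invest level times two action axes) into a single flat output dimension. A flat arg-max index over that output is commonly decoded back into its components as sketched below; the method is illustrative (it assumes numpy as np) and is not part of the original source:

 def decode_action(self, flat_index):
     # inverse of the flattening used for the network's output layer
     return np.unravel_index(
         flat_index,
         (self.invest_num, self.action_size[0], self.action_size[1]))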
Example #4
    def _build_x(self):

        with tf.variable_scope('embeddings'):

            x_refined = mlp(self.xv,
                            self.embedding_layer_sizes,
                            dropout_pct=0.,
                            activation_fn=tf.nn.tanh)

            x_max = tf.reduce_max(x_refined, axis=1)
            x_mean = tf.reduce_mean(x_refined, axis=1)

            self.x = tf.concat([x_max, x_mean, self.xf], axis=1)
Example #5
    def __init__(self, actions, input_dim, model_dir):
        # action-selection hyper-parameters
        self.epsilon = 0.9
        # Q-function hyper-parameters
        self.gamma = 0.01
        # neural network hyper-parameters
        self.lr = 0.001

        self.actions = actions
        output_dim = len(actions)

        # neural network input and output placeholder
        self.x = tf.placeholder(tf.float32, [None, input_dim])
        self.y = tf.placeholder(tf.float32, [output_dim])

        # hidden layer dimension
        h1_dim = 200

        # model inference
        self.q = mlp("mlp0", self.x, input_dim, h1_dim, output_dim)

        # loss
        loss = tf.square(self.y - self.q)

        # train operation
        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)

        # session
        self.sess = tf.Session()

        # initialize
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)

        # saver
        self.saver = tf.train.Saver(tf.trainable_variables())

        # load model
        ckpt = tf.train.get_checkpoint_state(model_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("load model: %s" % ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
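
Given the placeholders, Q-network, and train_op built above, a single learning update would look roughly like the sketch below. The method name and the way the target vector is produced elsewhere are assumptions made for illustration:

    def update(self, state, target_q):
        # one Adam step that moves q(state) toward the supplied target Q-values
        self.sess.run(self.train_op,
                      feed_dict={self.x: state, self.y: target_q})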
Example #6
    def _encoder(self, h):

        with tf.variable_scope('encoder'):

            hidden_layer = mlp(h,
                               self.encoder_layer_sizes,
                               dropout_pct=self.dropout_pct,
                               training=self.is_training,
                               activation_fn=self.activation_fn)

            logits = tf.layers.dense(hidden_layer,
                                     self.n_outputs,
                                     activation=None,
                                     name='logit_weights')

            probs = tf.nn.sigmoid(logits)

        self.encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                              scope='encoder')

        return logits, probs
Example #7
def run_train(ps_hosts, worker_hosts, job_name, task_index, model_f, data_path,
              output_path, param_path):

    # ======================================
    # Variables
    ps_hosts = ps_hosts.split(",")
    worker_hosts = worker_hosts.split(",")

    param_dict = load_json(param_path)

    # Create a cluster from the parameter server and worker hosts.
    cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})

    # Create and start a server for the local task.
    server = tf.train.Server(cluster, job_name=job_name, task_index=task_index)

    if job_name == "ps":
        server.join()
    elif job_name == "worker":

        # Load Data
        (X_train, Y_train, X_valid, Y_valid, _,
         _) = read_data(data_path, param_dict['train_ratio'],
                        param_dict['valid_ratio'])

        print("=" * 30)
        print("X_train shape: {}".format(X_train.shape))
        print("Y_train shape: {}".format(Y_train.shape))
        print("X_valid shape: {}".format(X_valid.shape))
        print("Y_valid shape: {}".format(Y_valid.shape))
        print("=" * 30)

        # Inference output dimension
        output_dim = len(Y_train[0])

        # Check is_chief
        is_chief = task_index == 0

        # Assigns ops to the local worker by default.
        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % task_index,
                    cluster=cluster)):

            # Build model...
            # Datasets
            train_X_dataset = tf.data.Dataset.from_tensor_slices(X_train)
            train_Y_dataset = tf.data.Dataset.from_tensor_slices(Y_train)
            train_dataset = tf.data.Dataset.zip(
                (train_X_dataset, train_Y_dataset))
            train_dataset = train_dataset.shuffle(
                param_dict['dataset_shuffle_buffer_size']).batch(
                    param_dict['batch_size']).repeat(param_dict['n_epoch'])

            if is_chief:
                valid_X_dataset = tf.data.Dataset.from_tensor_slices(X_valid)
                valid_Y_dataset = tf.data.Dataset.from_tensor_slices(Y_valid)
                valid_dataset = tf.data.Dataset.zip(
                    (valid_X_dataset, valid_Y_dataset))
                valid_dataset = valid_dataset.shuffle(
                    param_dict['dataset_shuffle_buffer_size']).batch(
                        param_dict['batch_size'])

            # Feedable Iterator
            handle = tf.placeholder(tf.string, shape=[])
            iterator = tf.data.Iterator.from_string_handle(
                handle, train_dataset.output_types,
                train_dataset.output_shapes)

            # Iterators
            train_iterator = train_dataset.make_one_shot_iterator()
            train_handle_tensor = train_iterator.string_handle()

            if is_chief:
                valid_iterator = valid_dataset.make_initializable_iterator()
                valid_handle_tensor = valid_iterator.string_handle()

            X, Y = iterator.get_next()
            is_training = tf.placeholder_with_default(False,
                                                      shape=None,
                                                      name="is_training")

            global_step = tf.train.get_or_create_global_step()

            logits = mlp(X=X,
                         output_dim=output_dim,
                         is_training=is_training,
                         **param_dict['model_param'])

            Y_pred = slim.softmax(logits)

            loss = slim.losses.softmax_cross_entropy(logits, Y)
            accuracy, correct = calc_metric(Y, Y_pred)

            train_op = tf.train.AdamOptimizer(
                param_dict['learning_rate']).minimize(loss,
                                                      global_step=global_step)

            tf.add_to_collection('X', X)
            tf.add_to_collection('Y_pred', Y_pred)

            #saved_model_tensor_dict = build_saved_model_graph(X,
            #                                                  Y_pred,
            #                                                  saved_model_path)

        # The StopAtStepHook handles stopping after running given steps.
        # hooks = [tf.train.StopAtStepHook(last_step=1000000)]

        # The MonitoredTrainingSession takes care of session initialization,
        # restoring from a checkpoint, saving to a checkpoint, and closing when done
        # or an error occurs.
        with tf.train.MonitoredTrainingSession(
                master=server.target,
                is_chief=is_chief,
                checkpoint_dir=output_path,
                # hooks=hooks,
        ) as mon_sess:

            # Get dataset handles (the validation iterator exists only on the chief)
            train_handle = mon_sess.run(train_handle_tensor)
            if is_chief:
                valid_handle = mon_sess.run(valid_handle_tensor)

            # Metric window
            acc_window = [0.] * TRAIN_METRIC_WINDOW
            loss_window = [0.] * TRAIN_METRIC_WINDOW

            batch_i = 0
            while not mon_sess.should_stop():
                # Run a training step asynchronously.
                mon_sess.run(train_op,
                             feed_dict={
                                 is_training: True,
                                 handle: train_handle,
                             })
                if is_chief:
                    train_accuracy, train_loss = mon_sess.run([accuracy, loss],
                                                              feed_dict={
                                                                  is_training:
                                                                  False,
                                                                  handle:
                                                                  train_handle,
                                                              })
                    acc_window = acc_window[1:] + [train_accuracy]
                    loss_window = loss_window[1:] + [train_loss]

                    if batch_i % VERBOSE_INTERVAL == 0:
                        recent_mean_train_accuracy = sum(acc_window) / len(
                            acc_window)
                        recent_mean_train_loss = sum(loss_window) / len(
                            loss_window)

                        valid_i = 0
                        valid_correct = 0
                        valid_loss = 0
                        valid_total_num = 0

                        mon_sess.run(valid_iterator.initializer)
                        while True:
                            try:
                                (batch_Y_pred, batch_valid_correct,
                                 batch_valid_loss) = mon_sess.run(
                                     [Y_pred, correct, loss],
                                     feed_dict={
                                         is_training: False,
                                         handle: valid_handle,
                                     })
                                curr_batch_num = batch_Y_pred.shape[0]
                                valid_correct += batch_valid_correct.sum()
                                valid_loss += batch_valid_loss * curr_batch_num
                                valid_total_num += curr_batch_num
                                valid_i += 1
                            except tf.errors.OutOfRangeError:
                                break
                        valid_accuracy = valid_correct / valid_total_num
                        valid_loss = valid_loss / valid_total_num

                        print("-" * 30)
                        print("recent_mean_train_accuracy : {}".format(
                            recent_mean_train_accuracy))
                        print("recent_mean_train_loss : {}".format(
                            recent_mean_train_loss))
                        print("valid_accuracy : {}".format(valid_accuracy))
                        print("valid_loss : {}".format(valid_loss))

                batch_i += 1
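
Example #7 depends on a calc_metric helper that is not shown. The sketch below is one implementation consistent with how accuracy (a scalar averaged over the window) and correct (summed per validation batch) are used in the loop above; it is an assumption about the helper, not its actual code:

def calc_metric(Y, Y_pred):
    # per-example correctness as float 0/1 plus the batch-mean accuracy
    correct = tf.cast(
        tf.equal(tf.argmax(Y, axis=1), tf.argmax(Y_pred, axis=1)), tf.float32)
    accuracy = tf.reduce_mean(correct)
    return accuracy, correct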
Example #8
import numpy as np
import pandas as pd

from keras.datasets import mnist
from keras.optimizers import adam
from keras.utils import np_utils
from model import mlp

np.random.seed(1671)
path = "data/GBPUSD.csv"

Data = pd.read_csv(path,
                   names=[
                       'close', 'Open', 'High', 'Low', 'SMA200', 'EMA15',
                       'EMA12', 'EMA26', 'MACD',
                       'bollinger_band_upper_3sd_200',
                       'bollinger_band_lower_3sd_200', 'stochK', 'stockD'
                   ])

mlp(13, 4)

Num_epoch = 200
Batch_size = 128
Verbose = 1
Num_classes = 10
Optimiser = adam
N_hidden = 128
Validation_split = 0.2

(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
Reshaped = 784

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# normalise the data
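
The excerpt stops at the normalisation comment. The variables it sets up (Reshaped, Num_classes, np_utils) point at the standard MNIST preprocessing; the lines below are an assumed continuation, not taken from the original:

X_train = X_train.reshape(-1, Reshaped) / 255.0
X_test = X_test.reshape(-1, Reshaped) / 255.0
Y_train = np_utils.to_categorical(Y_train, Num_classes)
Y_test = np_utils.to_categorical(Y_test, Num_classes)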
Example #9
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

#parameters
input_dim = 22 * 4
output_dim = 12
n_run = 25
model_name = 'wc_predicter.pt'
train_path = 'data/Train_data/'
train_frame = 200
test_path = 'data/Test_data/'
predict_path = 'data/Test_ML_MD/'
predict_frame = 20
test_frame = 50
atype_dict = {'O': 0, 'H1': 1, 'H2': 2, 'WC0': 3, 'WC1': 4, 'WC2': 5, 'WC3': 6}
atype_inv = {0: 'O', 1: 'H1', 2: 'H2'}

#create model
model = mlp(input_dim=input_dim, output_dim=output_dim).to(device)

if run_type == 'train':
    train(model, atype_dict, n_run, model_name, train_path, train_frame,
          test_path, test_frame)
else:
    resume(model_name, model)
    model.eval()
    make_prediction(model,
                    predict_path,
                    predict_frame,
                    atype_dict,
                    savedir='predicted_temp')
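
The resume call above is not defined in the excerpt. A plausible minimal implementation in PyTorch, assuming torch is imported and the checkpoint file holds a plain state_dict (an assumption, not the original code):

def resume(model_name, model):
    # load saved weights onto whatever device the model already lives on
    state = torch.load(model_name,
                       map_location=next(model.parameters()).device)
    model.load_state_dict(state)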
Example #10
def main(unused_argv):
    mnist = tf.contrib.learn.datasets.load_dataset('mnist')

    if FLAGS.job_name is None or FLAGS.job_name == '':
        raise ValueError('Must specify an explicit job_name!')
    else:
        print('job_name : %s' % FLAGS.job_name)
    if FLAGS.task_index is None or FLAGS.task_index == '':
        raise ValueError('Must specify an explicit task_index!')
    else:
        print('task_index : %d' % FLAGS.task_index)

    ps_spec = FLAGS.ps_hosts.split(',')
    worker_spec = FLAGS.worker_hosts.split(',')

    # Create the cluster
    cluster = tf.train.ClusterSpec({'ps': ps_spec, 'worker': worker_spec})
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index,
                             config=config)
    if FLAGS.job_name == 'ps':
        server.join()

    is_chief = (FLAGS.task_index == 0)
    with tf.device(tf.train.replica_device_setter(cluster=cluster)):
        global_step = tf.Variable(0, name='global_step', trainable=False)
        x = tf.placeholder(tf.float32,
                           [None, FLAGS.image_pixels * FLAGS.image_pixels])
        y_ = tf.placeholder(tf.int32, [None])

        logits = model.mlp(x, FLAGS.image_pixels, FLAGS.hidden_units,
                           FLAGS.num_classes)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y_, logits=logits)
        # accuracy
        accuracy = tf.metrics.accuracy(y_, tf.argmax(logits, axis=1))

        train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
            cross_entropy, global_step=global_step)

        # Build the local variable-initialization op (init_op)
        init_op = tf.global_variables_initializer()

    #train_dir = tempfile.mkdtemp()
    train_dir = FLAGS.train_dir
    # Note: Supervisor is deprecated; MonitoredTrainingSession is the recommended replacement
    # logdir: checkpoint save dir
    sv = tf.train.Supervisor(is_chief=is_chief,
                             logdir=train_dir,
                             init_op=init_op,
                             recovery_wait_secs=1,
                             global_step=global_step)

    if is_chief:
        print('Worker %d: Initializing session...' % FLAGS.task_index)
    else:
        print('Worker %d: Waiting for session to be initialized...' %
              FLAGS.task_index)

    sess = sv.prepare_or_wait_for_session(server.target)
    print('Worker %d: Session initialization complete.' % FLAGS.task_index)

    time_begin = time.time()
    print('Training begins @ %f' % time_begin)

    local_step = 0
    while True:
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
        train_feed = {x: batch_xs, y_: batch_ys}

        _, step = sess.run([train_op, global_step], feed_dict=train_feed)
        local_step += 1

        now = time.time()
        print('%f: Worker %d: training step %d done (global step: %d)' %
              (now, FLAGS.task_index, local_step, step))

        if step >= FLAGS.train_steps:
            break

    time_end = time.time()
    print('Training ends @ %f' % time_end)
    train_time = time_end - time_begin
    print('Training elapsed time: %f s' % train_time)

    if is_chief:
        val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        val_xent, val_accuracy = sess.run([cross_entropy, accuracy],
                                          feed_dict=val_feed)
        print('After %d training step(s)' % FLAGS.train_steps)
        print('cross_entropy:')
        print(np.mean(val_xent))
        print("accuracy:")
        print(val_accuracy)
    sess.close()
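
One subtlety in Example #10: tf.metrics.accuracy returns an (accuracy, update_op) pair backed by local variables, so running the raw tuple as above yields both tensors rather than a single number. A hedged sketch of the more explicit pattern (illustrative only):

acc_value, acc_update = tf.metrics.accuracy(y_, tf.argmax(logits, axis=1))
sess.run(tf.local_variables_initializer())   # the streaming counters are local variables
sess.run(acc_update, feed_dict=val_feed)     # accumulate correct/total counts
val_accuracy = sess.run(acc_value)           # read the streamed accuracy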
Example #11
import warnings

import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.stats.proportion import proportion_confint
from model import logistic, xgb, mlp

warnings.filterwarnings("ignore")

# load data
X_train = np.load("data/train.npy")
y_train = np.load("data/train_labels.npy")
X_test = np.load("data/test.npy")
y_test = np.load("data/test_labels.npy")

# print data shape
print("Training data shape: {}".format(X_train.shape))
print("Training labels shape: {}".format(y_train.shape))
print("Testing data shape: {}".format(X_test.shape))
print("Testing labels shape: {}".format(y_test.shape))

yhat, probs = mlp(X_train, y_train, X_test)

# accuracy

# confidence interval

# classification report

# confusion matrix

# roc_auc

# fpr, tpr
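
The metric sections above are left as bare comments. The sketch below shows the scikit-learn / statsmodels calls those headings usually correspond to, assuming a binary task in which probs holds the positive-class probabilities; none of it is from the original source:

from sklearn.metrics import (accuracy_score, classification_report,
                             confusion_matrix, roc_auc_score, roc_curve)

acc = accuracy_score(y_test, yhat)
ci_low, ci_high = proportion_confint(count=(yhat == y_test).sum(),
                                     nobs=len(y_test), alpha=0.05)
print(classification_report(y_test, yhat))
print(confusion_matrix(y_test, yhat))
auc = roc_auc_score(y_test, probs)
fpr, tpr, _ = roc_curve(y_test, probs)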
Example #12
def train(speckle_path,
          image_path,
          check_point,
          decoder_path,
          save_path,
          code_length=128,
          lr=1e-3,
          batch_size=64,
          epochs=100,
          use_pretrain=True,
          model_type='autoencoder'):
    X_train = np.load(speckle_path[0])
    Y_train = np.load(image_path[0])
    X_val = np.load(speckle_path[1])
    Y_val = np.load(image_path[1])

    # `flatten` is passed to the Test callback and assess() below for every branch;
    # initialise it here so the non-mlp branches do not raise a NameError.
    # False is the only value that appears in the original excerpt (an assumption).
    flatten = False

    if model_type == 'autoencoder':
        E = encoder(code_length=code_length)

        if use_pretrain:
            D = load_model(decoder_path)
        else:
            D = decoder(code_length=code_length)

        model = encoder_decoder(E, D)

    elif model_type == 'unet':
        model = unet()

    elif model_type == 'mlp':
        model = mlp(input_length=1024,
                    hidden_units=1024,
                    hidden_layers=7,
                    activation="relu",
                    batchnorm=True,
                    dropout=0.1)

        X_train = np.reshape(X_train, (3500, 16384))
        # Y_train = np.reshape(Y_train, (3500, 1024))
        X_val = np.reshape(X_val, (500, 16384))
        # Y_val = np.reshape(Y_val, (500, 1024))

        pca = PCA(n_components=1024)
        X_train = pca.fit_transform(X_train)
        X_val = pca.transform(X_val)

        flatten = False

    adam = Adam(lr=lr)
    model.compile(optimizer=adam, loss='mse')
    model.summary()
    history = model.fit(X_train,
                        Y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_data=(X_val, Y_val),
                        callbacks=[Test(X_val, Y_val, check_point, flatten)],
                        shuffle=True)

    np.save(save_path + 'loss.npy', history.history['loss'])
    np.save(save_path + 'val_loss.npy', history.history['val_loss'])

    X_test = np.load(speckle_path[2])
    Y_test = np.load(image_path[2])

    if model_type == 'mlp':
        X_test = np.reshape(X_test, (1000, 16384))
        # Y_test = np.reshape(Y_test, (1000, 1024))
        X_test = pca.transform(X_test)

    model = load_model(check_point)
    print("final result:")
    a, b, c = assess(model, X_test, Y_test, flatten)
Example #13
def main():
    # ---------- general parameters ----------
    epsilon = 0.1  # noise multiplier
    batch_size = 64
    adversary_batches = 5  # number of adversary batches to create
    epochs = 5  # number of training / testing epochs

    # ---------- model definition ----------
    # getting batches to train
    x_train, y_train, x_test, y_test, n_labels = load_datasets(batch_size)

    # setting placeholders
    x_ph = tf.placeholder(tf.float32,
                          shape=[batch_size, 28, 28],
                          name='X_input')
    y_ph = tf.placeholder(tf.int32, shape=[batch_size], name='Y_input')

    # defining model, loss, regularizer, and optimizer
    regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
    model = mlp(x_ph, n_labels, regularizer=regularizer)
    loss_func = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_ph,
                                                               logits=model,
                                                               name='loss')
    reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    reg_term = tf.contrib.layers.apply_regularization(regularizer,
                                                      reg_variables)
    loss = loss_func + reg_term
    optimizer = tf.train.AdamOptimizer().minimize(loss)

    # variable initializer
    init = tf.global_variables_initializer()

    # ---------- training and generating examples ----------
    with tf.Session() as sess:
        sess.run(init)
        # training the network
        for epoch in range(epochs):
            for data, labels in zip(x_train, y_train):
                sess.run([loss, optimizer, model],
                         feed_dict={
                             x_ph: data,
                             y_ph: labels
                         })

        # ---------- generating adversarial examples ----------
        # 1. choosing batches of images out of the test set

        inds = np.random.randint(len(x_test), size=adversary_batches)
        data_set = [x_test[ind] for ind in inds]
        label_set = [y_test[ind] for ind in inds]

        # 2. calculating the sign of the gradient of the loss function with respect to the images
        gradients = tf.gradients(loss, [x_ph])
        s_gradients = tf.sign(gradients)

        original_accuracies = list()
        adversarial_accuracies = list()
        examples = list()
        for data, labels in zip(data_set, label_set):
            s_grads = sess.run([s_gradients],
                               feed_dict={
                                   x_ph: data,
                                   y_ph: labels
                               })
            s_grads = np.reshape(s_grads[0], s_grads[0].shape[1:])

            # 3. creating the adversarial versions of the images in the batch
            adversarial_images = data + epsilon * s_grads

            # 4. checking accuracy on original images
            logits = sess.run([model], feed_dict={x_ph: data})
            logits = logits[0]
            original_accuracies.append(calculate_accuracy(logits, labels))

            # 5. checking accuracy on adversarial images
            logits = sess.run([model], feed_dict={x_ph: adversarial_images})
            logits = logits[0]
            adversarial_accuracies.append(calculate_accuracy(logits, labels))

            if len(examples) < 2:
                examples.append(data[0])
                examples.append(adversarial_images[0])

        # 6. comparison and showing examples
        print('average accuracy of original images:',
              np.mean(original_accuracies))
        print('average accuracy of adversarial images:',
              np.mean(adversarial_accuracies))
        plt.figure()
        plt.imshow(examples[0], cmap='gray')
        plt.figure()
        plt.imshow(examples[1], cmap='gray')
        plt.show()
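
Example #13 assumes a calculate_accuracy helper. The sketch below is one implementation consistent with how it is called on a batch of logits and integer labels above; it is an assumed reconstruction, not the original helper:

def calculate_accuracy(logits, labels):
    # fraction of examples whose arg-max prediction matches the integer label
    return float(np.mean(np.argmax(logits, axis=1) == labels))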