Example #1
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
    """
    Load & generate training examples from multivariate time series data
    :return: data iters & variables required to define network architecture
    """
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['v(pad)'], shape='ncw')
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vpad)'])
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vi)'])
    _data, _label = gen_data(filename='1cycle_iv_small.txt',
                             input_list=['v(i)'],
                             output_list=['v(pad)'])
    global data_scale, label_scale
    #data_scale = np.max(_data)
    #label_scale = np.max(_label)
    print("Scale: %.2e %.2e" % (data_scale, label_scale))
    _data = _data / data_scale  # scale
    _label = _label / label_scale  # scale
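    # replicate the loaded examples 200x to enlarge the dataset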
    _data = np.concatenate([_data] * 200)
    _label = np.concatenate([_label] * 200)

    _data = np.atleast_3d(_data)
    _label = np.atleast_3d(_label)
    data_len = len(_data)
    print("Shape: ", _data.shape)  # (samples, seq_len, features)
    #sys.exit(0)

    m = int(splits[0] * data_len)
    m += m % batch_size
    k = int(splits[1] * data_len)
    k += k % batch_size

    idx = np.random.choice(data_len, size=data_len, replace=False)
    train_idx = idx[:m]
    val_idx = idx[m:m + k]
    test_idx = idx[m + k:]

    #X = _data[:m]
    #y = _label[:m]
    X = _data[train_idx, :]
    y = _label[train_idx, :]
    train_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #train_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("train_data shape: ", X.shape, y.shape)

    #X = _data[m:m+k]
    #y = _label[m:m+k]
    X = _data[val_idx, :]
    y = _label[val_idx, :]
    val_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #val_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("val_data shape: ", X.shape, y.shape)

    #X = _data[m+k:]
    #y = _label[m+k:]
    X = _data[test_idx, :]
    y = _label[test_idx, :]
    test_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #test_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=1, shuffle=False)
    print("test_data shape: ", X.shape, y.shape)
    return train_iter, val_iter, test_iter
Example #2
def launch():
    if request.method == 'POST':
        data = request.get_data()
        hlo_module = hlo_pb2.HloProto()
        hlo_module.ParseFromString(data)
        res = gen_data(hlo_module.hlo_module)
        instruction_feats = tf.convert_to_tensor(res["instruction_feats"], dtype=tf.float32)
        computation_feats = tf.convert_to_tensor(res["computation_feats"], dtype=tf.float32)
        final_feats = tf.convert_to_tensor(res["final_feats"], dtype=tf.float32)
        instruction_edge_feats = tf.convert_to_tensor(res["instruction_edge_feats"], dtype=tf.float32)
        call_computation_edge_feats = tf.convert_to_tensor(res["call_computation_edge_feats"],
                                                           dtype=tf.float32)
        in_computation_edge_feats = tf.convert_to_tensor(res["in_computation_edge_feats"], dtype=tf.float32)
        to_final_edge_feats = tf.convert_to_tensor(res["to_final_edge_feats"], dtype=tf.float32)

        input = [instruction_feats, computation_feats, final_feats,
                 instruction_edge_feats, call_computation_edge_feats,
                 in_computation_edge_feats, to_final_edge_feats]
        graph = res["graph"]
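        # the model is shared across requests, so setting its graph and running inference happen under a lock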
        my_lock.acquire()
        model.set_graph(graph)
        ranklogit = model(input, training=False)
        my_lock.release()
        ranklogit = tf.math.reduce_mean(ranklogit).numpy()

        req = {
            "code": "0000",
            "result": str(ranklogit),
        }
        return str(ranklogit)
Example #3
def get_data():
    stuid = request.args.get('stuid')
    if stuid is None:
        return jsonify({'error': 'Student ID must not be empty'})
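    # gen_data() returns the challenge payload sent to the client and the private state cached in Redis for an hour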
    challenge_data, redis_data = gen_data()
    data_id = challenge_data['uuid']
    redis_data['stuid'] = stuid
    r.set(data_id, json.dumps(redis_data), ex=3600)
    return jsonify(challenge_data)
Example #4
def main():
    # no meaningful results
    #_data, _label = gen_data(filename='500cycle.txt',input_list=['v(i)'], output_list=['v(pad)'], seq_len=8) # multiple cycle won't help at all
    #_data, _label = gen_data(filename='1cycle_short.txt',input_list=['v(i)'], output_list=['v(pad)']) 
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['v(pad)'])  # rload not good
    _data, _label = gen_data(filename='1cycle_iv.txt',input_list=['v(i)'], output_list=['i(vpad)']) 
    _data = np.concatenate([_data]*10) # duplicate
    _label = np.concatenate([_label]*10)
    window_size = len(_data[0]) # actually it's sequence length or num_steps
    evaluate_timeseries_with_label(_data, _label, window_size)
Example #5
def main():
    #_data, _label = gen_data(filename='1cycle_iv_small.txt',input_list=['v(i)'], output_list=['v(pad)'])
    _data, _label = gen_data(filename='1cycle_iv_small_100p.txt',
                             input_list=['v(i)'],
                             output_list=['i(vi)'])
    #_data, _label = gen_data(filename='1cycle_iv_small_10p.txt',input_list=['v(i)'], output_list=['i(vpad)'])
    _data = np.concatenate([_data] * 60)
    _label = np.concatenate([_label] * 60)
    global data_scale, label_scale
    data_scale = normalize(_data, axis=1)
    label_scale = normalize(_label, axis=1)

    window_size = len(_data[0])  # actually it's sequence length or num_steps
    evaluate_timeseries_with_label(_data, _label, window_size)
Example #6
def main():
    #_data, _label = gen_data(filename='1cycle.txt',input_list=['v(i)'], output_list=['v(pad)'])
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vi)'])
    _data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vpad)'])
    global data_scale, label_scale
    data_scale = np.max(_data)
    label_scale = np.max(_label)
    _data = _data/data_scale
    _label = _label/label_scale
    _data = np.concatenate([_data]*3)
    _label = np.concatenate([_label]*3)

    window_size = len(_data[0]) # actually it's sequence length or num_steps
    evaluate_timeseries_with_label(_data, _label, window_size)
Example #7
def plot_raw(f, input_list, output_list, sweep=None):
    _data, _label = gen_data(filename=f,
                             input_list=input_list,
                             output_list=output_list)
    assert (_data.shape[2] == 1)
    assert (_label.shape[2] == 1)
    _data = _data.squeeze(axis=-1)
    _label = _label.squeeze(axis=-1)
    print(_data.shape)
    sweepNo = _data.shape[0]
    if sweep is None:
        sweep = np.random.choice(sweepNo, size=10)
    else:
        assert (isinstance(sweep, list))
    print("Sweeps: ", sweep)
    for i in sweep:
        indexes = np.arange(_data[i].size)
        plt.figure(i)
        #plt.plot(indexes, _data[i], label='X')
        plt.plot(indexes, _label[i], label='y')
        plt.legend(loc='upper right')
    plt.show()
Example #8
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
    """
    Load & generate training examples from multivariate time series data
    :return: data iters & variables required to define network architecture
    """
    _data, _label = gen_data(filename='500cycle.txt',
                             input_list=['v(i)', 'i(vi)'],
                             output_list=['v(pad)', 'i(vpad)'],
                             seq_len=q)
    #_data, _label = gen_data(filename='1cycle.txt',input_list=['v(i)'], output_list=['v(pad)']) # doesn't work because of shape or else?
    _data = np.atleast_3d(_data)
    _label = np.atleast_3d(_label)
    data_len = len(_data)
    print("Shape: ", _data.shape)
    #sys.exit(0)

    m = int(splits[0] * data_len)
    m += m % batch_size
    k = int(splits[1] * data_len)
    k += k % batch_size

    X = _data[:m]
    y = _label[:m]
    train_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #train_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("train_data shape: ", X.shape, y.shape)

    X = _data[m:m + k]
    y = _label[m:m + k]
    val_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #val_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("val_data shape: ", X.shape, y.shape)

    X = _data[m + k:]
    y = _label[m + k:]
    test_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #test_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=1, shuffle=False)
    print("test_data shape: ", X.shape, y.shape)
    return train_iter, val_iter, test_iter
Example #9
def train():
    x = []
    y = []
    na = []
    for i in test:
        temp = {}
        temp["off"] = i["off"]
        temp["UID"] = i["UID"]
        x.append(temp)
        y.append(np.concatenate([question, np.array(i["words"])], axis=0))
        na.append(np.ones(shape=(len(question) + len(i["words"]))))
    train_F = gen_data("None", 'f', 'f', na=na, da=y)
    evaluatorf = Evaluate(x, y, na, train_F)
    ep = 150
    now = 0
    increa = 0.004
    while now < ep and len(train_F) != 0:
        if os.path.exists("decoder_train_model.weights"):
            print("loading........")
            decoder_train_model.load_weights('decoder_train_model.weights')
        thistory = decoder_train_model.fit_generator(train_F.__iter__(),
                                                     steps_per_epoch=len(train_F),
                                                     epochs=1,
                                                     callbacks=[evaluatorf])
        hi = np.mean(thistory.history["loss"])
        print("loss:", hi)
        print("save....")
        decoder_train_model.save_weights('decoder_train_model.weights')
        print(now)
        evaluatorf.updata_increa(increa)
        now = now + 1
        if hi < 0.01:
            break
    x = evaluatorf.ret()
    for t1 in range(len(x)):
        x[t1]["words"] = fenlei_model.predict(np.array(x[t1]["words"]).reshape(1, 1, word_size))[-1][-1]
    return x
Example #10
import tensorflow as tf
from tensorflow.python.ops.nn import dynamic_rnn, sparse_softmax_cross_entropy_with_logits

import data

tb_log_freq = 500
save_freq = 500
valid_every = 500
max_to_keep = 2000
batch_size = 512
num_classes = 2
num_iterations = 50001
num_features = 64
learning_rate = 0.0001
clip_norm = 1

data_gen = lambda split, train: data.gen_data(
    split, num_iterations=num_iterations, batch_size=batch_size, train=train)


def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [
            None,
        ])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(205)
Example #11
            g, h = h[:, :, :dim], h[:, :, dim:]
            g = K.in_train_phase(K.dropout(g, dropout_rate), g)
            g = K.sigmoid(g)
            return g * s + (1 - g) * h
        seq = Lambda(_gate)([seq, h])
        return seq


filelist = []
fend1 = 3
fend2 = 2

filelist.append("phaseB_dry-run_.json")

train_D = gen_data(filelist, "f")
datae = copy.deepcopy(train_D.data)
Ne = copy.deepcopy(train_D.N)
tage = copy.deepcopy(train_D.tag)
word_size = train_D.word_size

t_in = Input(shape=(None, word_size * 2))
n1_in = Input(shape=(None,))
n2_in = Input(shape=(None,))
t, n1, n2 = t_in, n1_in, n2_in

td = dilated_gated_conv1d(t, 1)
td = dilated_gated_conv1d(td, 1)
td = dilated_gated_conv1d(td, 1)
Example #12
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)

if args.ckpt_path_thres:
    with open('initial_thresholds.json', 'r') as f:
        THRESHOLDS = json.load(f)

    print("Eval Initial Thresholds from ", args.ckpt_path_thres)

    thr_input_node, _ = gen_data('train', args.dataset, args.batch_size)
    _, quant_model = ct.create_adjustable_model(
        thr_input_node, WEIGHTS, TEA_WEIGHTS, THRESHOLDS, r_alpha, r_beta,
        tea_model=args.tea_model, weight_bits=args.weight_bits,
        act_bits=args.act_bits, swish_bits=args.swish_bits,
        bias_bits=args.bias_bits, weight_const=args.weight_const,
        bits_trainable=args.bits_trainable, mix_prec=args.mix_prec)

    with tf.Session(graph=quant_model.graph) as sess:
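        # restore the trained thresholds from the checkpoint, then evaluate them on batches pulled from the input queue runners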
        saver_e = tf.train.Saver()
        saver_e.restore(sess, args.ckpt_path_thres)

        coord = tf.train.Coordinator() 
        threads = tf.train.start_queue_runners(sess=sess,coord=coord)

        THRESHOLDS = eval_thresholds(sess, quant_model.input_node,
                                     quant_model.reference_nodes, args.batch_size)

        coord.request_stop()
        coord.join(threads)
Example #13
import tensorflow as tf
import numpy as np
from data import gen_data
rng = np.random
import pdb

# FAKE DATA
sigmoid = lambda x: 1. / (1. + np.exp(-x))

N_PROD = 101
D_PROD = 13

N_PERSON = 17
D_PERSON_STYLE = D_PROD

data = gen_data(N_PERSON, N_PROD, D_PROD)


#### TENSORFLOW MODEL
def ph(shape, dtype=tf.float32):
    return tf.placeholder(dtype, shape)


with tf.name_scope('product') as scope:
    prod_embeddings = tf.Variable(data['prod_vecs'].astype(
        np.float32))  # learned codes for each product

# placeholders are network inputs
with tf.name_scope('user') as scope:
    i_user = ph((None), tf.int32)  # user index
    i_user_swiped = ph((None),
Example #14
    SNR = 10
    # signal-to-interference ratio
    SIR = 0

    # If tied = True, then parameters are shared across layers
    # If tied = False, then each layer has its own parameters
    tied = False

    # number of stages
    # num_stages = 3
    for num_stages in [1, 2, 3, 4, 5, 6]:
        dims_list = [(n, m), (m, n), (n, m)]

        Ntrain = 10**5
        Ntest = 10**3
        data_test = data.gen_data(Ntest, p, s1, s2, SNR, SIR, True, 'large')
        data_train = data.gen_data(Ntrain, p, s1, s2, SNR, SIR, True, 'large')

        # Path to folder where to save network. If None, then network not saved
        savepath = '/Users/jeremyjohnston/Documents/admm-net-0/nets/' + 'untied_' + str(
            num_stages) + 'layer'
        # time.strftime("%m_%d_%Y_%H_%M_%S",time.localtime())

        learning_rate = 10 * [1e-3] + 10 * [1e-4]
        # learning_rate = 50*[1e-3] + 50*[1e-4]
        batch_size = 100

        # Create new, initialized network
        net = admm_net.ADMMNet(p, num_stages, tied)
        # net = admm_net_tied.ADMMNet(p, num_stages)
        # net = bbnet.BBNet(p, dims_list)
Example #15
File: train.py Project: monkidea/TF
import data
import model

if __name__ == '__main__':
    # generate dataset
    X, Y = data.gen_data()

    # hyper-parameters
    seqlen = X.shape[-1]
    num_classes = 20  # binary
    state_size = 16  # num of units in rnn's internal state
    batch_size = 128
    epochs = 100000  # 100_000 (underscore literals need Python 3.6+)
    learning_rate = 0.1

    # build model
    net = model.ManyToOne(seqlen=seqlen,
                          num_classes=num_classes,
                          state_size=state_size,
                          batch_size=batch_size,
                          epochs=epochs,
                          learning_rate=learning_rate,
                          ckpt_path='ckpt/')

    # build batch generator for training
    train_set = data.rand_batch_gen(X, Y, batch_size)

    # train model
    sess, last_step, train_losses = net.train(train_set)

    print('\n>> After training')
Example #16
        avg_result.append(np.average([result[i] for result in all_results], 0))
    for line_result in avg_result:
        print_list(line_result)
    return avg_result


def print_avg_cand(sample_list):
    cand_lengths = []
    for sample in sample_list:
        cand_lengths.append(len(sample[1]))
    print('avg cand size:', np.average(cand_lengths))


if __name__ == '__main__':
    training_data, testing_data, valid_data, all_relations, vocabulary, \
        embedding=gen_data()
    cluster_labels, rel_features = cluster_data(num_clusters)
    to_use_embed = rel_features
    #to_use_embed = bert_rel_features
    random.seed(random_seed)
    start_time = time.time()
    all_results = []
    result_all_test_data = []
    seeds = [0, 10, 20]
    for seed in seeds:
        for i in range(sequence_times):
            shuffle_index = list(range(num_clusters))
            random_seed = seed + 100 * i
            random.seed(random_seed)
            #random.seed(random_seed+100*i)
            random.shuffle(shuffle_index)
Example #17
    seq = Lambda(_gate)([seq, h])
    return seq


filelist = []
fend1 = 3
fend2 = 2
for i in range(1, fend1):
    for i1 in range(1, fend2):
        if i == 1:
            if i1 == 4:
                break
        filelist.append("phaseB_" + str(i) + "b_0" + str(i1) + ".json")
filelist.append("phaseB_1b_01F.json")

train_D = gen_data(filelist)
datae = copy.deepcopy(train_D.data)
Ne = copy.deepcopy(train_D.N)
tage = copy.deepcopy(train_D.tag)
word_size = train_D.word_size

t_in = Input(shape=(None, word_size * 2))
tag_in = Input(shape=(None, ))
n1_in = Input(shape=(None, ))
n2_in = Input(shape=(None, ))
t, tagm, n1, n2 = t_in, tag_in, n1_in, n2_in

td = dilated_gated_conv1d(t, 1)
td = dilated_gated_conv1d(td, 2)
td = dilated_gated_conv1d(td, 5)
td = dilated_gated_conv1d(td, 1)
Example #18
def build_iters(filename, input_list, output_list, splits, batch_size):
    """
    Load & generate training examples from multivariate time series data
    :return: data iters & variables required to define network architecture
    """
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['v(pad)'], shape='ncw')
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vpad)'])
    #_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vi)'])
    #_data, _label = gen_data(filename='1cycle_iv_small_100p.txt',input_list=['v(i)'], output_list=['v(pad)'], shape='ncw')
    _data, _label = gen_data(filename='1cycle_iv_small_100p.txt',
                             input_list=['v(i)'],
                             output_list=['i(vpad)'],
                             shape='ncw')
    global num_samples, sequence_length, num_channel
    num_samples, num_channel, sequence_length = _data.shape
    _data = np.concatenate([_data] * 200)
    _label = np.concatenate([_label] * 200)
    data_scale = normalize(_data, axis=2)
    label_scale = normalize(_label, axis=2)

    _data = np.atleast_3d(_data)
    _label = np.atleast_3d(_label)
    data_len = len(_data)
    print("Shape: ", _data.shape)  # (samples, seq_len, features)
    #sys.exit(0)

    m = int(splits[0] * data_len)
    m += m % batch_size
    k = int(splits[1] * data_len)
    k += k % batch_size

    idx = np.random.choice(data_len, size=data_len, replace=False)
    train_idx = idx[:m]
    val_idx = idx[m:m + k]
    test_idx = idx[m + k:]

    #X = _data[:m]
    #y = _label[:m]
    X = _data[train_idx, :]
    y = _label[train_idx, :]
    train_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #train_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("train_data shape: ", X.shape, y.shape)

    #X = _data[m:m+k]
    #y = _label[m:m+k]
    X = _data[val_idx, :]
    y = _label[val_idx, :]
    val_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #val_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=batch_size, shuffle=False)
    print("val_data shape: ", X.shape, y.shape)

    #X = _data[m+k:]
    #y = _label[m+k:]
    X = _data[test_idx, :]
    y = _label[test_idx, :]
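    # keep the per-sample normalization factors for the test split (presumably to rescale predictions later)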
    global eval_data_scale, eval_label_scale
    eval_data_scale = data_scale[test_idx, :]
    eval_label_scale = label_scale[test_idx, :]
    test_iter = mx.io.NDArrayIter(data=X, label=y, batch_size=batch_size)
    #test_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y), batch_size=1, shuffle=False)
    print("test_data shape: ", X.shape, y.shape)
    return train_iter, val_iter, test_iter
Example #19
def main():
    assert (torch.cuda.is_available())

    # Get profile
    p = get_profile()

    # Get forward model
    forward_model = p.get_forward_model()

    # Load Model
    print('===> Loading model')
    model, model_dir, initial_epoch = p.get_model()

    # Loss Function Selection
    criterion = p.loss()

    # Optimizer Selection & Setup
    optimizer = optim.Adam(model.parameters(), lr=p.lr)
    scheduler = MultiStepLR(optimizer, milestones=[30, 60, 90],
                            gamma=0.2)  # learning rates

    # Cuda Setup
    model = model.cuda()
    # device_ids = [0]
    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    # criterion = criterion.cuda()

    # Load Data
    print('===> Loading data')
    xs = dg.gen_data(p)
    dataset = GenericDataset(xs, forward_model)

    # Training
    model.train()

    print('===> Training')
    for epoch in range(initial_epoch, p.epoch):

        scheduler.step(epoch)  # step to the learning rate in this epoch

        # Setup Data Loader
        DLoader = DataLoader(dataset=dataset,
                             num_workers=4,
                             drop_last=True,
                             batch_size=p.batch_size,
                             shuffle=True)

        epoch_loss = 0

        # Process Batches
        start_time = time.time()
        for n_count, batch_yx in enumerate(DLoader):

            optimizer.zero_grad()
            batch_x, batch_y = batch_yx[1].cuda(), batch_yx[0].cuda()

            loss = criterion(model(batch_y), batch_x)  # Calculate Loss
            epoch_loss += loss.item()

            loss.backward()  # Calculate gradients
            optimizer.step()  # Update parameters

            if n_count % 10 == 0:
                print('%4d %4d / %4d loss = %2.4f' %
                      (epoch + 1, n_count, xs.shape[0] // p.batch_size,
                       loss.item() / p.batch_size))

        elapsed_time = time.time() - start_time

        # Log and Save
        print_log('epoch = %4d , loss = %4.4f , time = %4.2f s' %
                  (epoch + 1, epoch_loss / n_count, elapsed_time))
        np.savetxt('train_result.txt',
                   np.hstack((epoch + 1, epoch_loss / n_count, elapsed_time)),
                   fmt='%2.4f')
        # torch.save(model.state_dict(), os.path.join(save_dir, 'model_%03d.pth' % (epoch+1)))
        torch.save(model,
                   os.path.join(model_dir, 'model_%03d.pth' % (epoch + 1)))
Example #20
                    type=int,
                    default=1111,
                    help='random seed (default: 1111)')
args = parser.parse_args()

torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

print("Producing data...")
#_data, _label = gen_data(filename='1cycle_iv_2.txt',input_list=['v(i)'], output_list=['i(vi)'], shape='ncw')
_data, _label = gen_data(filename='1cycle_iv_small.txt',
                         input_list=['v(i)'],
                         output_list=['v(pad)'],
                         shape='ncw')
_label = _label.squeeze()
data_scale = np.max(_data)
label_scale = np.max(_label)
print("Scale: %.2e %.2e" % (data_scale, label_scale))
_data = _data / data_scale  # scale
_label = _label / label_scale  # scale
_data = np.concatenate([_data] * 20)
_label = np.concatenate([_label] * 20)

data_len = len(_data)
seq_len = len(_data[0][0])
idx = np.random.choice(data_len, size=data_len, replace=False)
m = int(0.8 * data_len)
k = int(0.1 * data_len)
Example #21
import tensorflow as tf

import data

tb_log_freq = 500
save_freq = 500
valid_every = 500
max_to_keep = 2000
batch_size = 512
num_classes = 2
num_iterations = 50001
num_features = 64
learning_rate = 0.0001
clip_norm = 1


data_gen = lambda split, train: data.gen_data(split, num_iterations=num_iterations, batch_size=batch_size, train=train)

def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(205)
        cell_bw = tf.nn.rnn_cell.GRUCell(205)
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
Example #22
tb_log_freq = 50
save_freq = 50
valid_every = 50
max_to_keep = 200
batch_size = 64
number_inputs = 42
number_outputs = 8
num_iterations = 5001
learning_rate = 0.001
clip_norm = 1

num_units_encoder = 400
num_units_l1 = 200
num_units_l2 = 200

data_gen = data.gen_data(num_iterations=num_iterations, batch_size=batch_size)


def model(crf_on):
    print("building model ...")
    with tf.variable_scope('train'):
        print("building train ...")
        # setup
        X_input = tf.placeholder(tf.float32,
                                 shape=[None, None, number_inputs],
                                 name='X_input')
        X_length = tf.placeholder(tf.int32, shape=[
            None,
        ], name='X_length')
        t_input = tf.placeholder(tf.int32, shape=[None, None], name='t_input')
        t_input_hot = tf.one_hot(t_input, number_outputs)
Example #23
    model = Model(4, 2, 2, 3, 8, records[0]["op_table"])
    model.load_weights('weights')

    gdef, prof_data = load("vgg.pickle")

    for bandwidth in (2**n for n in range(8, 20)):
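        # build a 5-device topology with this intra-worker bandwidth and ask the model for a placement of the profiled graph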
        topo = gen_topo([
            ("/job:worker/replica:0/task:0/device:GPU:0", 1, 12 << 30),
            ("/job:worker/replica:0/task:0/device:GPU:1", 1, 12 << 30),
            ("/job:worker/replica:0/task:0/device:GPU:2", 1, 12 << 30),
            ("/job:worker/replica:0/task:0/device:GPU:3", 1, 12 << 30),
            ("/job:worker/replica:0/task:1/device:GPU:0", 1, 12 << 30),
        ],
                        intra=bandwidth,
                        inter=100)
        record = gen_data(gdef, prof_data, topo, records[0]["op_table"])

        cnfeats = tf.convert_to_tensor(record["cnfeats"], dtype=tf.float32)
        cefeats = tf.convert_to_tensor(record["cefeats"], dtype=tf.float32)
        cntypes = tf.convert_to_tensor(record["cntypes"], dtype=tf.float32)
        tnfeats = tf.convert_to_tensor(record["tnfeats"], dtype=tf.float32)
        tefeats = tf.convert_to_tensor(record["tefeats"], dtype=tf.float32)
        model.set_graphs(record["cgraph"], record["tgraph"])
        model.set_groups(record["cgroups"], record["tgroups"])

        logp = model([cnfeats, cefeats, cntypes, tnfeats, tefeats])
        p = np.argmax(logp.numpy()[:, 1:], axis=1)
        count = {}
        for i in range(p.shape[0]):
            d = (1 if logp.numpy()[i, 0] > .5 else 0), p[i]
            count[d] = count.get(d, 0) + 1
Example #24
                           reverse_question_indexs, reverse_relation_indexs,
                           question_lengths, relation_lengths)
        start_index = 0
        pred_indexs = []
        #print('len of relation_set:', len(relation_set_lengths))
        for j in range(len(relation_set_lengths)):
            length = relation_set_lengths[j]
            cand_indexs = samples[j][1]
            pred_index = (cand_indexs[all_scores[start_index:start_index +
                                                 length].argmax()])
            if pred_index == gold_relation_indexs[j]:
                num_correct += 1
            #print('scores:', all_scores[start_index:start_index+length])
            #print('cand indexs:', cand_indexs)
            #print('pred, true:',pred_index, gold_relation_indexs[j])
            start_index += length
    #print(cand_scores[-1])
    #print('num correct:', num_correct)
    #print('correct rate:', float(num_correct)/len(testing_data))
    return float(num_correct) / len(testing_data)


if __name__ == '__main__':
    model = torch.load(model_path)
    training_data, testing_data, valid_data,\
        all_relations, vocabulary,  embedding = gen_data()
    model.init_embedding(np.array(embedding))
    acc = evaluate_model(model, testing_data, batch_size, all_relations,
                         device)
    print('accuracy:', acc)
Example #25
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn, rnn, contrib
from data import gen_data, save_plot
from Model import UpSampling1D

#Yu: This TCN is based on https://github.com/colincsl/TemporalConvolutionalNetworks

ctx = mx.gpu()

num_layers = 1  # multi-layer
batch_size = 64  #
#sequence_length = 6 # seq len
num_hidden = 8  #

#_data, _label = gen_data(filename='500cycle.txt',input_list=['v(i)'], output_list=['v(pad)'], seq_len=6)
_data, _label = gen_data('1cycle_short.txt',
                         input_list=['v(i)'],
                         output_list=['v(pad)'])  # nwc
data_len = len(_data)
sequence_length = len(_data[0])
print("Len: ", data_len)

m = int(0.8 * data_len)
m += m % batch_size

X = _data[:m]
y = _label[:m]
train_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y),
                                   batch_size=batch_size,
                                   shuffle=True)
print("train_data shape: ", X.shape, y.shape)
Example #26
import data
import model

if __name__ == '__main__':
    # generate dataset
    X, Y = data.gen_data()

    # hyper-parameters
    seqlen = X.shape[-1]
    num_classes = 20 # binary
    state_size = 16  # num of units in rnn's internal state
    batch_size = 128
    epochs = 100000  # 100_000 (underscore literals need Python 3.6+)
    learning_rate = 0.1


    # build model
    net = model.ManyToOne(
            seqlen = seqlen,
            num_classes = num_classes,
            state_size = state_size,
            batch_size = batch_size,
            epochs = epochs,
            learning_rate = learning_rate,
            ckpt_path='ckpt/'
            )


    # build batch generator for training
    train_set = data.rand_batch_gen(X, Y, batch_size)
Example #27
import matplotlib.pyplot as plt
import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
import pandas as pd
import os
from mxnet import gluon, autograd
from mxnet.gluon import nn, rnn, contrib
from data import gen_data, save_plot

ctx = mx.gpu()

batch_size = 16
#_data, _label = gen_data(filename='1cycle.txt',input_list=['v(i)'], output_list=['v(pad)'])
_data, _label = gen_data(filename='1cycle_short.txt',
                         input_list=['v(i)'],
                         output_list=['v(pad)'])
data_len = len(_data)
sequence_length = len(_data[0])
#num_hidden = int(sequence_length/2)
#num_hidden = 128
print("Len: ", data_len)

m = int(0.8 * data_len)
m += m % batch_size

X = _data[:m]
y = _label[:m]
train_iter = gluon.data.DataLoader(gluon.data.ArrayDataset(X, y),
                                   batch_size=batch_size,
                                   shuffle=True)
Example #28
tf.set_random_seed(1)

tb_log_freq = 50
save_freq = 500
valid_every = 50
max_to_keep = 200
batch_size = 2000
num_classes = 2
num_iterations = 1501
num_features = 64
learning_rate = 0.0001
clip_norm = 1


data_gen = lambda split: data.gen_data(split, num_iterations=num_iterations, batch_size=batch_size)

def model():
    tf.set_random_seed(1)
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        X_bn = batch_norm(X_pl, is_training=is_training_pl)
        print("X_bn", X_bn.get_shape())
        l1 = fully_connected(X_pl, num_outputs=100, activation_fn=relu)#, normalizer_fn=batch_norm)
        print("l1", l1.get_shape())
Example #29
parser.add_argument('--seed',
                    type=int,
                    default=1111,
                    help='random seed (default: 1111)')
args = parser.parse_args()

torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

print("Producing data...")
_data, _label = gen_data(filename='1cycle_iv_2.txt',
                         input_list=['v(i)'],
                         output_list=['i(vi)'],
                         shape='ncw')
_label = _label.squeeze()
data_scale = np.max(_data)
label_scale = np.max(_label)
print("Scale: %.2e %.2e" % (data_scale, label_scale))
_data = _data / data_scale  # scale
_label = _label / label_scale  # scale

data_len = len(_data)
seq_len = len(_data[0][0])
idx = np.random.choice(data_len, size=data_len, replace=False)
m = int(0.8 * data_len)
k = int(0.1 * data_len)
train_idx = idx[:m]
val_idx = idx[m:m + k]
Example #30
import tensorflow as tf

import data

tb_log_freq = 500
save_freq = 500
valid_every = 500
max_to_keep = 2000
batch_size = 512
num_classes = 2
num_iterations = 50001
num_features = 64
learning_rate = 0.0001
clip_norm = 1


data_gen = lambda split: data.gen_data(split, num_iterations=num_iterations, batch_size=batch_size)

def model():
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(100)
        cell_bw = tf.nn.rnn_cell.GRUCell(100)
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
Example #31
import data, pr, info_fns, G_builder, misc

########## LOCAL PATHS ############

output_path = 'C:/Users/Crbn/Documents/MPRI M2/ReSys/project/output/'
hema_file = 'C:/Users/Crbn/Documents/MPRI M2/ReSys/project/data/wholecells_binary.csv'

########## MAIN PARAMETERS ###########

dataset = 'hema'  # can also try others in data.py, such as '3NAND_AND_2OR'
cutoff = None
G = None
thresh_mult = 1

############# MAIN ################
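# pipeline: load the binary expression data, preprocess, build a gene graph, post-process and assign stages, then draw it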

dataa, gene_names = data.gen_data(set=dataset, include_stages=False, cutoff=cutoff, hema_file=hema_file)
dataa, gene_names = misc.preprocess(dataa, gene_names)
G = G_builder.build(dataa, gene_names, G=G, thresh_mult=thresh_mult)
G = misc.postprocess(G)
G = misc.assign_stages(G)

#print(G.nodes[edge[0]]['gene'], '->', G.nodes[edge[1]]['gene'])

misc.drawG(G, output_path)
Example #32
import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
import pandas as pd
import os
import time
from mxnet import gluon, autograd
from mxnet.gluon import nn, rnn
import mxnet.ndarray as F
from data import gen_data, save_plot

ctx = mx.gpu()

num_layers = 1    # multi-layer
batch_size = 16     # 

_data, _label = gen_data(filename='1cycle_iv_small_100p.txt',
                         input_list=['v(i)'],
                         output_list=['i(vi)'])
data_scale = 1.0 * np.max(_data)
label_scale = 1.0 * np.max(_label)
data_scale = 1
label_scale = 1
_data = _data / data_scale
_label = _label / label_scale
#_data = np.concatenate([_data]*100)
#_label = np.concatenate([_label]*100)

sequence_length = len(_data[0])
num_hidden = sequence_length # too big will hang
#num_hidden = 64 # can't change num_hidden
print("Sequence_len: ", sequence_length)

data_len = len(_data)
Example #33
from data import gen_data, read_origin_relation
from model import ClassifierModel
import pandas as pd
from settings import args, TASK_DICT, init_logging


def train(training_data, valid_data, vocabulary, embedding_dim, hidden_dim,


if __name__ == '__main__':
    if args.task == 'fewrel':
        training_data, testing_data, valid_data, all_relations, vocabulary, embedding = gen_data()
        print("finish gen_data, start train")
        train(training_data, valid_data, vocabulary, args.embedding_dim, args.hidden_dim,
              args.device, args.batch_size, args.learning_rate, args.model_dir,
              all_relations, model=None, epoch=args.train_epoch)
    if args.task == 'dbpedia':
        training_data, testing_data, valid_data, all_relations, vocabulary, embedding = gen_data()
        train(training_data, valid_data, vocabulary, args.embedding_dim, args.hidden_dim, args.device, args.batch_size,
              args.learning_rate, args.model_dir, all_relations, model=None, epoch=args.train_epoch)