Example #1
def main():
    params = {
        'nInput': inputNeurons,
        'nHidden': hiddenNeurons,
        'nOutput': outputNeurons,
        'activation': 'sigmoid'
    }

    n1 = network(params)
    #print(n1)

    #n1.setBias(3, -3.124334)

    #print(n1)
    #cRand = random.uniform(-10.0, 10.0)
    #n1.setWeight((1, 2, 2), cRand)
    #print("\n\n\n")

    # test XOR input vector
    inputVector = [1.0, 1.0]

    try:
        n1.feed(inputVector)
    except Exception as e:
        print(BR, "Exception:", e, BR)
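The excerpt references several names it does not define (`inputNeurons`, `hiddenNeurons`, `outputNeurons`, the `BR` separator, and the `network` class itself). A minimal sketch of the assumed globals, purely so the snippet can run:

# Assumed module-level globals (not part of the original excerpt).
inputNeurons = 2       # XOR takes two inputs
hiddenNeurons = 3      # small arbitrary hidden layer
outputNeurons = 1      # single XOR output
BR = "\n" + "-" * 40   # separator string used when printing the exception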
Example #2
 def setUp(self):
     activities = [activity() for number in range(9)]
     [activities[i].AssignID(i+1) for i in range(9)]
     [activities[i].AssignDuration(10) for i in range(9)]
     [activities[i].AssignDurationRange(MIN=5, ML=9, MAX=40) for i in range(9)]
     [activities[i].AssignSuccsesors(i+2) for i in range(8)]
     self.P = network()
     self.P.AddActivity(*activities)
Example #3
 def test_Float1(self):
     """Tests total floats in the network"""
     activities = [activity() for number in range(3)]
     [activities[i].AssignID(i+1) for i in range(3)]
     [activities[i].AssignDuration(duration) for duration, i in zip([5,10,1], range(3))]
     [activities[i].AssignSuccsesors(3) for i in range(2)]
     P = network()
     P.AddActivity(*activities)
     P.CalculateTotalFloats()
     calculated_floats = [act.GetSlack() for act in P]
     floats = [5,0,0]
     self.assertListEqual(calculated_floats, floats)
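Why the expected floats are [5, 0, 0]: activities 1 (duration 5) and 2 (duration 10) both precede activity 3, so activity 3 cannot start before t = 10; activities 2 and 3 are critical, while activity 1 can slip 10 − 5 = 5 time units without delaying the project.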
Example #4
 def test_ProbabilityOfCritical(self):
     """ Tests calculation of probability of criticality of an activity"""
     activities = [activity() for number in range(3)]
     [activities[i].AssignID(i+1) for i in range(3)]
     [activities[i].AssignDuration(duration) for duration, i in zip([100,100,100], range(3))]
     [activities[i].AssignDurationRange(min=50, ml=100, max=150) for i in range(3)]
     [activities[i].AssignSuccsesors(3) for i in range(2)]
     P = network()
     P.AddActivity(*activities)
     P.Simulate(1000)
     for i in range(1,4):
         self.assertGreater(P.GetProbabiltyOfCritical(ID=i), 0)
         self.assertLessEqual(P.GetProbabiltyOfCritical(ID=i), 1)
Example #5
 def test_Float4(self):
     """Tests total floats in network with on start-start constraint"""
     activities = [activity() for number in range(4)]
     [activities[i].AssignID(i+1) for i in range(4)]
     [activities[i].AssignDuration(duration) for duration, i in zip([5,10,20,3], range(4))]   
     
     activities[0].AssignSuccsesors(2)
     activities[1].AssignSuccsesors('4ss')
     activities[2].AssignSuccsesors(4)  
     
     P = network()
     P.AddActivity(*activities)
     P.CalculateTotalFloats()
     calculated_floats = [act.GetSlack() for act in P]
     floats = [15,15,0,0]
     self.assertListEqual(calculated_floats, floats)
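As a style note, the tests above use list comprehensions purely for their side effects. The same setup reads more idiomatically as a plain loop; a sketch equivalent to the `setUp` above (same API calls, same topology):

# Equivalent to the setUp above, written as an explicit loop.
activities = [activity() for _ in range(9)]
for i, act in enumerate(activities, start=1):
    act.AssignID(i)
    act.AssignDuration(10)
    act.AssignDurationRange(MIN=5, ML=9, MAX=40)
    if i < 9:  # the last activity has no successor
        act.AssignSuccsesors(i + 1)
P = network()
P.AddActivity(*activities)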
Example #6
classes = ["T-shirt/top","Trouser","Pullover","Dress","Coat","Sandal","Shirt","Sneaker","Bag","Ankle boot"]

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))]
)

batchsize = 50

trainset = mnistDataSet('fashionmnist\\data\\fashion', "train")
testset = mnistDataSet('fashionmnist\\data\\fashion', "t10k")
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchsize, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=batchsize, shuffle=False, num_workers=2)

if __name__ == "__main__":
    net = network()

    epochs = 10
    loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    for e in range(epochs):
        net.train()
        running_loss = 0.0

        for i, d in enumerate(trainloader, 0):
            inputs, labels = d
            inputs, labels = Variable(inputs), Variable(labels)  # Variable is a no-op in PyTorch >= 0.4

            optimizer.zero_grad()
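The excerpt stops right after `optimizer.zero_grad()`. A typical continuation of the training step, assuming `net` returns class logits (this completion is an assumption, not recovered text):

            # Assumed continuation of the training step.
            outputs = net(inputs)                # forward pass
            batch_loss = loss(outputs, labels)   # CrossEntropyLoss defined above
            batch_loss.backward()                # backpropagation
            optimizer.step()                     # parameter update
            running_loss += batch_loss.item()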
Example #7
# Session
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)

# Placeholders
x1 = tf.placeholder(tf.float32, [batch_size, Height, Width, Channel])
x2 = tf.placeholder(tf.float32, [batch_size, Height, Width, Channel])
x3 = tf.placeholder(tf.float32, [batch_size, Height, Width, Channel])

## MC-subnet
x1to2 = flow.warp_img(batch_size, x2, x1, False)
x3to2 = flow.warp_img(batch_size, x2, x3, True)

## QE-subnet
x2_enhanced = net.network(x1to2, x2, x3to2)

## Import data
PQF_Frame_93_Y, PQF_Frame_93_U, PQF_Frame_93_V = data.input_data(
    Height, Width, 'Frame_93')
non_PQF_Frame_96_Y, non_PQF_Frame_96_U, non_PQF_Frame_96_V = data.input_data(
    Height, Width, 'Frame_96')
PQF_Frame_97_Y, PQF_Frame_97_U, PQF_Frame_97_V = data.input_data(
    Height, Width, 'Frame_97')

## Load model
saver = tf.train.Saver()
saver.restore(sess, './HEVC_QP37_model/model.ckpt')

## Run test
# The original excerpt is truncated mid-call; the feed_dict below is an
# assumed completion, pairing each placeholder with the luma frames loaded above.
Enhanced_Y = sess.run(x2_enhanced,
                      feed_dict={x1: PQF_Frame_93_Y,
                                 x2: non_PQF_Frame_96_Y,
                                 x3: PQF_Frame_97_Y})
Example #8
    else:
        print('Cache Found at ./data_cache/')
        x_read, y_read = read_from()
        x, y = proc_data(x_read, y_read)

    x_train, x_val, y_train, y_val = train_test_split(x,
                                                      y,
                                                      test_size=0.2,
                                                      random_state=777)

    training_set = Data.TensorDataset(torch.from_numpy(x_train),
                                      torch.from_numpy(y_train))
    val_set = Data.TensorDataset(torch.from_numpy(x_val),
                                 torch.from_numpy(y_val))

    model = network()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=params['lr'])

    train_loader = Data.DataLoader(dataset=training_set,
                                   batch_size=params['batch_size'],
                                   shuffle=True,
                                   num_workers=2)

    val_loader = Data.DataLoader(dataset=val_set,
                                 batch_size=params['batch_size'],
                                 shuffle=True,
                                 num_workers=2)

    for epoch in range(params['n_epoch']):
        print("Epoch: {}\tlr: {}".format(epoch + 1,
                                         params['lr']))  # second argument assumed; excerpt truncated here
Example #9
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

import net

model = net.network()  # avoid rebinding the imported module name

BSIZE = 128
MAX_ITER = 20000

for i in range(MAX_ITER):
    x_train, y_train = mnist.train.next_batch(BSIZE)
    x_train = x_train.reshape([-1, 28, 28, 1])
    ls, ac = model.train(x_train, y_train)
    print('ITER:\t%d\tLoss:\t%.4f\tAcc:\t%.4f' % (i, ls, ac))
Example #10
def train():
    """ do training """
    args = parse_args()
    if args.enable_ce:
        fluid.default_startup_program().random_seed = SEED
        fluid.default_main_program().random_seed = SEED
    train_dir = args.train_dir
    vocab_text_path = args.vocab_text_path
    vocab_tag_path = args.vocab_tag_path
    use_cuda = bool(args.use_cuda)
    parallel = bool(args.parallel)
    batch_size = args.batch_size
    neg_size = args.neg_size
    print("use_cuda: {}, parallel: {}, batch_size: {}, neg_size: {} ".format(
        use_cuda, parallel, batch_size, neg_size))
    vocab_text_size, vocab_tag_size, train_reader = utils.prepare_data(
        file_dir=train_dir,
        vocab_text_path=vocab_text_path,
        vocab_tag_path=vocab_tag_path,
        neg_size=neg_size,
        batch_size=batch_size * get_cards(args),
        buffer_size=batch_size * 100,
        is_train=True)
    """ train network """
    # Train program
    avg_cost, correct, cos_pos = net.network(vocab_text_size,
                                             vocab_tag_size,
                                             neg_size=neg_size)

    # Optimizer to minimize the loss
    sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=args.base_lr)
    sgd_optimizer.minimize(avg_cost)

    # Initialize executor
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    if parallel:
        train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                           loss_name=avg_cost.name)
    else:
        train_exe = exe

    pass_num = args.pass_num
    model_dir = args.model_dir
    fetch_list = [avg_cost.name]
    total_time = 0.0
    ce_info = []
    for pass_idx in range(pass_num):
        epoch_idx = pass_idx + 1
        print("epoch_%d start" % epoch_idx)
        t0 = time.time()
        for batch_id, data in enumerate(train_reader()):
            lod_text_seq = utils.to_lodtensor([dat[0] for dat in data], place)
            lod_pos_tag = utils.to_lodtensor([dat[1] for dat in data], place)
            lod_neg_tag = utils.to_lodtensor([dat[2] for dat in data], place)
            loss_val, correct_val = train_exe.run(
                feed={
                    "text": lod_text_seq,
                    "pos_tag": lod_pos_tag,
                    "neg_tag": lod_neg_tag
                },
                fetch_list=[avg_cost.name, correct.name])
            ce_info.append(
                float(np.sum(correct_val)) / (args.num_devices * batch_size))
            if batch_id % args.print_batch == 0:
                print("TRAIN --> pass: {} batch_num: {} avg_cost: {}, acc: {}".
                      format(
                          pass_idx, (batch_id + 10) * batch_size,
                          np.mean(loss_val),
                          float(np.sum(correct_val)) /
                          (args.num_devices * batch_size)))
        t1 = time.time()
        total_time += t1 - t0
        print("epoch:%d num_steps:%d time_cost(s):%f" %
              (epoch_idx, batch_id, total_time / epoch_idx))
        save_dir = "%s/epoch_%d" % (model_dir, epoch_idx)
        feed_var_names = ["text", "pos_tag"]
        fetch_vars = [cos_pos]
        fluid.io.save_inference_model(save_dir, feed_var_names, fetch_vars,
                                      exe)
    # only for ce
    if args.enable_ce:
        ce_acc = 0
        try:
            ce_acc = ce_info[-2]
        except IndexError:
            logger.error("ce info error")
        epoch_idx = args.pass_num
        device = get_device(args)
        if args.use_cuda:
            gpu_num = device[1]
            print("kpis\teach_pass_duration_gpu%s\t%s" %
                  (gpu_num, total_time / epoch_idx))
            print("kpis\ttrain_acc_gpu%s\t%s" % (gpu_num, ce_acc))
        else:
            cpu_num = device[1]
            threads_num = device[2]
            print("kpis\teach_pass_duration_cpu%s_thread%s\t%s" %
                  (cpu_num, threads_num, total_time / epoch_idx))
            print("kpis\ttrain_acc_cpu%s_thread%s\t%s" %
                  (cpu_num, threads_num, ce_acc))

    print("finish training")
Example #11
    def train(self, train_size, test_size, data_path):
        lr = self.lr_
        batch_size = self.batch_size_

        # get images and label batch
        with tf.name_scope("input"):
            train_image_batch, train_label_batch = data_input.get_images_batch_with_labels(
                data_path=data_path + "train/",
                shape=[200, 200],
                channels=3,
                batch_size=batch_size,
                data_size=train_size)

            test_image_batch, test_label_batch = data_input.get_images_batch_with_labels(
                data_path=data_path + "test/",
                shape=[200, 200],
                channels=3,
                batch_size=batch_size,
                data_size=test_size)

            w, h, c, classes = 200, 200, 3, 5
            x = tf.placeholder(tf.float32, [batch_size, w, h, c])
            y = tf.placeholder(tf.float32, [batch_size, classes])
        # construct the loss function and the optimizer

        with tf.name_scope("Logits"):
            pred = net.network(x)
            tf.summary.histogram("pred", pred)

        with tf.name_scope("loss"):
            loss = net.loss(_logits=pred, _labels=y)
            tf.summary.scalar("loss", loss)

        with tf.name_scope("optimization"):
            optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)

        with tf.name_scope("accuracy"):
            # Model Evaluation
            correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            tf.summary.scalar("accuracy", accuracy)

        # for summary issues
        tf.summary.image("batch images", train_image_batch, max_outputs=20)

        summary_op = tf.summary.merge_all()

        # init variables
        init = tf.global_variables_initializer()

        # saver to save all the variables to checkpoint file
        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            summary_writer = tf.summary.FileWriter("./tmp/train/",
                                                   graph=sess.graph)

            step = 1

            while step <= self.max_iters_:
                image_batch, label_batch = sess.run(
                    [train_image_batch, train_label_batch])
                label_batch = self.get_label_batch(label_batch, classes)

                sess.run(optimizer, feed_dict={x: image_batch, y: label_batch})

                if step % self.display_step_ == 0:
                    l, acc, summary = sess.run([loss, accuracy, summary_op],
                                               feed_dict={
                                                   x: image_batch,
                                                   y: label_batch
                                               })

                    print("[Step{}]".format(step))
                    print(
                        "[Step{}]The batch loss is {:.6f}, accuracy is {:.5f}".
                        format(step, l, acc))

                    summary_writer.add_summary(summary, step)

                if step % (10 * self.display_step_) == 0:
                    # calculate the accuracy on the test dataset
                    print("[STEP{}]test time!".format(step))
                    max_steps = int(test_size / batch_size)

                    test_acc = 0
                    for i in range(max_steps):
                        test_images, test_labels = sess.run(
                            [test_image_batch, test_label_batch])
                        test_labels = self.get_label_batch(
                            test_labels, classes)

                        acc = sess.run(accuracy,
                                       feed_dict={
                                           x: test_images,
                                           y: test_labels
                                       })
                        test_acc += acc

                    print("[STEP{}]The test accuracy is:{}".format(
                        step, test_acc / max_steps))
                    print("[STEP{}]test complete!".format(step))

                step += 1

            print("Optimization Finished!")

            saver.save(sess, "./tmp/model.ckpt")

            coord.request_stop()
            coord.join(threads)
            print("Have save all variables to " + "./tmp/model.ckpt")
Example #12
def train():
    """ do training """
    args = parse_args()
    hid_size = args.hid_size
    train_dir = args.train_dir
    vocab_path = args.vocab_path
    use_cuda = bool(args.use_cuda)
    print("use_cuda:", use_cuda)
    batch_size = args.batch_size
    vocab_size, train_reader = utils.prepare_data(
        train_dir, vocab_path, batch_size=batch_size * get_cards(args),\
        buffer_size=1000, word_freq_threshold=0, is_train=True)

    # Train program
    src_wordseq, dst_wordseq, avg_cost, acc = net.network(
        vocab_size=vocab_size, hid_size=hid_size)

    # Optimizer to minimize the loss
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_lr)
    sgd_optimizer.minimize(avg_cost)

    def train_loop(main_program):
        """ train network """
        pass_num = args.pass_num
        model_dir = args.model_dir
        fetch_list = [avg_cost.name]

        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        total_time = 0.0
        for pass_idx in six.moves.xrange(pass_num):
            epoch_idx = pass_idx + 1
            print("epoch_%d start" % epoch_idx)

            t0 = time.time()
            i = 0
            newest_ppl = 0
            for data in train_reader():
                i += 1
                lod_src_wordseq = utils.to_lodtensor([dat[0] for dat in data],
                                                     place)
                lod_dst_wordseq = utils.to_lodtensor([dat[1] for dat in data],
                                                     place)
                ret_avg_cost = exe.run(main_program,
                                       feed={
                                           "src_wordseq": lod_src_wordseq,
                                           "dst_wordseq": lod_dst_wordseq
                                       },
                                       fetch_list=fetch_list)
                avg_ppl = np.exp(ret_avg_cost[0])
                newest_ppl = np.mean(avg_ppl)
                if i % args.print_batch == 0:
                    print("step:%d ppl:%.3f" % (i, newest_ppl))

            t1 = time.time()
            total_time += t1 - t0
            print("epoch:%d num_steps:%d time_cost(s):%f" %
                  (epoch_idx, i, total_time / epoch_idx))
            save_dir = "%s/epoch_%d" % (model_dir, epoch_idx)
            feed_var_names = ["src_wordseq", "dst_wordseq"]
            fetch_vars = [avg_cost, acc]
            if args.trainer_id == 0:
                fluid.io.save_inference_model(save_dir, feed_var_names,
                                              fetch_vars, exe)
                print("model saved in %s" % save_dir)
        print("finish training")

    if args.is_local:
        print("run local training")
        train_loop(fluid.default_main_program())
    else:
        print("run distribute training")
        t = fluid.DistributeTranspiler()
        t.transpile(args.trainer_id,
                    pservers=args.endpoints,
                    trainers=args.trainers)
        if args.role == "pserver":
            print("run psever")
            pserver_prog = t.get_pserver_program(args.current_endpoint)
            pserver_startup = t.get_startup_program(args.current_endpoint,
                                                    pserver_prog)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif args.role == "trainer":
            print("run trainer")
            train_loop(t.get_trainer_program())
Example #13
def run(pattern):
    net = network(pattern)
    net.build_net()
    net.run()
Example #14
 def init_data(self):
     print("init_data")
     self.fts = fts_database()
     self.thread_running = False
     self.net = network()
     self.cmd_input.returnPressed.connect(self.handle_cmd)
Example #15
def train():
    """ do training """
    args = parse_args()
    train_dir = args.train_dir
    vocab_text_path = args.vocab_text_path
    vocab_tag_path = args.vocab_tag_path
    use_cuda = bool(args.use_cuda)
    batch_size = args.batch_size
    neg_size = args.neg_size
    vocab_text_size, vocab_tag_size, train_reader = utils.prepare_data(
        file_dir=train_dir,
        vocab_text_path=vocab_text_path,
        vocab_tag_path=vocab_tag_path,
        neg_size=neg_size,
        batch_size=batch_size * get_cards(args),
        buffer_size=batch_size * 100,
        is_train=True)
    """ train network """
    # Train program
    avg_cost, correct, cos_pos = net.network(vocab_text_size,
                                             vocab_tag_size,
                                             neg_size=neg_size)

    # Optimizer to minimize the loss
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_lr)
    sgd_optimizer.minimize(avg_cost)

    def train_loop(main_program):
        # Initialize executor
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        pass_num = args.pass_num
        model_dir = args.model_dir
        fetch_list = [avg_cost.name]
        exe.run(fluid.default_startup_program())
        total_time = 0.0
        for pass_idx in range(pass_num):
            epoch_idx = pass_idx + 1
            print("epoch_%d start" % epoch_idx)
            t0 = time.time()
            for batch_id, data in enumerate(train_reader()):
                lod_text_seq = utils.to_lodtensor([dat[0] for dat in data],
                                                  place)
                lod_pos_tag = utils.to_lodtensor([dat[1] for dat in data],
                                                 place)
                lod_neg_tag = utils.to_lodtensor([dat[2] for dat in data],
                                                 place)
                loss_val, correct_val = exe.run(
                    feed={
                        "text": lod_text_seq,
                        "pos_tag": lod_pos_tag,
                        "neg_tag": lod_neg_tag
                    },
                    fetch_list=[avg_cost.name, correct.name])
                if batch_id % args.print_batch == 0:
                    print(
                        "TRAIN --> pass: {} batch_num: {} avg_cost: {}, acc: {}"
                        .format(pass_idx, (batch_id + 10) * batch_size,
                                np.mean(loss_val),
                                float(np.sum(correct_val)) / batch_size))
            t1 = time.time()
            total_time += t1 - t0
            print("epoch:%d num_steps:%d time_cost(s):%f" %
                  (epoch_idx, batch_id, total_time / epoch_idx))
            save_dir = "%s/epoch_%d" % (model_dir, epoch_idx)
            feed_var_names = ["text", "pos_tag"]
            fetch_vars = [cos_pos]
            fluid.io.save_inference_model(save_dir, feed_var_names, fetch_vars,
                                          exe)
        print("finish training")

    if args.is_local:
        print("run local training")
        train_loop(fluid.default_main_program())
    else:
        print("run distribute training")
        t = fluid.DistributeTranspiler()
        t.transpile(args.trainer_id,
                    pservers=args.endpoints,
                    trainers=args.trainers)
        if args.role == "pserver":
            print("run psever")
            pserver_prog = t.get_pserver_program(args.current_endpoint)
            pserver_startup = t.get_startup_program(args.current_endpoint,
                                                    pserver_prog)
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif args.role == "trainer":
            print("run trainer")
            train_loop(t.get_trainer_program())