Example #1
def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0, data1,
        vocab.word2id, args.batch_size)

    #data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(len(batches), ['loss', 'rec', 'adv', 'd0', 'd1'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2  # integer division: half is used as a slice index
        #data0_rec += rec[:half]
        #data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1 = sess.run([model.loss,
            model.loss_rec, model.loss_adv, model.loss_d0, model.loss_d1],
            feed_dict=feed_dictionary(model, batch, args.rho, args.gamma_min))
        losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

    n0, n1 = len(data0), len(data1)
    #data0_rec = reorder(order0, data0_rec)[:n0]
    #data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        #write_sent(data0_rec, out_path+'.0'+'.rec')
        #write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path+'.0'+'.tsf')
        write_sent(data1_tsf, out_path+'.1'+'.tsf')

    return losses
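Every transfer example in this listing leans on a project-local Accumulator that tracks named losses, but none of the snippets include the class itself. Below is a minimal sketch consistent with the calls made here (add, output, clear, and the values list read in Example #12); the divide-by-batch-count averaging is an assumption, and Example #10 passes the constructor arguments in the opposite order, so that project evidently uses a different variant.

# Assumed reconstruction of the loss-tracking Accumulator, not the projects'
# actual code: it keeps per-name running means, dividing each added value by
# the expected number of add() calls.
class Accumulator(object):
    def __init__(self, div, names):
        self.div = div        # expected add() calls, e.g. len(batches)
        self.names = names    # loss labels, e.g. ['loss', 'rec', 'adv', 'd0', 'd1']
        self.clear()

    def clear(self):
        self.values = [0.0] * len(self.names)

    def add(self, values):
        for i, v in enumerate(values):
            self.values[i] += v / self.div

    def output(self, prefix):
        print(prefix + ' ' + ' '.join(
            '%s %.2f' % (name, value)
            for name, value in zip(self.names, self.values)))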
Example #3
import socket
from datetime import datetime, timedelta

import RPi.GPIO as GPIO  # assumed: the GPIO.* calls below match the RPi.GPIO API


class TemperatureControl:
    def __init__(self, gpio, high_threshold):
        self.gpio = gpio
        self.actual_temperature = Accumulator(60)  # project-local helper; get_mean() below suggests a rolling mean over 60 readings
        self.high_threshold = float(high_threshold)
        self.run_period = timedelta(minutes=10)
        self.cooldown_period = timedelta(minutes=15)
        self.start_time = None
        self.stop_time = datetime.now() - self.cooldown_period
        self.cooling_command = False
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.MCAST_GRP = "224.0.0.2"
        self.MCAST_PORT = 10003
        
        GPIO.setup(self.gpio, GPIO.OUT, initial=GPIO.HIGH)
        #self.__set_output(GPIO.HIGH)

    def __del__(self):
        GPIO.cleanup(self.gpio)

    def set_high_threshold(self, high_threshold):
        self.high_threshold = float(high_threshold)  # keep the type consistent with __init__

    def update(self, temp):
        self.actual_temperature.add(temp)

        # debug trace of the controller state
        print('%s, %s, %s, %s, %s, %s' % (
            self.actual_temperature.get_mean(), self.run_period,
            self.cooldown_period, self.cooling_command,
            self.start_time, self.stop_time))

        # The algorithm could instead use two variables, a target temperature and a
        # delta: with a proper model of the freezer/wort bucket, compute
        # time_on = [model stuff that predicts the time required to reach target - delta/2]
        # and recompute it only when temperature >= target + delta/2.
        # This works even better with the thresholds.

        if self.cooling_command:
            run_duration = datetime.now() - self.start_time
            if run_duration >= self.run_period:
                self.cooling_command = False
                self.stop_time = datetime.now()
        else:
            if self.actual_temperature.get_mean() >= self.high_threshold:
                cooldown_duration = datetime.now() - self.stop_time
                if cooldown_duration >= self.cooldown_period:
                    self.cooling_command = True
                    self.start_time = datetime.now()
        
        self.__update_cooling()
    
    def __update_cooling(self):
        if self.cooling_command:
            self.__set_output(GPIO.LOW)
        else:
            self.__set_output(GPIO.HIGH)

    def __set_output(self, value):
        # Active-low relay: GPIO.LOW switches cooling on, so the multicast
        # status message is the logical inverse of the raw pin value
        # ("1" = cooling on, "0" = cooling off).
        message = "0" if value else "1"
        self.udp_socket.sendto(message.encode('utf-8'),
                               (self.MCAST_GRP, self.MCAST_PORT))
        GPIO.output(self.gpio, value)
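For context, a hypothetical driving loop for this controller: poll the probe once per second and let update() apply the run/cooldown hysteresis. read_temperature(), the pin number, and the threshold below are illustrative assumptions, not part of the project.

import time

# Hypothetical usage sketch; read_temperature() stands in for whatever
# probe-sampling helper the project actually uses.
controller = TemperatureControl(gpio=17, high_threshold=19.5)
while True:
    controller.update(read_temperature())
    time.sleep(1)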
Example #4
    def test_prove_verify_all(self):
        acc = Accumulator()
        prover = Prover(acc)
        R = [NIL]
        for el in elements:
            acc.add(el)
            R.append(acc.get_root())

        for j in range(1, len(elements) + 1):
            w = prover.prove(j)

            result = verify(acc.get_root(), len(acc), j, w, elements[j - 1])
            assert result
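The Accumulator exercised by this test is a cryptographic one (add/get_root/__len__, a Prover that emits witnesses, and a standalone verify), unrelated to the loss tracker in the other examples. The tested implementation is not shown; as a deliberately naive stand-in that satisfies the same interface, the hash-chain sketch below passes this exact test, though unlike real accumulator constructions its witnesses grow linearly with the number of elements.

import hashlib

NIL = b'\x00' * 32  # assumed sentinel; the test imports its own NIL


def H(*parts):
    h = hashlib.sha256()
    for part in parts:
        h.update(part)
    return h.digest()


class Accumulator(object):
    """Toy append-only hash chain: root_i = H(root_{i-1}, element_i)."""

    def __init__(self):
        self.elements = []
        self.roots = [NIL]

    def __len__(self):
        return len(self.elements)

    def add(self, element):
        self.elements.append(element)
        self.roots.append(H(self.roots[-1], element))

    def get_root(self):
        return self.roots[-1]


class Prover(object):
    def __init__(self, acc):
        self.acc = acc

    def prove(self, j):
        # Witness for the j-th element (1-based): the root just before it
        # was added, plus every element appended after it.
        return (self.acc.roots[j - 1], list(self.acc.elements[j:]))


def verify(root, n, j, w, element):
    prev_root, tail = w
    if len(tail) != n - j:
        return False
    current = H(prev_root, element)  # reconstruct root_j ...
    for later in tail:
        current = H(current, later)  # ... then fold in elements j+1 .. n
    return current == root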
Example #5
def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0,
                                          data1,
                                          vocab.word2id,
                                          args.batch_size,
                                          max_seq_len=args.max_seq_length)

    # data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(
        len(batches),
        ['loss', 'rec', 'adv', 'd0', 'd1', 'loss_rec_cyc', 'loss_kld'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2
        # data0_rec += rec[:half]
        # data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1, loss_rec_cyc, loss_kld = \
          sess.run([model.loss,
                    model.loss_rec, model.loss_adv, model.loss_d0, model.loss_d1,
                    model.loss_rec_cyc, model.kld_loss],
                   feed_dict=feed_dictionary(model=model,
                                             batch=batch,
                                             rho=args.rho,
                                             epsilon=args.epsilon,
                                             gamma=args.gamma_min,
                                             anneal=args.anneal,
                                             C=args.C))

        # feed_dict order: model, batch, rho, epsilon, gamma, dropout=1, learning_rate=None, anneal=1
        losses.add([
            loss, loss_rec, loss_adv, loss_d0, loss_d1, loss_rec_cyc, loss_kld
        ])

    n0, n1 = len(data0), len(data1)
    # data0_rec = reorder(order0, data0_rec)[:n0]
    # data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        # write_sent(data0_rec, out_path+'.0'+'.rec')
        # write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path + 'formal' + '.tsf')
        write_sent(data1_tsf, out_path + 'informal' + '.tsf')

    return losses
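None of these snippets include feed_dictionary itself. Going only by the argument order recorded in the comment inside the loop above (which omits the C that the call passes, so the real signature evidently grew), a plausible sketch follows; every placeholder and batch-field name in it is an assumption, and the first example's variant clearly takes fewer arguments.

# Plausible sketch of feed_dictionary, reconstructed from the comment above.
# Placeholder names (model.rho, model.enc_inputs, ...) and batch keys are
# assumptions, not confirmed by these snippets.
def feed_dictionary(model, batch, rho, epsilon, gamma,
                    dropout=1, learning_rate=None, anneal=1, C=None):
    return {
        model.dropout: dropout,
        model.learning_rate: learning_rate,
        model.rho: rho,
        model.epsilon: epsilon,
        model.gamma: gamma,
        model.anneal: anneal,
        model.C: C,
        model.batch_len: batch['len'],
        model.batch_size: batch['size'],
        model.enc_inputs: batch['enc_inputs'],
        model.dec_inputs: batch['dec_inputs'],
        model.targets: batch['targets'],
        model.weights: batch['weights'],
        model.labels: batch['labels'],
    }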
Example #6
                    # do not back-propagate from the discriminator
                    # when it is too poor
                    if loss_d0 < 1.2 and loss_d1 < 1.2:
                        optimize = model.optimize_tot
                    else:
                        optimize = model.optimize_rec

                    loss, loss_rec, loss_adv, _, loss_rec_cyc, loss_vae = sess.run(
                        [
                            model.loss, model.loss_rec, model.loss_adv,
                            optimize, model.loss_rec_cyc, model.kld_loss
                        ],
                        feed_dict=feed_dict)
                    losses.add([
                        loss, loss_rec, loss_adv, loss_d0, loss_d1,
                        loss_rec_cyc, loss_vae
                    ])

                    # grad_rec, grad_adv, grad = sess.run([model.grad_rec_norm,
                    #    model.grad_adv_norm, model.grad_norm],
                    #    feed_dict=feed_dict)
                    # gradients.add([grad_rec, grad_adv, grad])

                    step += 1
                    if step % args.steps_per_checkpoint == 0:
                        losses.output('step %d, time %.0fs,' %
                                      (step, time.time() - start_time))
                        losses.clear()

                        checkpoint_path = os.path.join(args.model,
                                                       'model.ckpt')
Example #7
    def test_size(self):
        acc = Accumulator()
        assert len(acc) == 0
        for i in range(len(elements)):
            acc.add(elements[i])
            assert len(acc) == i + 1
Example #8
                    loss_d0, _ = sess.run([model.loss_d0, model.optimize_d0],
                                          feed_dict=feed_dict)
                    loss_d1, _ = sess.run([model.loss_d1, model.optimize_d1],
                                          feed_dict=feed_dict)

                    # do not back-propagate from the discriminator
                    # when it is too poor
                    if loss_d0 < 1.2 and loss_d1 < 1.2:
                        optimize = model.optimize_tot
                    else:
                        optimize = model.optimize_rec

                    loss, loss_rec, loss_adv, _ = sess.run(
                        [model.loss, model.loss_rec, model.loss_adv, optimize],
                        feed_dict=feed_dict)
                    losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

                    #grad_rec, grad_adv, grad = sess.run([model.grad_rec_norm,
                    #    model.grad_adv_norm, model.grad_norm],
                    #    feed_dict=feed_dict)
                    #gradients.add([grad_rec, grad_adv, grad])

                    step += 1
                    if step % args.steps_per_checkpoint == 0:
                        losses.output('step %d, time %.0fs,' %
                                      (step, time.time() - start_time))
                        losses.clear()

                        #gradients.output()
                        #gradients.clear()
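A plausible reading of the 1.2 cutoff in these loops: chance-level cross-entropy for a binary discriminator is ln 2 ≈ 0.69, so a loss well above that means the discriminator is close to uninformative; back-propagating its adversarial signal would mostly inject noise, and the code falls back to pure reconstruction (optimize_rec) until the discriminators recover.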
Example #9
def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0, data1, vocab.word2id,
                                          args.batch_size)

    #data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(len(batches), ['loss', 'rec', 'adv', 'd0', 'd1'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2  # // already yields an int
        #data0_rec += rec[:half]
        #data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1 = sess.run(
            [
                model.loss, model.loss_rec, model.loss_adv, model.loss_d0,
                model.loss_d1
            ],
            feed_dict=feed_dictionary(model, batch, args.rho, args.gamma_min))
        losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

    n0, n1 = len(data0), len(data1)
    #data0_rec = reorder(order0, data0_rec)[:n0]
    #data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        #write_sent(data0_rec, out_path+'.0'+'.rec')
        #write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path + '.0' + '.tsf')
        write_sent(data1_tsf, out_path + '.1' + '.tsf')

    return losses
Example #10
def run(sess, batch_gen, dataset, model, params):

    S_batches, T_batches = batch_gen
    train_set, val_set, tar_un_set, test_set = dataset

    n_train = len(train_set['lm_labels'])
    batch_num = n_train // params.batch_size  # integer count; plain / would give a float in Python 3
    print('training number:%d, batch num:%d' % (n_train, batch_num))

    best_val_ote_score, best_val_ts_score = -999.0, -999.0
    best_epoch = -1

    start_time = time.time()
    losses = Accumulator(
        ['loss', 'asp_loss', 'ts_loss', 'opn_loss', 'ote_transfer_loss'],
        batch_num)

    for epoch in range(params.n_epoch):

        cur_lr = params.lr

        for i in range(batch_num):

            xs, win_xs, length_s, ys_ote, ys_ts, ys_opn, ys_stm, _, _ = \
                S_batches.next()
            xt, win_xt, length_t, yt_ote, yt_ts, yt_opn, yt_stm, _, _ = \
                T_batches.next()
            x = np.vstack([xs, xt])
            win_x = np.vstack([win_xs, win_xt])
            length = np.hstack([length_s, length_t])
            y_ote = np.vstack([ys_ote, yt_ote])
            y_ts = np.vstack([ys_ts, yt_ts])
            y_opn = np.vstack([ys_opn, yt_opn])
            y_stm = np.vstack([ys_stm, yt_stm])

            feed_dict = get_train_feed_dict(model,
                                            x,
                                            win_x,
                                            length,
                                            y_ote,
                                            y_ts,
                                            y_opn,
                                            y_stm,
                                            cur_lr,
                                            params.dropout_rate,
                                            train_flag=True)

            _, loss, asp_loss, ts_loss, opn_loss = sess.run(
                [
                    model.train_op, model.loss, model.asp_loss, model.ts_loss,
                    model.opn_loss
                ],
                feed_dict=feed_dict)
            _, ote_transfer_loss = sess.run(
                [model.ote_transfer_op, model.ote_transfer_loss],
                feed_dict=feed_dict)
            losses.add([loss, asp_loss, ts_loss, opn_loss, ote_transfer_loss])

        if epoch % params.evaluation_interval == 0:

            print('--------------------epoch %d--------------------' %
                  (epoch + 1))
            print('learning_rate:', cur_lr)

            losses.output('time %.5fs,' % (time.time() - start_time))
            losses.clear()

            train_ote_scores, train_ts_scores, _, _ = eval_metric(
                sess, model, params, train_set, domain_flag=True)
            train_ote_p, train_ote_r, train_ote_f1 = train_ote_scores
            train_ts_macro_f1, train_ts_micro_p, train_ts_micro_r, train_ts_micro_f1 = train_ts_scores

            print(
                "train performance: ote: precision: %.4f, recall: %.4f, f1: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f"
                % (train_ote_p, train_ote_r, train_ote_f1, train_ts_micro_p,
                   train_ts_micro_r, train_ts_micro_f1))

            val_ote_scores, val_ts_scores, _, _ = eval_metric(sess,
                                                              model,
                                                              params,
                                                              val_set,
                                                              domain_flag=True)
            val_ote_p, val_ote_r, val_ote_f1 = val_ote_scores
            val_ts_macro_f1, val_ts_micro_p, val_ts_micro_r, val_ts_micro_f1 = val_ts_scores

            print(
                "val performance: ote: precision: %.4f, recall: %.4f, f1: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f"
                % (val_ote_p, val_ote_r, val_ote_f1, val_ts_micro_p,
                   val_ts_micro_r, val_ts_micro_f1))

            if args.selection_schema == 'OTE_TS':
                if val_ts_micro_f1 > best_val_ts_score and val_ote_f1 > best_val_ote_score:
                    best_val_ts_score = val_ts_micro_f1
                    best_val_ote_score = val_ote_f1
                    best_epoch = epoch + 1
                    print("Save...")
                    model.save_model(sess)
            elif args.selection_schema == 'TS':
                if val_ts_micro_f1 > best_val_ts_score:
                    best_val_ts_score = val_ts_micro_f1
                    best_val_ote_score = val_ote_f1
                    best_epoch = epoch + 1
                    print("Save...")
                    model.save_model(sess)

    print('Store the best model at the epoch: %d\n' % best_epoch)
Example #11
                    loss_d0, _ = sess.run([model.loss_d0, model.optimize_d0],
                        feed_dict=feed_dict)
                    loss_d1, _ = sess.run([model.loss_d1, model.optimize_d1],
                        feed_dict=feed_dict)

                    # do not back-propagate from the discriminator
                    # when it is too poor
                    if loss_d0 < 1.2 and loss_d1 < 1.2:
                        optimize = model.optimize_tot
                    else:
                        optimize = model.optimize_rec

                    loss, loss_rec, loss_adv, _ = sess.run([model.loss,
                        model.loss_rec, model.loss_adv, optimize],
                        feed_dict=feed_dict)
                    losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

                    #grad_rec, grad_adv, grad = sess.run([model.grad_rec_norm,
                    #    model.grad_adv_norm, model.grad_norm],
                    #    feed_dict=feed_dict)
                    #gradients.add([grad_rec, grad_adv, grad])

                    step += 1
                    if step % args.steps_per_checkpoint == 0:
                        losses.output('step %d, time %.0fs,'
                            % (step, time.time() - start_time))
                        losses.clear()

                        #gradients.output()
                        #gradients.clear()
Example #12
                    # if epoch == 1:
                    # print(linearoutput)
                    loss_d1, _ = sess.run([model.loss_d1, model.optimize_d1],
                                          feed_dict=feed_dict)

                    # do not back-propagate from the discriminator
                    # when it is too poor
                    if loss_d0 < 1.2 and loss_d1 < 1.2:
                        optimize = model.optimize_tot
                    else:
                        optimize = model.optimize_rec

                    loss, loss_rec, loss_adv, _ = sess.run(
                        [model.loss, model.loss_rec, model.loss_adv, optimize],
                        feed_dict=feed_dict)
                    losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

                    step += 1
                    if step % args.steps_per_checkpoint == 0:
                        losses.output('step %d, time %.0fs,' %
                                      (step, time.time() - start_time))
                        losses.clear()
                if args.dev:
                    dev_losses = transfer(model, decoder, sess, args, vocab,
                                          dev0, dev1,
                                          args.output + '.epoch%d' % epoch)
                    dev_losses.output('dev')
                    if dev_losses.values[0] < best_dev:
                        best_dev = dev_losses.values[0]
                        print('saving model...')
                        model.saver.save(sess, args.model)