Example no. 1
def train():
    optim = tf.train.AdamOptimizer(args.meta_lr)
    train_op = get_train_op(optim, net['cent'], clip=[-10., 10.])

    saver = tf.train.Saver(tf.trainable_variables())
    logfile = open(os.path.join(savedir, 'train.log'), 'w')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    # train
    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]

    for i in range(args.n_train_iters + 1):
        # feed_dict
        epi = model.episodes
        placeholders = [epi['xs'], epi['ys'], epi['xq'], epi['yq']]
        episode = data.generate_episode(args,
                                        training=True,
                                        n_episodes=args.metabatch)
        fdtr = dict(zip(placeholders, episode))

        train_logger.accum(sess.run(train_to_run, feed_dict=fdtr))

        if i % 5 == 0:
            line = 'Iter %d start, learning rate %f' % (i, args.meta_lr)
            print('\n' + line)
            logfile.write('\n' + line + '\n')
            train_logger.print_(header='train',
                                episode=i * args.metabatch,
                                logfile=logfile)
            train_logger.clear()

        if i % 20 == 0:
            # validation (with test classes... be cautious!)
            test_logger = Accumulator('cent', 'acc')
            test_to_run = [net['cent'], net['acc']]
            for j in range(10):
                # feed_dict
                epi = model.episodes
                placeholders = [epi['xs'], epi['ys'], epi['xq'], epi['yq']]
                episode = data.generate_episode(args,
                                                training=False,
                                                n_episodes=args.metabatch)
                fdte = dict(zip(placeholders, episode))
                test_logger.accum(sess.run(test_to_run, feed_dict=fdte))

            test_logger.print_(header='test ',
                               episode=i * args.metabatch,
                               logfile=logfile)
            test_logger.clear()

        if i % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))

    logfile.close()
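
The Accumulator('cent', 'acc') helper used above is not shown in this snippet. Below is a minimal sketch consistent with the call sites (positional metric names, accum() fed the raw output of sess.run including the None returned by train_op, print_() reporting running means to stdout and a logfile, clear() resetting the state); the internals are an assumption inferred from usage, not the original implementation.

class Accumulator(object):
    # Running-mean tracker for named scalars; a sketch inferred from how it is
    # called above, not the original implementation.
    def __init__(self, *names):
        self.names = names
        self.clear()

    def clear(self):
        self.sums = [0.0] * len(self.names)
        self.cnt = 0

    def accum(self, vals):
        vals = vals if isinstance(vals, (list, tuple)) else [vals]
        vals = [v for v in vals if v is not None]  # drop the None from train_op
        assert len(vals) == len(self.names)
        self.sums = [s + float(v) for s, v in zip(self.sums, vals)]
        self.cnt += 1

    def print_(self, header='', logfile=None, **kwargs):
        means = [s / max(self.cnt, 1) for s in self.sums]
        line = header + ' ' + ' '.join(
            '%s %.4f' % (n, m) for n, m in zip(self.names, means))
        print(line)
        if logfile is not None:
            logfile.write(line + '\n')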
Example no. 2
def train():
    if args.model == 'softmax':
        loss = net['cent'] + net['wd']
    else:
        loss = net['cent'] + net['wd'] + net['kl'] + net['aux'] + net['neg_ent']

    global_step = tf.train.get_or_create_global_step()
    lr_step = n_train_batches * args.n_epochs // 3
    lr = tf.train.piecewise_constant(tf.cast(global_step,
                                             tf.int32), [lr_step, lr_step * 2],
                                     [1e-3, 1e-4, 1e-5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 0)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    val_logger = Accumulator('cent', 'acc')
    val_to_run = [tnet['cent'], tnet['acc']]

    for i in range(args.n_epochs):
        # shuffle the training data every epoch
        xytr = np.concatenate((xtr, ytr), axis=1)
        np.random.shuffle(xytr)
        xtr_, ytr_ = xytr[:, :784], xytr[:, 784:]

        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = xtr_[j * bs:(j + 1) * bs, :], ytr_[j * bs:(j + 1) * bs, :]
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

        val_logger.clear()
        for j in range(n_val_batches):
            bx, by = xva[j * bs:(j + 1) * bs, :], yva[j * bs:(j + 1) * bs, :]
            val_logger.accum(sess.run(val_to_run, {x: bx, y: by}))
        val_logger.print_(header='val',
                          epoch=i + 1,
                          time=time.time() - start,
                          logfile=logfile)
        print()
        logfile.write('\n')

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
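
For reference, tf.train.piecewise_constant(x, boundaries, values) expects len(values) == len(boundaries) + 1, so the two boundaries above yield the three-phase schedule 1e-3, 1e-4, 1e-5. A small standalone check of that behaviour (TF 1.x graph mode, illustrative boundary values, not taken from the snippet):

import tensorflow as tf

step = tf.placeholder(tf.int32, shape=[])
lr = tf.train.piecewise_constant(step, [100, 200], [1e-3, 1e-4, 1e-5])

with tf.Session() as sess:
    for s in (50, 150, 250):
        print(s, sess.run(lr, {step: s}))  # -> 1e-3, 1e-4, 1e-5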
Example no. 3
def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0, data1,
        vocab.word2id, args.batch_size)

    #data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(len(batches), ['loss', 'rec', 'adv', 'd0', 'd1'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2
        #data0_rec += rec[:half]
        #data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1 = sess.run([model.loss,
            model.loss_rec, model.loss_adv, model.loss_d0, model.loss_d1],
            feed_dict=feed_dictionary(model, batch, args.rho, args.gamma_min))
        losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

    n0, n1 = len(data0), len(data1)
    #data0_rec = reorder(order0, data0_rec)[:n0]
    #data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        #write_sent(data0_rec, out_path+'.0'+'.rec')
        #write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path+'.0'+'.tsf')
        write_sent(data1_tsf, out_path+'.1'+'.tsf')

    return losses
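
Here Accumulator is constructed with a divisor (the number of batches) and a list of metric names, and add() is called once per batch. A plausible minimal implementation, in which each add() contributes value / div so the stored numbers end up as per-pass means, could be the following; the output() helper and the exact field names are assumptions.

class Accumulator(object):
    # Sketch of a loss accumulator: div is the number of add() calls expected,
    # so the stored values converge to per-pass means.
    def __init__(self, div, names):
        self.div = div
        self.names = names
        self.values = [0.0] * len(names)

    def clear(self):
        self.values = [0.0] * len(self.names)

    def add(self, values):
        self.values = [x + v / self.div for x, v in zip(self.values, values)]

    def output(self, prefix=''):
        return prefix + ' ' + ' '.join(
            '%s %.2f' % (n, v) for n, v in zip(self.names, self.values))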
Example no. 4
def main():
    feed = csvfeed.GenericBarFeed(frequency=Frequency.MINUTE)
    feed.addBarsFromCSV("BTC", "sampledata.csv")

    # Evaluate the strategy with the feed's bars.
    myStrategy = Accumulator(feed, "BTC", buy_offset=0.0, buy_percent=0.49)
    # myStrategy.run()

    returnsAnalyzer = returns.Returns()
    myStrategy.attachAnalyzer(returnsAnalyzer)
    tradesAnalyzer = trades.Trades()
    myStrategy.attachAnalyzer(tradesAnalyzer)

    plt = plotter.StrategyPlotter(myStrategy)
    # Include the SMA in the instrument's subplot to get it displayed along with the closing prices.
    plt.getInstrumentSubplot("BTC").addDataSeries("SMA", myStrategy.getSMA())
    # Plot the simple returns on each bar.
    plt.getOrCreateSubplot("returns").addDataSeries(
        "Simple returns", returnsAnalyzer.getReturns())

    # Run the strategy.
    myStrategy.run()
    myStrategy.info("Final portfolio value: $%.2f" % myStrategy.getResult())

    # Plot the strategy.
    plt.plot()

    print("Final portfolio value: $%.2f" % myStrategy.getResult())
Example no. 5
def test():
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    for j in range(n_test_batches):
        logger.accum(sess.run([tcent, tacc]))
    logger.print_(header='test')
Example no. 6
    def Compute(self, reverse=False, sign=True):
        """Compute the properties of the polygon

        :param reverse: if true then clockwise (instead of
        counter-clockwise) traversal counts as a positive area
        :param sign: if true then return a signed result for the area if the
        polygon is traversed in the "wrong" direction instead of returning
        the area for the rest of the earth
        :return: a tuple of number, perimeter (meters), area (meters^2)

        If the object is a polygon (and not a polyline), the perimeter
        includes the length of a final edge connecting the current point to
        the initial point.  If the object is a polyline, then area is nan.

        More points can be added to the polygon after this call.

        """
        if self.polyline:
            area = Math.NAN
        if self.num < 2:
            perimeter = 0.0
            if not self.polyline:
                area = 0.0
            return self.num, perimeter, area

        if self.polyline:
            perimeter = self._perimetersum.Sum()
            return self.num, perimeter, area

        _, s12, _, _, _, _, _, _, _, S12 = self.earth._GenInverse(
            self.lat1, self.lon1, self._lat0, self._lon0, self._mask
        )
        perimeter = self._perimetersum.Sum(s12)
        tempsum = Accumulator(self._areasum)
        tempsum.Add(S12)
        crossings = self._crossings + PolygonArea._transit(self.lon1, self._lon0)
        if crossings & 1:
            tempsum.Add((1 if tempsum.Sum() < 0 else -1) * self.area0 / 2)
        # area is with the clockwise sense.  If !reverse convert to
        # counter-clockwise convention.
        if not reverse:
            tempsum.Negate()
        # If sign put area in (-area0/2, area0/2], else put area in [0, area0)
        if sign:
            if tempsum.Sum() > self.area0 / 2:
                tempsum.Add(-self.area0)
            elif tempsum.Sum() <= -self.area0 / 2:
                tempsum.Add(self.area0)
        else:
            if tempsum.Sum() >= self.area0:
                tempsum.Add(-self.area0)
            elif tempsum.Sum() < 0:
                tempsum.Add(self.area0)

        area = 0.0 + tempsum.Sum()
        return self.num, perimeter, area
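
For context, this Compute method is normally reached through geographiclib's public API: a PolygonArea is created from a Geodesic, vertices are added with AddPoint, and Compute returns the vertex count, the perimeter in meters, and the (optionally signed) area in square meters. A short usage example against the standard geographiclib API:

from geographiclib.geodesic import Geodesic

poly = Geodesic.WGS84.Polygon()          # a PolygonArea on the WGS84 ellipsoid
for lat, lon in [(0, 0), (0, 1), (1, 1)]:
    poly.AddPoint(lat, lon)              # rough triangle near the equator

num, perimeter, area = poly.Compute(reverse=False, sign=True)
print(num, perimeter, area)              # 3, perimeter in m, signed area in m^2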
Example no. 7
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logfile = open(os.path.join(savedir, 'test.log'), 'w', 0)
    logger = Accumulator('cent', 'acc')
    logger.accum(sess.run([tnet['cent'], tnet['acc']], {x: xte, y: yte}))
    logger.print_(header='test', logfile=logfile)
    logfile.close()
Example no. 8
def train():
    saver = tf.train.Saver()
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 0)

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, cent, acc]
    test_logger = Accumulator('cent', 'acc')
    test_to_run = [tcent, tacc]

    argdict = vars(args)
    print(argdict)
    for k, v in argdict.items():
        logfile.write(k + ': ' + str(v) + '\n')
    logfile.write('\n')

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(args.n_epochs):
        line = 'Epoch %d start, learning rate %f' % (i+1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        start = time.time()
        train_logger.clear()
        for j in range(n_train_batches):
            train_logger.accum(sess.run(train_to_run))
            if (j+1) % args.print_freq == 0:
                train_logger.print_(header='train', epoch=i+1, it=j+1,
                        time=time.time()-start, logfile=logfile)

        if (i+1) % args.eval_freq == 0:
            test_logger.clear()
            for j in range(n_test_batches):
                test_logger.accum(sess.run(test_to_run))
            test_logger.print_(header='test', epoch=i+1,
                    time=time.time()-start, logfile=logfile)

        print()
        logfile.write('\n')

        if (i+1) % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))
Example no. 9
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logger = Accumulator('elbo')
    for j in range(n_test_batches):
        bx = xte[j*args.batch_size:(j+1)*args.batch_size,:]
        logger.accum(sess.run(tnet['elbo'], {x:bx}))
    print()
    logger.print_(header='test')
    print()
Example no. 10
    def test_prove_verify_all(self):
        acc = Accumulator()
        prover = Prover(acc)
        R = [NIL]
        for el in elements:
            acc.add(el)
            R.append(acc.get_root())

        for j in range(1, len(elements) + 1):
            w = prover.prove(j)

            result = verify(acc.get_root(), len(acc), j, w, elements[j - 1])
            assert result
Example no. 11
	def meter(self, rms, dbfs):
		if not self._graceful:
			if self.acc.n < ACCUMULATE:
				self.acc.addValue(dbfs)
			else:
				if self.acc.mean() < -40:
					self.points = self.points + 1
					moteflash()
				sys.stdout.write("\nAccumulation: min{:+8.3f}\tmax{:+8.3f}\tmean{:+8.3f}\tpoints{:4d}\n".format(self.acc.min_value, self.acc.max_value, self.acc.mean(), self.points))
				self.acc = Accumulator(-100,0) # reset accumulator
			mm = 128 + dbfs # motemeter value
			sys.stdout.write("\r{:+08.3f}\t{:+08.3f}".format(dbfs,mm))
			sys.stdout.flush()
			motemeter(mm)
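
The Accumulator(-100, 0) used here tracks recent dBFS readings and exposes n, addValue, mean, min_value and max_value; its implementation is not part of the snippet. A minimal sketch consistent with those call sites, under the assumption that the two constructor arguments are simply the lower and upper bounds of the expected range (used to clamp incoming values), could be:

class Accumulator(object):
    # Sketch of a min/max/mean tracker for dBFS readings. Treating the
    # constructor arguments as clamp bounds is an assumption, not the
    # behaviour of the original class.
    def __init__(self, lower, upper):
        self.lower, self.upper = lower, upper
        self.n = 0
        self.total = 0.0
        self.min_value = float('inf')
        self.max_value = float('-inf')

    def addValue(self, value):
        value = max(self.lower, min(self.upper, value))
        self.n += 1
        self.total += value
        self.min_value = min(self.min_value, value)
        self.max_value = max(self.max_value, value)

    def mean(self):
        return self.total / self.n if self.n else 0.0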
Example no. 12
def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0,
                                          data1,
                                          vocab.word2id,
                                          args.batch_size,
                                          max_seq_len=args.max_seq_length)

    # data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(
        len(batches),
        ['loss', 'rec', 'adv', 'd0', 'd1', 'loss_rec_cyc', 'loss_kld'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2
        # data0_rec += rec[:half]
        # data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1, loss_rec_cyc, loss_kld = \
          sess.run([model.loss,
                    model.loss_rec, model.loss_adv, model.loss_d0, model.loss_d1,
                    model.loss_rec_cyc, model.kld_loss],
                   feed_dict=feed_dictionary(model=model,
                                             batch=batch,
                                             rho=args.rho,
                                             epsilon=args.epsilon,
                                             gamma=args.gamma_min,
                                             anneal=args.anneal,
                                             C=args.C))

        # feed_dict order: model, batch, rho, epsilon, gamma, dropout=1, learning_rate=None, anneal=1
        losses.add([
            loss, loss_rec, loss_adv, loss_d0, loss_d1, loss_rec_cyc, loss_kld
        ])

    n0, n1 = len(data0), len(data1)
    # data0_rec = reorder(order0, data0_rec)[:n0]
    # data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        # write_sent(data0_rec, out_path+'.0'+'.rec')
        # write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path + 'formal' + '.tsf')
        write_sent(data1_tsf, out_path + 'informal' + '.tsf')

    return losses
Example no. 13
 def __init__(self, gpio, high_threshold):
     self.gpio = gpio
     self.actual_temperature = Accumulator(60)
     self.high_threshold = float(high_threshold)
     self.run_period = timedelta(minutes=10)
     self.cooldown_period = timedelta(minutes=15)
     self.start_time = None
     self.stop_time = datetime.now() - self.cooldown_period
     self.cooling_command = False
     self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     self.MCAST_GRP = "224.0.0.2"
     self.MCAST_PORT = 10003
     
     GPIO.setup(self.gpio, GPIO.OUT, initial=GPIO.HIGH)
Example no. 14
    def __init__(self, earth, polyline=False):
        """Construct a PolygonArea object

        :param earth: a :class:`~geographiclib.geodesic.Geodesic` object
        :param polyline: if true, treat object as a polyline instead of a polygon

        Initially the polygon has no vertices.
        """

        from geodesic import Geodesic

        # The geodesic object (readonly)
        self.earth = earth
        # Is this a polyline? (readonly)
        self.polyline = polyline
        # The total area of the ellipsoid in meter^2 (readonly)
        self.area0 = 4 * math.pi * earth._c2
        self._mask = (
            Geodesic.LATITUDE
            | Geodesic.LONGITUDE
            | Geodesic.DISTANCE
            | (
                Geodesic.EMPTY
                if self.polyline
                else Geodesic.AREA | Geodesic.LONG_UNROLL
            )
        )
        if not self.polyline:
            self._areasum = Accumulator()
        self._perimetersum = Accumulator()
        # The current number of points in the polygon (readonly)
        self.num = 0
        # The current latitude in degrees (readonly)
        self.lat1 = Math.NAN
        # The current longitude in degrees (readonly)
        self.lon1 = Math.NAN
        self.Clear()
Example no. 15
 def test_5add_file(self):
     acc: Accumulator = Accumulator('#')
     self.assertTrue(acc.add_file("file1"))
     self.assertTrue(acc.add_file("file2"))
     self.assertFalse(acc.add_file("file2"))
     fs = acc.get_fileset()
     self.assertEqual(2, len(fs))
     self.assertTrue(acc.add_file("file3"))
     self.assertEqual(2, len(fs))
     fs1 = acc.get_fileset()
     self.assertEqual(3, len(fs1))
     self.assertTrue(acc.add_file("file7"))
     self.assertTrue(acc.add_file("file6"))
     for a, b in zip(['file1', 'file2', 'file3', 'file7', 'file6'], acc.get_filelist()):
         self.assertEqual(a, b)
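
The assertions above pin down a contract rather than an implementation: add_file returns False for a path that is already present, get_fileset returns a snapshot copy (adding file3 does not grow the previously returned set), and get_filelist preserves insertion order. A minimal class satisfying just these assertions, ignoring the full_path and path-normalization behaviour exercised in the later test, might be:

class Accumulator(object):
    # Sketch of a file collector: unique paths, insertion order, snapshot copies.
    def __init__(self, comment_char='#'):
        self.comment_char = comment_char
        self._files = []

    def add_file(self, path):
        if path in self._files:
            return False
        self._files.append(path)
        return True

    def get_fileset(self):
        return set(self._files)   # a copy; later add_file calls do not affect it

    def get_filelist(self):
        return list(self._files)  # insertion order preserved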
Example no. 16
	def __init__(self, segment_length=None):
		"""
		:param float segment_length: A float representing `AUDIO_SEGMENT_LENGTH`
		"""
		print("__init__")
		global _soundmeter
		_soundmeter = self  # Register this object globally for use in signal handlers (see below)
		self.output = BytesIO()
		self.audio = pyaudio.PyAudio()
		self.stream = self.audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=FRAMES_PER_BUFFER)
		self.segment_length = segment_length
		self.is_running = False
		self._graceful = False  # Graceful stop switch
		self._data = {}
		self.acc = Accumulator(-100,0)
		self.points = 0
Example no. 17
def transform_text(text):
    tf.compat.v1.disable_eager_execution()
    args = load_arguments()
    ah = vars(args)
    ah['vocab'] = '../model/yelp.vocab'
    ah['model'] = '../model/model'
    ah['load_model'] = True
    ah['beam'] = 8
    ah['batch_size'] = 1
    inp = [text]

    vocab = Vocabulary(args.vocab, args.embedding, args.dim_emb)
    print('vocabulary size:', vocab.size)

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.compat.v1.Session(config=config) as sess:
        model = create_model(sess, args, vocab)
        decoder = beam_search.Decoder(sess, args, vocab, model)
        '''test_losses = transfer(model, decoder, sess, args, vocab,
                               test0, test1, args.output)'''

        batches, order0, order1 = get_batches(inp, inp, vocab.word2id,
                                              args.batch_size)

        data0_tsf, data1_tsf = [], []
        losses = Accumulator(len(batches), ['loss', 'rec', 'adv', 'd0', 'd1'])

        # rec, tsf = decoder.rewrite(inp)

        # print(rec)
        # print(tsf)
        for batch in batches:
            rec, tsf = decoder.rewrite(batch)
            half = batch['size'] // 2
            print("rec:")
            print(rec)
            print("tsf:")
            print(tsf)
            data0_tsf += tsf[:half]
            data1_tsf += tsf[half:]
        n0, n1 = len(inp), len(inp)
        data0_tsf = reorder(order0, data0_tsf)[:n0]
        data1_tsf = reorder(order1, data1_tsf)[:n1]
        print(data0_tsf)
        print(data1_tsf)
Example no. 18
def next_action():
    hourly_start = request.args.get('hourly_start', None)
    hourly_end = request.args.get('hourly_end', None)
    realtime_start = request.args.get('realtime_start', None)
    realtime_end = request.args.get('realtime_end', None)

    body = digest(hourly_start, hourly_end, realtime_start, realtime_end)
    url_query = url_gnu_rl + '/mpc/'
    resp = query(url_query, url_gnu_rl, 'POST', body)

    accumulator = Accumulator(app.logger)

    mpc_dict = resp.json().copy()
    for k in list(mpc_dict.keys()):
        mpc_dict['mpc_' + k] = mpc_dict.pop(k)

    current_dict = body['current'].copy()
    current_dict['dt'] = parse_date(current_dict['dt'], toronto=True)
    n = current_dict['dt']
    current_dict['dt'] = current_dict['dt'].astimezone(get_utc_tz())
    current_dict['dt'] = current_dict['dt'].timestamp()
    current_dict['dt_utc'] = current_dict['dt']
    del current_dict['dt']

    for k in list(current_dict.keys()):
        current_dict[
            'current_' +
            k.replace(" ", "_").replace(".", "").lower()] = current_dict.pop(k)

    try:
        accumulator.add_temperature2(n, value_dict=current_dict)
        accumulator.add_temperature2(n, value_dict=mpc_dict)

    except ValueError as ex:
        app.logger.warn(
            "Accumulator - no value to add - content: {} --- {}".format(
                mpc_dict, ex))

    app.logger.info("Next Action Result : {}".format(resp.json()))
    app.logger.info("NextAction_Setpoint:{}".format(resp.json()['sat_stpt']))

    next_action_result = {
        "mpc": resp.json(),
        "heating_decision": heating_decision(resp.json())
    }

    return next_action_result
Example no. 19
def store_metric_environment():
    envelope = request.get_json()
    if not envelope:
        msg = 'no Pub/Sub message received'
        print(f'error: {msg}')
        return f'Bad Request: {msg}', 400

    if not isinstance(envelope, dict) or 'message' not in envelope:
        msg = 'invalid Pub/Sub message format'
        print(f'error: {msg}')
        return f'Bad Request: {msg}', 400

    pubsub_message = envelope['message']

    payload = ''
    if isinstance(pubsub_message, dict) and 'data' in pubsub_message:
        payload = base64.b64decode(
            pubsub_message['data']).decode('utf-8').strip()

    if "location:house.basement" in payload:
        print(re.match(r"temperature:([0-9]+\.[0-9]+)", payload))
        json_content = {
            "temperature":
            float(
                re.match(".+temperature:([0-9]+\.[0-9]+)",
                         payload).groups()[0]),
            "original_payload":
            payload
        }
        filename = "environment_sensor_basement-" + datetime.now().strftime(
            FORMAT_DATE_DASH)
        create_file(json.dumps(json_content), filename)

        accumulator = Accumulator(app.logger)
        n = utcnow()
        try:
            accumulator.add_temperature(
                n, temp_basement=json_content.get('temperature'))
        except ValueError as ex:
            app.logger.warn(
                "Accumulator - no value to add - content: {} --- {}".format(
                    payload, ex))

    return ('', 204)
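
The temperature is pulled out of the raw Pub/Sub payload with a regular expression; with a hypothetical payload string (the real message format is only partially visible above), the extraction behaves like this:

import re

payload = "location:house.basement temperature:21.57 humidity:40.2"  # hypothetical example
m = re.match(r".+temperature:([0-9]+\.[0-9]+)", payload)
print(float(m.groups()[0]))  # 21.57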
Example no. 20
def acc(j):
    accumulator = Accumulator(app.logger)
    n = utcnow()

    if j.get('temperature') is not None:
        j['temperature'] = float(j.get('temperature'))
    if j.get('humidity') is not None:
        j['humidity'] = float(j.get('humidity'))
    if j.get('stove_exhaust_temp') is not None:
        j['stove_exhaust_temp'] = float(j.get('stove_exhaust_temp'))

    try:
        accumulator.add_temperature2(n, value_dict=j)
    except ValueError as ex:
        app.logger.warn(
            "Accumulator - no value to add - content: {} --- {}".format(
                j, ex))

    return accumulator
Example no. 21
def train():
    loss = -net['elbo']  # negative ELBO

    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [n_train_batches * args.n_epochs // 2],
                                     [1e-3, 1e-4])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 0)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # to run
    train_logger = Accumulator('elbo')
    train_to_run = [train_op, net['elbo']]

    for i in range(args.n_epochs):
        # shuffle the training data
        idx = np.random.choice(range(1000), size=1000, replace=False)
        xtr_ = xtr[idx]

        # run the epoch
        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print('\n' + line)
        logfile.write('\n' + line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx = xtr_[j * args.batch_size:(j + 1) * args.batch_size, :]
            train_logger.accum(sess.run(train_to_run, {x: bx}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

    # save the model
    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
Example no. 22
    def test_7get_filelist(self):
        acc: Accumulator = Accumulator('#')
        self.assertFalse(acc.get_filelist(full_path=False))
        self.assertFalse(acc.get_filelist(full_path=True))
        self.assertTrue(acc.add_file("file1"))
        self.assertEqual("file1", acc.get_filelist(full_path=False)[0])
        self.assertEqual(1, len(acc.get_filelist(full_path=False)))
        fullpath:str = acc.get_filelist(full_path=True)[0]
        self.assertEqual('file1', os.path.basename(fullpath))
        self.assertEqual(1, len(acc.get_filelist(full_path=True)))
        self.assertFalse(acc.add_file("file1"))
        self.assertTrue(acc.add_file("file2"))
        self.assertFalse(acc.add_file("file2"))
        self.assertEqual(['file1', 'file2'], [
            os.path.basename(n)
            for n in acc.get_filelist(full_path=True)
        ])
        self.assertEqual(['file1', 'file2'], acc.get_filelist(full_path=False))
        self.assertFalse(acc.add_file("./file2"))
        self.assertEqual(2, len(acc.get_fileset()))
        #self.assertTrue(acc.add_file('/file2')) true, but this is like k:\file2
        self.assertTrue(acc.add_file("tests/file2"))
        self.assertTrue(acc.add_file("tests/file3"))
        self.assertTrue(acc.add_file("file3"))
        aa = acc.get_filelist(full_path=True)
        self.assertEqual(5, len(aa))
        expected = [  # ! TODO: this is system dependent
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\file1',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\file2',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\tests\\file2',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\tests\\file3',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\file3'
        ]

        self.assertEqual(expected, aa)
        self.assertEqual(5, len(acc.get_fileset()))
        ab = acc.get_filelist(full_path=False)
        self.assertEqual(5, len(ab))
        expected = ['file1', 'file2', 'tests\\file2', 'tests\\file3', 'file3']
        self.assertEqual(expected, ab)
Example no. 23
    def test_2get_lines(self):
        acc: Accumulator = Accumulator('#')
        s5: Slot = Slot(5)
        s5.append("l1,s5\n")
        acc.add_slot(s5)
        s5a: Slot = Slot(5)
        s5a.append('l2,s5\n')
        acc.add_slot(s5a)
        st = acc.slots[5]
        s0: Slot = Slot(0)
        s0.append('l1,s0\n')
        acc.add_slot(s0)
        s99: Slot = Slot(99)
        s99.append('l1,s99\n')
        acc.add_slot(s99)
        acc.slots[5].data.append('l3,s5\n')
        result = acc.get_lines()

        expected = ['# result from processing !UNSPECIFIED!\n',
                    '\n', '#lines from Slot 0\n',
                    '\n', 'l1,s0\n',
                    '\n', '#lines from Slot 5\n',
                    '\n', 'l1,s5\n',
                    'l2,s5\n',
                    'l3,s5\n',
                    '\n',
                    '#lines from Slot 99\n',
                    '\n',
                    'l1,s99\n',
                    '\n']
        for a, b in zip(expected, result[2:]):
            self.assertEqual(a, b)
        acc.add_file('file1')
        acc.add_file('file2')
        result = acc.get_lines()
        fileinfo = result[3:6]
        self.assertEqual(
            ['# Files included by reference:\n', '#\tfile1\n', '#\tfile2\n'],
            fileinfo)
Example no. 24
    def test_1add_slot(self):
        acc: Accumulator = Accumulator('#')
        s5: Slot = Slot(5)
        s5.append("l1,s5")
        acc.add_slot(s5)
        self.assertEqual(set([0, 5, 99]),  acc.slots.keys())
        s5a: Slot = Slot(5)
        s5a.append('l2,s5')
        acc.add_slot(s5a)
        self.assertEqual(set([0, 5, 99]),  acc.slots.keys())
        st = acc.slots[5]
        aa = str(st)
        self.assertEqual('[id:5, len:2, d:(l1,s5, l2,s5)]', str(st))
        s0: Slot = Slot(0)
        s0.append('s0,l1\n')
        acc.add_slot(s0)
        s99: Slot = Slot(99)
        s99.append('s99,l1\n')
        acc.add_slot(s99)

        self.assertEqual(3, len(acc.slots))
        s0 = acc.slots[0]
        self.assertEqual(1, len(s0.data))
        s5 = acc.slots[5]
        self.assertEqual(2, len(s5.data))
        s99 = acc.slots[99]
        self.assertEqual(1, len(s99.data))
        self.assertTrue(5 in acc.slots)
        self.assertFalse(3 in acc.slots)
        acc.add_slot(s99)
        acc.add_slot(s0)
        self.assertEqual(2, len(s99.data))
        self.assertEqual(2, len(s0.data))
        acc.disable_priv_slots()
        acc.add_slot(s99)
        self.assertEqual(2, len(s99.data))
        acc.add_slot(s0)
        self.assertEqual(2, len(s0.data))
Example no. 25
            decoder = greedy_decoding.Decoder(sess, args, vocab, model)

        if args.train:
            batches, _, _ = get_batches(train0,
                                        train1,
                                        vocab.word2id,
                                        args.batch_size,
                                        noisy=True,
                                        unparallel=False,
                                        max_seq_len=args.max_seq_length)
            random.shuffle(batches)

            start_time = time.time()
            step = 0
            losses = Accumulator(
                args.steps_per_checkpoint,
                ['loss', 'rec', 'adv', 'd0', 'd1', "loss_rec_cyc", 'loss_kld'])
            best_dev = float('inf')
            learning_rate = args.learning_rate
            rho = args.rho
            epsilon = args.epsilon
            gamma = args.gamma_init
            dropout = args.dropout_keep_prob
            anneal = args.anneal
            C = args.C

            # gradients = Accumulator(args.steps_per_checkpoint,
            #    ['|grad_rec|', '|grad_adv|', '|grad|'])
            # print("***SCHEDULING C FROM 0.0 to 25.0 ***")

            # C_increase = float(args.C) / (args.max_epochs * len(batches))
Example no. 26
 def test_size(self):
     acc = Accumulator()
     assert (len(acc) == 0)
     for i in range(len(elements)):
         acc.add(elements[i])
         assert len(acc) == i + 1
Example no. 27
elif args.mode == "memb":
    lm = models.MultiEmbLanguageModel(model, args, vocab)
else:
    raise Exception("unrecognized mode")

if args.load: model.populate(args.save)

if not args.evaluate and not args.debug:
    train_batches = util.get_batches(train_data, args.minibatch_size,
                                     args.max_batched_sentence_len)
    valid_batches = util.get_batches(valid_data, args.minibatch_size,
                                     args.max_batched_sentence_len)

best_score = None
args.update_num = 0
train_accumulator = Accumulator(accs, disps)
_start = time.time()
for epoch_i in range(args.epochs):
    args.completed_epochs = epoch_i
    print "Epoch %d. Shuffling..." % epoch_i,
    if epoch_i == 0: train_batches = util.shuffle_preserve_first(train_batches)
    else: random.shuffle(train_batches)
    print "done."

    for i, batch in enumerate(train_batches):
        args.update_num += 1
        dynet.renew_cg()
        result = lm.process_batch(batch, training=True)
        nancheck = result["loss"].value()
        while (not isinstance(numpy.isnan(nancheck), numpy.bool_) and True in numpy.isnan(nancheck)) or \
              (isinstance(numpy.isnan(nancheck), numpy.bool_) and numpy.isnan(nancheck) == True):
Example no. 28
        if args.beam > 1:
            decoder = beam_search.Decoder(sess, args, vocab, model)
        else:
            decoder = greedy_decoding.Decoder(sess, args, vocab, model)

        if args.train:
            batches, _, _ = get_batches(train0,
                                        train1,
                                        vocab.word2id,
                                        args.batch_size,
                                        noisy=True)
            random.shuffle(batches)

            start_time = time.time()
            step = 0
            losses = Accumulator(args.steps_per_checkpoint,
                                 ['loss', 'rec', 'adv', 'd0', 'd1'])
            best_dev = float('inf')
            learning_rate = args.learning_rate
            rho = args.rho
            gamma = args.gamma_init
            dropout = args.dropout_keep_prob

            #gradients = Accumulator(args.steps_per_checkpoint,
            #    ['|grad_rec|', '|grad_adv|', '|grad|'])

            for epoch in range(1, 1 + args.max_epochs):
                print '--------------------epoch %d--------------------' % epoch
                print 'learning_rate:', learning_rate, '  gamma:', gamma

                for batch in batches:
                    feed_dict = feed_dictionary(model, batch, rho, gamma,
Example no. 29
##  Heartbeat
L2B1 = Heartbeat(300)
L2B2 = FtpLocalSave()
L2B1.connect_output(L2B2)

## Temperature sensor
L3B1 = TimedCallback(delay=600)
L3B2 = ReadSensorDht('Box_temperature', 21)
L3B3 = FtpLocalSave()
L3B2.connect_output(L3B3)
L3B1.callback = L3B2.read
L3B1.start()

## Humidity sensor
L4B1 = TimedCallback(delay=600)
L4B2 = ReadSensorDht('Box_humidity', 21, read_humid=True)
L4B3 = FtpLocalSave()
L4B2.connect_output(L4B3)
L4B1.callback = L4B2.read
L4B1.start()

##  Fortis natural gas pulse detection
L1B1 = PulseDetect('WLK_NatGas_MainMeter', 7)
L1B2 = Accumulator()
L1B3 = FtpLocalSave()
L1B1.connect_output(L1B2)
L1B2.connect_output(L1B3)

while (True):
    sleep(1)
Example no. 30
def sim_command_pipeline(pars_obj):

    global test_decimal_shift, theta_decimal_shift

    #------------------ Initializing Pipeline depths ---------------

    NB_PIPELINE_STAGES = 5
    DATAWIDTH = 32
    #-------------- Simulation Initialisations ---------------------

    reset = Signal(bool(1))
    clk = Signal(bool(0))
    elapsed_time = Signal(0)

    clkgen = clk_driver(elapsed_time, clk, period=20)

    #----------------------------------------------------------------

    #----------------- Initializing Pipeline Streams ----------------

    # --- Pipeline Pars
    pars = OperandPipelinePars()
    pars.NB_PIPELINE_STAGES = NB_PIPELINE_STAGES
    pars.DATAWIDTH = DATAWIDTH
    pars.CHANNEL_WIDTH = 2
    global floatDataBus
    if (True == floatDataBus):
        pars.INIT_DATA = 0.0  # requires floating point computation
    else:
        pars.INIT_DATA = 0  # requires intbv computation

    # --- Initializing Pipeline A
    pipe_inpA = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)
    pipe_outA = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)

    operand_a = OperandPipeline()
    ioA = OperandPipelineIo()
    ioA(pars)

    # --- Initializing Pipeline B
    pipe_inpB = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)
    pipe_outB = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH, pars.INIT_DATA)

    operand_b = OperandPipeline()
    ioB = OperandPipelineIo()
    ioB(pars)

    # --- Initializing Command Pipeline
    pipe_multRes = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                              pars.INIT_DATA)
    multcmdFile = '../tests/mult_pipeline.list'
    parsMult = CommandPipelinePars()
    parsMult.DATAWIDTH = pars.DATAWIDTH
    parsMult.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsMult.INIT_DATA = pars.INIT_DATA
    parsMult.STAGE_NB = 1
    parsMult(parsMult, multcmdFile)
    multPipe = CommandPipeline()
    ioMult = CommandPipelineIo()
    ioMult(pars)

    # ---- Initializing Accumulator Block

    pipe_out_acc = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                              pars.INIT_DATA)
    parsAcc = AccumulatorPars()
    parsAcc.DATAWIDTH = pars.DATAWIDTH
    parsAcc.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsAcc.INIT_DATA = pars.INIT_DATA
    global LEN_THETA
    parsAcc.NB_ACCUMULATIONS = LEN_THETA
    accuPipe = Accumulator()
    accuPipe(parsAcc)

    # ---- Initializing Activation Block

    parsActiv = ActivationPars()
    parsActiv.DATAWIDTH = 3  # 0 or 1 for classification
    parsActiv.CHANNEL_WIDTH = pars.CHANNEL_WIDTH
    parsActiv.INIT_DATA = pars.INIT_DATA
    pipe_out_activ = PipelineST(pars.DATAWIDTH, pars.CHANNEL_WIDTH,
                                pars.INIT_DATA)
    activPipe = Activation()
    activPipe(parsActiv)
    #----------------------------------------------------------------

    #----------------- Connecting Pipeline Blocks -------------------

    inst = []
    inst.append(
        operand_a.block_connect(pars, reset, clk, pipe_inpA, pipe_outA, ioA))
    inst.append(
        operand_b.block_connect(pars, reset, clk, pipe_inpB, pipe_outB, ioB))
    #----------------------------------------------------------------

    #----------------- Connecting Command Pipeline -------------------
    # Mult Pipeline
    inst.append(
        multPipe.block_connect(parsMult, reset, clk, ioA, ioB, pipe_multRes,
                               ioMult))

    #----------------------------------------------------------------

    #----------------- Connecting Accumulator  --------------
    # Accu
    inst.append(
        accuPipe.block_connect(parsAcc, reset, clk, 0, pipe_multRes,
                               pipe_out_acc))

    #----------------------------------------------------------------

    #----------------- Connecting Activation  --------------
    # Simple Step Activation function
    inst.append(
        activPipe.block_step_connect(parsActiv, reset, clk, pipe_out_acc,
                                     pipe_out_activ))

    #----------------------------------------------------------------

    #----------------- Logistic Regression Test File -------------------

    lr_test_file = "../tests/ex2data1.txt"
    lr_theta_file = "../tests/theta1.txt"

    #--- Loading Test and Theta Values

    test_file_list = []
    theta_file_list = []

    nb_training_examples = 0
    # Loading test data
    with open(lr_test_file, 'r') as f:
        d0 = 1.0  # Always first element is 1

        for line in f:
            #print line
            d1, d2, y = line.split(',')
            d0 = round(float(d0), DEF_ROUND)
            d1 = round(float(d1), DEF_ROUND)
            d2 = round(float(d2), DEF_ROUND)
            test_file_list.extend([d0, d1, d2])
            label.extend([int(y)])
            nb_training_examples += 1

    #loading theta
    with open(lr_theta_file, 'r') as f:
        t0, t1, t2 = (f.read().split('\n')[0]).split(',')
        t0 = round(float(t0), DEF_ROUND)
        t1 = round(float(t1), DEF_ROUND)
        t2 = round(float(t2), DEF_ROUND)
        for i in range(nb_training_examples):
            theta_file_list.extend([t0, t1, t2])

    # exp10 shifts done for theta and test data as per requirements when intbv used
    if (False == floatDataBus):
        test_file_list = [
            int(i * (10**test_decimal_shift)) for i in test_file_list
        ]
        theta_file_list = [
            int(i * (10**theta_decimal_shift)) for i in theta_file_list
        ]

    #print test_file_list
    #print theta_file_list
    #----------------------------------------------------------------

    #----------------- Shift Enable for pipeData -------------------

    shiftEn_i = Signal(bool(0))

    @always(clk.posedge, reset.posedge)
    def shift_signal():
        if reset:
            shiftEn_i.next = 1
        else:
            shiftEn_i.next = not shiftEn_i

    @always_comb
    def shiftOperand_signal():
        ioB.shiftEn_i.next = shiftEn_i
        ioA.shiftEn_i.next = shiftEn_i

    #----------------------------------------------------------------

    #----------------- Reset For the Module  --------------------

    @always(clk.posedge)
    def stimulus():
        if elapsed_time == 40:
            reset.next = 0

    #----------------------------------------------------------------

    #----------------- Input Data for the Modules  --------------------

    @always_comb
    def transmit_data_process():
        global line_nb
        if (shiftEn_i == 1 and nbTA == nbTB and nbTA < MAX_NB_TRANSFERS):

            pipe_inpA.data.next = (test_file_list[line_nb])
            pipe_inpA.valid.next = 1
            pipe_inpB.data.next = (theta_file_list[line_nb])
            pipe_inpB.valid.next = 1
            line_nb += 1

        else:
            pipe_inpA.valid.next = 0
            pipe_inpB.valid.next = 0

    #----------------------------------------------------------------

    #----------------- Storing Transmitted Data  --------------------

    @always(clk.posedge, reset.posedge)
    def trans_dataA_process():
        global trans_dataA, trans_dataB, nbTA
        if reset == 1:
            pass
        elif (pipe_inpA.valid == 1 and nbTA < MAX_NB_TRANSFERS):
            nbTA += 1
            trans_dataA.extend([pipe_inpA.data])

    @always(clk.posedge, reset.posedge)
    def trans_dataB_process():
        global trans_dataA, trans_dataB, nbTB
        if reset == 1:
            pass
        elif (pipe_inpB.valid == 1 and nbTB < MAX_NB_TRANSFERS):
            nbTB += 1
            trans_dataB.extend([pipe_inpB.data])

    #----------------------------------------------------------------

    #----------------- Storing Received Data  -----------------------

    @always(clk.posedge)
    def receive_data_process():
        global recv_data, nbR, acc_out

        # Collecting multiplier data
        if (pipe_multRes.valid == 1):
            if (False == floatDataBus):
                mult_out = pipe_multRes.data
            else:
                mult_out = (round(pipe_multRes.data, DEF_ROUND))
            recv_data.extend([mult_out])

        # Collecting Activation Data
        if (pipe_out_activ.valid == 1):
            nbR += LEN_THETA
            predict = int(pipe_out_activ.data)
            prediction_res.extend([predict])
            if __debug__:
                print(" prediction: {:d}".format(predict))
            if (nbR == MAX_NB_TRANSFERS):
                raise StopSimulation(
                    "Simulation Finished in %d clks: In total " % now() +
                    str(MAX_NB_TRANSFERS) + " data words received")

        # Collecting Accumulator Data
        if (pipe_out_acc.valid == 1):
            acc_out = pipe_out_acc.data
            #prob=(1.0/(1+ (math.exp(-1.0*acc_out) )))        # Sigmoid activation Function
            if __debug__:
                if (False == floatDataBus):
                    print("{0:d} Acc: {1:d} ".format(int(nbR / LEN_THETA + 1),
                                                     int(acc_out),
                                                     i=DEF_ROUND),
                          end=' ')
                else:
                    print("{0:d} Acc: {1:0.{i}f}".format(int(nbR / LEN_THETA +
                                                             1),
                                                         float(acc_out),
                                                         i=DEF_ROUND),
                          end=' ')
            if (False == floatDataBus):
                acc_out_list.extend([int(acc_out)])
            else:
                acc_out_list.extend([round(acc_out, DEF_ROUND)])
            #print "nbR:" + str(nbR)

    #----------------------------------------------------------------

    #----------------- Max Simulation Time Exit Condition -----------

    @always(clk.posedge)
    def simulation_time_check():
        sim_time_now = now()
        if (sim_time_now > MAX_SIM_TIME):
            raise StopSimulation(
                "Warning! Simulation Exited upon reaching max simulation time of "
                + str(MAX_SIM_TIME) + " clocks")

    #----------------------------------------------------------------
    return instances()