def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0, data1,
        vocab.word2id, args.batch_size)

    #data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(len(batches), ['loss', 'rec', 'adv', 'd0', 'd1'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2  # integer division; '/' would give a float slice index in Python 3
        #data0_rec += rec[:half]
        #data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1 = sess.run([model.loss,
            model.loss_rec, model.loss_adv, model.loss_d0, model.loss_d1],
            feed_dict=feed_dictionary(model, batch, args.rho, args.gamma_min))
        losses.add([loss, loss_rec, loss_adv, loss_d0, loss_d1])

    n0, n1 = len(data0), len(data1)
    #data0_rec = reorder(order0, data0_rec)[:n0]
    #data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        #write_sent(data0_rec, out_path+'.0'+'.rec')
        #write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path+'.0'+'.tsf')
        write_sent(data1_tsf, out_path+'.1'+'.tsf')

    return losses
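
This and the later transfer() examples assume an Accumulator that tracks named losses averaged over a fixed number of batches. A minimal sketch of a compatible helper follows; the attribute names and the output() method are assumptions, not necessarily the actual class from the original style-transfer repository.

class Accumulator(object):
    """Minimal sketch: running means of named values over `div` batches."""

    def __init__(self, div, names):
        self.div = div                    # number of batches averaged over
        self.names = names                # labels such as 'loss', 'rec', ...
        self.values = [0.0] * len(names)

    def add(self, values):
        # accumulate one batch, pre-divided so self.values holds the mean
        for i, v in enumerate(values):
            self.values[i] += v / self.div

    def clear(self):
        self.values = [0.0] * len(self.names)

    def output(self, prefix=''):
        print(prefix + ' '.join('%s %.2f' % (n, v)
                                for n, v in zip(self.names, self.values)))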
Example #3
class LogTest(unittest.TestCase):
    """Base class for Accumulator tests."""

    maxDiff = None
    initial_state = None
    occurrences = []
    expected_log = {}
    expected_state = {
        'quantity': 0,
        'price': 0,
        'results': {},
    }

    def setUp(self):
        """Creates an accumulator and accumulates all occurrences."""
        self.accumulator = Accumulator(ASSET,
                                       state=self.initial_state,
                                       logging=True)
        self.accumulate_occurrences()

    def accumulate_occurrences(self):
        """Accumulates all occurrences defined in the test case."""
        for occurrence in self.occurrences:
            self.accumulator.accumulate(occurrence)

    def test_log(self):
        """Test the log for the defined occurrences."""
        if self.expected_log:
            self.assertEqual(self.accumulator.log, self.expected_log)

    def test_accumulator_state(self):
        """Verifies the state of the accumulator data."""
        for key in self.accumulator.state.keys():
            self.assertEqual(self.accumulator.state[key],
                             self.expected_state[key])
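
LogTest above is a base class; concrete cases override its class attributes. A hypothetical subclass for illustration (DummyOperation and its arguments are made-up stand-ins, not from the real test suite):

class LogTestHypotheticalPurchase(LogTest):
    """Illustrative only: one purchase occurrence and its expected state."""

    occurrences = [
        # DummyOperation is a stand-in for a real occurrence object
        DummyOperation(asset=ASSET, date='2015-01-02', quantity=10, price=5),
    ]
    expected_state = {
        'quantity': 10,
        'price': 5,
        'results': {},
    }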
Example #4
class TemperatureControl:
    def __init__(self, gpio, high_threshold):
        self.gpio = gpio
        self.actual_temperature = Accumulator(60)
        self.high_threshold = float(high_threshold)
        self.run_period = timedelta(minutes=10)
        self.cooldown_period = timedelta(minutes=15)
        self.start_time = None
        self.stop_time = datetime.now() - self.cooldown_period
        self.cooling_command = False
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.MCAST_GRP = "224.0.0.2"
        self.MCAST_PORT = 10003
        
        GPIO.setup(self.gpio, GPIO.OUT, initial=GPIO.HIGH)
        #self.__set_output(GPIO.HIGH)

    def __del__(self):
        GPIO.cleanup(self.gpio)

    def set_high_threshold(self, high_threshold):
        self.high_threshold = high_threshold

    def update(self, temp):
        self.actual_temperature.add(temp)

        # debug output of the controller's current state
        print(", ".join(str(v) for v in (
            self.actual_temperature.get_mean(), self.run_period,
            self.cooldown_period, self.cooling_command,
            self.start_time, self.stop_time)))

        # The algorithm could use two variables, a target temperature and a
        # delta: with a proper model of the freezer/wort bucket, compute
        # time_on = [model output predicting the time needed to reach target - delta/2]
        # and recompute it only when temperature >= target + delta/2.
        # This works even better with the thresholds.

        if self.cooling_command:
            run_duration = datetime.now() - self.start_time
            if run_duration >= self.run_period:
                self.cooling_command = False
                self.stop_time = datetime.now()
        else:
            if self.actual_temperature.get_mean() >= self.high_threshold:
                cooldown_duration = datetime.now() - self.stop_time
                if cooldown_duration >= self.cooldown_period:
                    self.cooling_command = True
                    self.start_time = datetime.now()
        
        self.__update_cooling()
    
    def __update_cooling(self):
        if self.cooling_command:
            self.__set_output(GPIO.LOW)
        else:
            self.__set_output(GPIO.HIGH)

    def __set_output(self, value):
        message = "0" if value else "1"
        self.udp_socket.sendto(message.encode('utf-8'), (self.MCAST_GRP, self.MCAST_PORT))
        GPIO.output(self.gpio, value)
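
A minimal usage sketch for TemperatureControl; read_temperature_sensor() and the GPIO pin number are placeholders, not part of the original code:

import time

control = TemperatureControl(gpio=17, high_threshold=20.0)
while True:
    control.update(read_temperature_sensor())  # placeholder sensor read
    time.sleep(1)  # update() itself enforces the run/cooldown periods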
Example #5
class KegeratorData:
    def __init__(self):
        self.temperature = Accumulator(60)
        self.co2_level = Accumulator(60)
        self.kegs = [KegLevel(0), KegLevel(1)]
    
    def serialize(self):
        co2_level = (self.co2_level.get_mean() - 102) / 60
        keg0_level = (self.kegs[0].level / 12600)
        keg1_level = (self.kegs[1].level / 12600)
        return "%.2f" % self.temperature.get_mean() + ",0,%.2f" % co2_level + "," + str(keg0_level) + "," + str(keg1_level)
Example #6
def run_task(task, aid):
    logger.debug("Running task %r", task)
    try:
        Accumulator.clear()
        result = task.run(aid)
        accumUpdates = Accumulator.values()
        return (task.id, Success(), result, accumUpdates)
    except Exception as e:
        logger.error("error in task %s", task)
        import traceback
        traceback.print_exc()
        return (task.id, OtherFailure("exception:" + str(e)), None, None)
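
This worker-side pattern clears task-local accumulator state, runs the task, and ships the collected updates back with the result. It assumes class-level bookkeeping on Accumulator roughly like the sketch below (a simplification, not DPark's actual implementation):

class Accumulator(object):
    originals = {}    # aid -> accumulator registered on the driver
    localValues = {}  # aid -> value accumulated inside the current task

    @classmethod
    def clear(cls):
        # forget updates left over from a previous task on this worker
        cls.localValues.clear()

    @classmethod
    def values(cls):
        # snapshot of this task's updates, returned alongside the result
        return dict(cls.localValues)

    @classmethod
    def merge(cls, updates):
        # driver side: fold task updates into the registered accumulators
        for aid, value in updates.items():
            cls.originals[aid].add(value)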
Example #8
	def meter(self, rms, dbfs):
		if not self._graceful:
			if self.acc.n < ACCUMULATE:
				self.acc.addValue(dbfs)
			else:
				if self.acc.mean() < -40:
					self.points = self.points + 1
					moteflash()
				sys.stdout.write("\nAccumulation: min{:+8.3f}\tmax{:+8.3f}\tmean{:+8.3f}\tpoints{:4d}\n".format(self.acc.min_value, self.acc.max_value, self.acc.mean(), self.points))
				self.acc = Accumulator(-100,0) # reset accumulator
			mm = 128 + dbfs # motemeter value
			sys.stdout.write("\r{:+08.3f}\t{:+08.3f}".format(dbfs,mm))
			sys.stdout.flush()
			motemeter(mm)
Example #9
def transfer(model, decoder, sess, args, vocab, data0, data1, out_path):
    batches, order0, order1 = get_batches(data0,
                                          data1,
                                          vocab.word2id,
                                          args.batch_size,
                                          max_seq_len=args.max_seq_length)

    # data0_rec, data1_rec = [], []
    data0_tsf, data1_tsf = [], []
    losses = Accumulator(
        len(batches),
        ['loss', 'rec', 'adv', 'd0', 'd1', 'loss_rec_cyc', 'loss_kld'])
    for batch in batches:
        rec, tsf = decoder.rewrite(batch)
        half = batch['size'] // 2
        # data0_rec += rec[:half]
        # data1_rec += rec[half:]
        data0_tsf += tsf[:half]
        data1_tsf += tsf[half:]

        loss, loss_rec, loss_adv, loss_d0, loss_d1, loss_rec_cyc, loss_kld = \
          sess.run([model.loss,
                    model.loss_rec, model.loss_adv, model.loss_d0, model.loss_d1,
                    model.loss_rec_cyc, model.kld_loss],
                   feed_dict=feed_dictionary(model=model,
                                             batch=batch,
                                             rho=args.rho,
                                             epsilon=args.epsilon,
                                             gamma=args.gamma_min,
                                             anneal=args.anneal,
                                             C=args.C))

        # feed_dict order: model, batch, rho, epsilon, gamma, dropout=1, learning_rate=None, anneal=1
        losses.add([
            loss, loss_rec, loss_adv, loss_d0, loss_d1, loss_rec_cyc, loss_kld
        ])

    n0, n1 = len(data0), len(data1)
    # data0_rec = reorder(order0, data0_rec)[:n0]
    # data1_rec = reorder(order1, data1_rec)[:n1]
    data0_tsf = reorder(order0, data0_tsf)[:n0]
    data1_tsf = reorder(order1, data1_tsf)[:n1]

    if out_path:
        # write_sent(data0_rec, out_path+'.0'+'.rec')
        # write_sent(data1_rec, out_path+'.1'+'.rec')
        write_sent(data0_tsf, out_path + 'formal' + '.tsf')
        write_sent(data1_tsf, out_path + 'informal' + '.tsf')

    return losses
Example #10
    def __init__(self, gpio, high_threshold):
        self.gpio = gpio
        self.actual_temperature = Accumulator(60)
        self.high_threshold = float(high_threshold)
        self.run_period = timedelta(minutes=10)
        self.cooldown_period = timedelta(minutes=15)
        self.start_time = None
        self.stop_time = datetime.now() - self.cooldown_period
        self.cooling_command = False
        self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.MCAST_GRP = "224.0.0.2"
        self.MCAST_PORT = 10003

        GPIO.setup(self.gpio, GPIO.OUT, initial=GPIO.HIGH)
Example #11
    def setUp(self):
        self.acc = Accumulator(ASSET)
        self.acc.state['quantity'] = 100
        self.acc.state['price'] = 10
        self.acc.state['results'] = {'trades': 1200}
        event = DummyEvent(asset=ASSET, date='2015-09-29')
        self.acc.accumulate(event)
Example #12
	def __init__(self, segment_length=None):
		"""
		:param float segment_length: A float representing `AUDIO_SEGMENT_LENGTH`
		"""
		print("__init__")
		global _soundmeter
		_soundmeter = self  # Register this object globally for use in signal handlers (see below)
		self.output = BytesIO()
		self.audio = pyaudio.PyAudio()
		self.stream = self.audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=FRAMES_PER_BUFFER)
		self.segment_length = segment_length
		self.is_running = False
		self._graceful = False  # Graceful stop switch
		self._data = {}
		self.acc = Accumulator(-100,0)
		self.points = 0
Example #13
    def setUp(self):
        """Creates an accumulator and accumulates all occurrences."""
        self.accumulator = Accumulator(
            ASSET,
            state=self.initial_state,
            logging=True
        )
        self.accumulate_occurrences()
Example #14
def store_metric_environment():
    envelope = request.get_json()
    if not envelope:
        msg = 'no Pub/Sub message received'
        print(f'error: {msg}')
        return f'Bad Request: {msg}', 400

    if not isinstance(envelope, dict) or 'message' not in envelope:
        msg = 'invalid Pub/Sub message format'
        print(f'error: {msg}')
        return f'Bad Request: {msg}', 400

    pubsub_message = envelope['message']

    payload = ''
    if isinstance(pubsub_message, dict) and 'data' in pubsub_message:
        payload = base64.b64decode(
            pubsub_message['data']).decode('utf-8').strip()

    if "location:house.basement" in payload:
        print(re.match(r"temperature:([0-9]+\.[0-9]+)", payload))
        json_content = {
            "temperature":
            float(
                re.match(".+temperature:([0-9]+\.[0-9]+)",
                         payload).groups()[0]),
            "original_payload":
            payload
        }
        filename = "environment_sensor_basement-" + datetime.now().strftime(
            FORMAT_DATE_DASH)
        create_file(json.dumps(json_content), filename)

        accumulator = Accumulator(app.logger)
        n = utcnow()
        try:
            accumulator.add_temperature(
                n, temp_basement=json_content.get('temperature'))
        except ValueError as ex:
            app.logger.warn(
                "Accumulator - no value to add - content: {} --- {}".format(
                    payload, ex))

    return ('', 204)
Example #15
def acc(j):
    accumulator = Accumulator(app.logger)
    n = utcnow()

    if j.get('temperature') is not None:
        j['temperature'] = float(j.get('temperature'))
    if j.get('humidity') is not None:
        j['humidity'] = float(j.get('humidity'))
    if j.get('stove_exhaust_temp') is not None:
        j['stove_exhaust_temp'] = float(j.get('stove_exhaust_temp'))

    try:
        accumulator.add_temperature2(n, value_dict=j)
    except ValueError as ex:
        app.logger.warn(
            "Accumulator - no value to add - content: {} --- {}".format(
                j, ex))  # was 'payload', which is undefined in this function

    return accumulator
Example #16
class TestAccumulateExercise(unittest.TestCase):
    """Base class for Exercise accumulation tests."""

    def setUp(self):
        self.option_accumulator = Accumulator(OPTION1)
        self.subject_accumulator = Accumulator(ASSET)
        self.exercise = copy.deepcopy(EXERCISE_OPERATION5)

    def fetch_and_accumulate(self):
        """Accumulate a exercise operation on the asset accumulator.

        and on the option accumulator When accumulating operations,
        the Operation object should be passed to the accumulator
        of all its assets.
        """
        self.exercise.fetch_operations()
        for operation in self.exercise.operations:
            self.subject_accumulator.accumulate(operation)
            self.option_accumulator.accumulate(operation)
Example #17
class TestBaseEventAccumulation(unittest.TestCase):
    """The the accumulation of an Event object by the Accumulator."""

    def setUp(self):
        self.acc = Accumulator(ASSET)
        self.acc.state['quantity'] = 100
        self.acc.state['price'] = 10
        self.acc.state['results'] = {'trades': 1200}
        event = DummyEvent(asset=ASSET, date='2015-09-29')
        self.acc.accumulate(event)

    def test_quantity_after_event(self):
        self.assertEqual(self.acc.state['quantity'], 100)

    def test_price_after_event(self):
        self.assertEqual(self.acc.state['price'], 10)

    def test_results_after_event(self):
        self.assertEqual(self.acc.state['results'], {'trades': 1200})
Example #18
class TestAccumulateOption00(unittest.TestCase):
    """Accumulate a Option operation."""
    def setUp(self):
        self.operation = copy.deepcopy(OPTION_OPERATION3)
        self.accumulator = Accumulator(OPTION1)
        self.accumulator.accumulate(self.operation)

    def test_returned_result(self):
        """Check the results of the operation."""
        self.assertEqual(self.operation.results, {})

    def test_accumulator_price(self):
        """Check the cost of the option on the accumulator."""
        self.assertEqual(self.accumulator.state['price'], 10)

    def test_accumulator_quantity(self):
        """Check the quantity of the option on the accumulator."""
        self.assertEqual(self.accumulator.state['quantity'], 100)

    def test_accumulator_results(self):
        """Check the results of the option on the accumulator."""
        self.assertEqual(self.accumulator.state['results'], {})
Example #19
def test():
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    for j in range(n_test_batches):
        logger.accum(sess.run([tcent, tacc]))
    logger.print_(header='test')
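
Example #19 and the training loops below use a logger-style Accumulator: constructed with value names, fed per-batch values via accum(), and reporting running means via print_(). A minimal sketch consistent with that usage (an assumption; the helper in the original TF repositories may differ):

class Accumulator(object):
    def __init__(self, *names):
        self.names = names              # e.g. 'cent', 'acc'
        self.sums = [0.0] * len(names)
        self.cnt = 0

    def accum(self, vals):
        vals = vals if isinstance(vals, (list, tuple)) else [vals]
        # drop a leading None such as the result of running a train_op
        vals = [v for v in vals if v is not None]
        for i, v in enumerate(vals):
            self.sums[i] += v
        self.cnt += 1

    def clear(self):
        self.sums = [0.0] * len(self.names)
        self.cnt = 0

    def print_(self, header=None, logfile=None, **kwargs):
        # epoch=, time= and other kwargs are accepted but ignored in this sketch
        line = ('%s: ' % header) if header else ''
        line += ', '.join('%s %.4f' % (n, s / max(self.cnt, 1))
                          for n, s in zip(self.names, self.sums))
        print(line)
        if logfile is not None:
            logfile.write(line + '\n')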
Example #22
    def __init__(self, earth, polyline=False):
        """Construct a PolygonArea object

        :param earth: a :class:`~geographiclib.geodesic.Geodesic` object
        :param polyline: if true, treat object as a polyline instead of a polygon

        Initially the polygon has no vertices.
        """

        from geodesic import Geodesic

        # The geodesic object (readonly)
        self.earth = earth
        # Is this a polyline? (readonly)
        self.polyline = polyline
        # The total area of the ellipsoid in meter^2 (readonly)
        self.area0 = 4 * math.pi * earth._c2
        self._mask = (
            Geodesic.LATITUDE
            | Geodesic.LONGITUDE
            | Geodesic.DISTANCE
            | (
                Geodesic.EMPTY
                if self.polyline
                else Geodesic.AREA | Geodesic.LONG_UNROLL
            )
        )
        if not self.polyline:
            self._areasum = Accumulator()
        self._perimetersum = Accumulator()
        # The current number of points in the polygon (readonly)
        self.num = 0
        # The current latitude in degrees (readonly)
        self.lat1 = Math.NAN
        # The current longitude in degrees (readonly)
        self.lon1 = Math.NAN
        self.Clear()
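
In geographiclib, the accumulators created here perform compensated (extended-precision) summation so that many small per-edge contributions to the perimeter and area are not lost to floating-point rounding. The toy sketch below shows the underlying idea with Neumaier's variant of Kahan summation; it is illustrative, not geographiclib's actual Accumulator:

class CompensatedSum(object):
    """Toy Neumaier-style compensated accumulator (illustrative only)."""

    def __init__(self):
        self.s = 0.0  # running sum
        self.c = 0.0  # compensation term holding lost low-order bits

    def Add(self, x):
        t = self.s + x
        if abs(self.s) >= abs(x):
            self.c += (self.s - t) + x  # x's low-order bits were lost
        else:
            self.c += (x - t) + self.s  # s's low-order bits were lost
        self.s = t

    def Sum(self):
        return self.s + self.c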
Example #23
    def test_5add_file(self):
        acc: Accumulator = Accumulator('#')
        self.assertTrue(acc.add_file("file1"))
        self.assertTrue(acc.add_file("file2"))
        self.assertFalse(acc.add_file("file2"))
        fs = acc.get_fileset()
        self.assertEqual(2, len(fs))
        self.assertTrue(acc.add_file("file3"))
        self.assertEqual(2, len(fs))
        fs1 = acc.get_fileset()
        self.assertEqual(3, len(fs1))
        self.assertTrue(acc.add_file("file7"))
        self.assertTrue(acc.add_file("file6"))
        for a, b in zip(['file1', 'file2', 'file3', 'file7', 'file6'], acc.get_filelist()):
            self.assertEqual(a, b)
Example #24
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logfile = open(os.path.join(savedir, 'test.log'), 'w', 1)  # line-buffered; Python 3 rejects unbuffered text mode
    logger = Accumulator('cent', 'acc')
    logger.accum(sess.run([tnet['cent'], tnet['acc']], {x: xte, y: yte}))
    logger.print_(header='test', logfile=logfile)
    logfile.close()
Example #25
def transform_text(text):
    tf.compat.v1.disable_eager_execution()
    args = load_arguments()
    ah = vars(args)
    ah['vocab'] = '../model/yelp.vocab'
    ah['model'] = '../model/model'
    ah['load_model'] = True
    ah['beam'] = 8
    ah['batch_size'] = 1
    inp = [text]

    vocab = Vocabulary(args.vocab, args.embedding, args.dim_emb)
    print('vocabulary size:', vocab.size)

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.compat.v1.Session(config=config) as sess:
        model = create_model(sess, args, vocab)
        decoder = beam_search.Decoder(sess, args, vocab, model)
        '''test_losses = transfer(model, decoder, sess, args, vocab,
                               test0, test1, args.output)'''

        batches, order0, order1 = get_batches(inp, inp, vocab.word2id,
                                              args.batch_size)

        data0_tsf, data1_tsf = [], []
        losses = Accumulator(len(batches), ['loss', 'rec', 'adv', 'd0', 'd1'])

        # rec, tsf = decoder.rewrite(inp)

        # print(rec)
        # print(tsf)
        for batch in batches:
            rec, tsf = decoder.rewrite(batch)
            half = batch['size'] // 2
            print("rec:")
            print(rec)
            print("tsf:")
            print(tsf)
            data0_tsf += tsf[:half]
            data1_tsf += tsf[half:]
        n0, n1 = len(inp), len(inp)
        data0_tsf = reorder(order0, data0_tsf)[:n0]
        data1_tsf = reorder(order1, data1_tsf)[:n1]
        print(data0_tsf)
        print(data1_tsf)
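
A call would look like the line below, assuming the relative '../model' paths above point at a trained Yelp model on disk:

transform_text("the food was great and the service was friendly")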
Example #26
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logger = Accumulator('elbo')
    for j in range(n_test_batches):
        bx = xte[j*args.batch_size:(j+1)*args.batch_size,:]
        logger.accum(sess.run(tnet['elbo'], {x:bx}))
    print()
    logger.print_(header='test')
    print()
Example #27
def train():
    loss = -net['elbo']  # negative ELBO

    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [n_train_batches * args.n_epochs // 2],
                                     [1e-3, 1e-4])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 1)  # line-buffered; Python 3 rejects unbuffered text mode

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # to run
    train_logger = Accumulator('elbo')
    train_to_run = [train_op, net['elbo']]

    for i in range(args.n_epochs):
        # shuffle the training data
        idx = np.random.choice(range(1000), size=1000, replace=False)
        xtr_ = xtr[idx]

        # run the epoch
        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print('\n' + line)
        logfile.write('\n' + line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx = xtr_[j * args.batch_size:(j + 1) * args.batch_size, :]
            train_logger.accum(sess.run(train_to_run, {x: bx}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

    # save the model
    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
Example #28
    def test_prove_verify_all(self):
        acc = Accumulator()
        prover = Prover(acc)
        R = [NIL]
        for el in elements:
            acc.add(el)
            R.append(acc.get_root())

        for j in range(1, len(elements) + 1):
            w = prover.prove(j)

            result = verify(acc.get_root(), len(acc), j, w, elements[j - 1])
            assert result
Example #29
    def test_7get_filelist(self):
        acc: Accumulator = Accumulator('#')
        self.assertFalse(acc.get_filelist(full_path=False))
        self.assertFalse(acc.get_filelist(full_path=True))
        self.assertTrue(acc.add_file("file1"))
        self.assertEqual("file1", acc.get_filelist(full_path=False)[0])
        self.assertEqual(1, len(acc.get_filelist(full_path=False)))
        fullpath:str = acc.get_filelist(full_path=True)[0]
        self.assertEqual('file1', os.path.basename(fullpath))
        self.assertEqual(1, len(acc.get_filelist(full_path=True)))
        self.assertFalse(acc.add_file("file1"))
        self.assertTrue(acc.add_file("file2"))
        self.assertFalse(acc.add_file("file2"))
        self.assertEqual(['file1', 'file2'], [
            os.path.basename(n)
            for n in acc.get_filelist(full_path=True)
        ])
        self.assertEqual(['file1', 'file2'], acc.get_filelist(full_path=False))
        self.assertFalse(acc.add_file("./file2"))
        self.assertEqual(2, len(acc.get_fileset()))
        #self.assertTrue(acc.add_file('/file2')) true, but this is like k:\file2
        self.assertTrue(acc.add_file("tests/file2"))
        self.assertTrue(acc.add_file("tests/file3"))
        self.assertTrue(acc.add_file("file3"))
        aa = acc.get_filelist(full_path=True)
        self.assertEqual(5, len(aa))
        expected = [  # ! TODO: this is system dependent
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\file1',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\file2',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\tests\\file2',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\tests\\file3',
            'm:\\Python\\Python3_packages\\cmdfilebuilder\\file3'
        ]

        self.assertEqual(expected, aa)
        self.assertEqual(5, len(acc.get_fileset()))
        ab = acc.get_filelist(full_path=False)
        self.assertEqual(5, len(ab))
        expected = ['file1', 'file2', 'tests\\file2', 'tests\\file3', 'file3']
        self.assertEqual(expected, ab)
Example #30
def next_action():
    hourly_start = request.args.get('hourly_start', None)
    hourly_end = request.args.get('hourly_end', None)
    realtime_start = request.args.get('realtime_start', None)
    realtime_end = request.args.get('realtime_end', None)

    body = digest(hourly_start, hourly_end, realtime_start, realtime_end)
    url_query = url_gnu_rl + '/mpc/'
    resp = query(url_query, url_gnu_rl, 'POST', body)

    accumulator = Accumulator(app.logger)

    mpc_dict = resp.json().copy()
    for k in list(mpc_dict.keys()):
        mpc_dict['mpc_' + k] = mpc_dict.pop(k)

    current_dict = body['current'].copy()
    current_dict['dt'] = parse_date(current_dict['dt'], toronto=True)
    n = current_dict['dt']
    current_dict['dt'] = current_dict['dt'].astimezone(get_utc_tz())
    current_dict['dt'] = current_dict['dt'].timestamp()
    current_dict['dt_utc'] = current_dict['dt']
    del current_dict['dt']

    for k in list(current_dict.keys()):
        current_dict[
            'current_' +
            k.replace(" ", "_").replace(".", "").lower()] = current_dict.pop(k)

    try:
        accumulator.add_temperature2(n, value_dict=current_dict)
        accumulator.add_temperature2(n, value_dict=mpc_dict)

    except ValueError as ex:
        app.logger.warn(
            "Accumulator - no value to add - content: {} --- {}".format(
                mpc_dict, ex))

    app.logger.info("Next Action Result : {}".format(resp.json()))
    app.logger.info("NextAction_Setpoint:{}".format(resp.json()['sat_stpt']))

    next_action_result = {
        "mpc": resp.json(),
        "heating_decision": heating_decision(resp.json())
    }

    return next_action_result
Example #31
    def test_2get_lines(self):
        acc: Accumulator = Accumulator('#')
        s5: Slot = Slot(5)
        s5.append("l1,s5\n")
        acc.add_slot(s5)
        s5a: Slot = Slot(5)
        s5a.append('l2,s5\n')
        acc.add_slot(s5a)
        st = acc.slots[5]
        s0: Slot = Slot(0)
        s0.append('l1,s0\n')
        acc.add_slot(s0)
        s99: Slot = Slot(99)
        s99.append('l1,s99\n')
        acc.add_slot(s99)
        acc.slots[5].data.append('l3,s5\n')
        result = acc.get_lines()

        expected = ['# result from processing !UNSPECIFIED!\n',
                    '\n', '#lines from Slot 0\n',
                    '\n', 'l1,s0\n',
                    '\n', '#lines from Slot 5\n',
                    '\n', 'l1,s5\n',
                    'l2,s5\n',
                    'l3,s5\n',
                    '\n',
                    '#lines from Slot 99\n',
                    '\n',
                    'l1,s99\n',
                    '\n']
        for a, b in zip(expected, result[2:]):
            self.assertEqual(a, b)
        acc.add_file('file1')
        acc.add_file('file2')
        result = acc.get_lines()
        fileinfo = result[3:6]
        self.assertEqual(
            ['# Files included by reference:\n', '#\tfile1\n', '#\tfile2\n'],
            fileinfo)
Example #32
    def test_1add_slot(self):
        acc: Accumulator = Accumulator('#')
        s5: Slot = Slot(5)
        s5.append("l1,s5")
        acc.add_slot(s5)
        self.assertEqual(set([0, 5, 99]),  acc.slots.keys())
        s5a: Slot = Slot(5)
        s5a.append('l2,s5')
        acc.add_slot(s5a)
        self.assertEqual(set([0, 5, 99]),  acc.slots.keys())
        st = acc.slots[5]
        aa = str(st)
        self.assertEqual('[id:5, len:2, d:(l1,s5, l2,s5)]', str(st))
        s0: Slot = Slot(0)
        s0.append('s0,l1\n')
        acc.add_slot(s0)
        s99: Slot = Slot(99)
        s99.append('s99,l1\n')
        acc.add_slot(s99)

        self.assertEqual(3, len(acc.slots))
        s0 = acc.slots[0]
        self.assertEqual(1, len(s0.data))
        s5 = acc.slots[5]
        self.assertEqual(2, len(s5.data))
        s99 = acc.slots[99]
        self.assertEqual(1, len(s99.data))
        self.assertTrue(5 in acc.slots)
        self.assertFalse(3 in acc.slots)
        acc.add_slot(s99)
        acc.add_slot(s0)
        self.assertEqual(2, len(s99.data))
        self.assertEqual(2, len(s0.data))
        acc.disable_priv_slots()
        acc.add_slot(s99)
        self.assertEqual(2, len(s99.data))
        acc.add_slot(s0)
        self.assertEqual(2, len(s0.data))
Example #33
    def test_size(self):
        acc = Accumulator()
        assert len(acc) == 0
        for i in range(len(elements)):
            acc.add(elements[i])
            assert len(acc) == i + 1
Example #34
    def runJob(self, finalRdd, func, partitions, allowLocal):
        outputParts = list(partitions)
        numOutputParts = len(partitions)
        finalStage = self.newStage(finalRdd, None)
        results = [None] * numOutputParts
        finished = [None] * numOutputParts
        lastFinished = 0
        numFinished = 0

        waiting = set()
        running = set()
        failed = set()
        pendingTasks = {}
        lastFetchFailureTime = 0

        self.updateCacheLocs()

        logger.debug("Final stage: %s, %d", finalStage, numOutputParts)
        logger.debug("Parents of final stage: %s", finalStage.parents)
        logger.debug("Missing parents: %s", self.getMissingParentStages(finalStage))

        if (
            allowLocal
            and (not finalStage.parents or not self.getMissingParentStages(finalStage))
            and numOutputParts == 1
        ):
            split = finalRdd.splits[outputParts[0]]
            yield func(finalRdd.iterator(split))
            return

        def submitStage(stage):
            logger.debug("submit stage %s", stage)
            if stage not in waiting and stage not in running:
                missing = self.getMissingParentStages(stage)
                if not missing:
                    submitMissingTasks(stage)
                    running.add(stage)
                else:
                    for parent in missing:
                        submitStage(parent)
                    waiting.add(stage)

        def submitMissingTasks(stage):
            myPending = pendingTasks.setdefault(stage, set())
            tasks = []
            have_prefer = True
            if stage == finalStage:
                for i in range(numOutputParts):
                    if not finished[i]:
                        part = outputParts[i]
                        if have_prefer:
                            locs = self.getPreferredLocs(finalRdd, part)
                            if not locs:
                                have_prefer = False
                        else:
                            locs = []
                        tasks.append(ResultTask(finalStage.id, finalRdd, func, part, locs, i))
            else:
                for p in range(stage.numPartitions):
                    if not stage.outputLocs[p]:
                        if have_prefer:
                            locs = self.getPreferredLocs(stage.rdd, p)
                            if not locs:
                                have_prefer = False
                        else:
                            locs = []
                        tasks.append(ShuffleMapTask(stage.id, stage.rdd, stage.shuffleDep, p, locs))
            logger.debug("add to pending %s tasks", len(tasks))
            myPending |= set(t.id for t in tasks)
            self.submitTasks(tasks)

        submitStage(finalStage)

        while numFinished != numOutputParts:
            try:
                evt = self.completionEvents.get(False)
            except Queue.Empty:
                self.check()
                if self._shutdown:
                    sys.exit(1)

                if failed and time.time() > lastFetchFailureTime + RESUBMIT_TIMEOUT:
                    logger.debug("Resubmitting failed stages")
                    self.updateCacheLocs()
                    for stage in failed:
                        submitStage(stage)
                    failed.clear()
                else:
                    time.sleep(0.1)
                continue

            task = evt.task
            stage = self.idToStage[task.stageId]
            logger.debug("remove from pedding %s from %s", task, stage)
            pendingTasks[stage].remove(task.id)
            if isinstance(evt.reason, Success):
                Accumulator.merge(evt.accumUpdates)
                if isinstance(task, ResultTask):
                    finished[task.outputId] = True
                    numFinished += 1
                    if self.keep_order:
                        results[task.outputId] = evt.result
                        while lastFinished < numOutputParts and finished[lastFinished]:
                            yield results[lastFinished]
                            results[lastFinished] = None
                            lastFinished += 1
                    else:
                        yield evt.result

                elif isinstance(task, ShuffleMapTask):
                    stage = self.idToStage[task.stageId]
                    stage.addOutputLoc(task.partition, evt.result)
                    if not pendingTasks[stage]:
                        logger.debug("%s finished; looking for newly runnable stages", stage)
                        running.remove(stage)
                        if stage.shuffleDep is not None:
                            self.mapOutputTracker.registerMapOutputs(
                                stage.shuffleDep.shuffleId, [l[0] for l in stage.outputLocs]
                            )
                        self.updateCacheLocs()
                        newlyRunnable = set(stage for stage in waiting if not self.getMissingParentStages(stage))
                        waiting -= newlyRunnable
                        running |= newlyRunnable
                        logger.debug("newly runnable: %s, %s", waiting, newlyRunnable)
                        for stage in newlyRunnable:
                            submitMissingTasks(stage)
            else:
                logger.error("task %s failed", task)
                if isinstance(evt.reason, FetchFailed):
                    pass
                else:
                    logger.error("%s %s %s", evt.reason, type(evt.reason), evt.reason.message)
                    raise evt.reason

        assert not any(results)
        return
Example #35
elif args.mode == "memb":
    lm = models.MultiEmbLanguageModel(model, args, vocab)
else:
    raise Exception("unrecognized mode")

if args.load: model.populate(args.save)

if not args.evaluate and not args.debug:
    train_batches = util.get_batches(train_data, args.minibatch_size,
                                     args.max_batched_sentence_len)
    valid_batches = util.get_batches(valid_data, args.minibatch_size,
                                     args.max_batched_sentence_len)

best_score = None
args.update_num = 0
train_accumulator = Accumulator(accs, disps)
_start = time.time()
for epoch_i in range(args.epochs):
    args.completed_epochs = epoch_i
    print "Epoch %d. Shuffling..." % epoch_i,
    if epoch_i == 0: train_batches = util.shuffle_preserve_first(train_batches)
    else: random.shuffle(train_batches)
    print "done."

    for i, batch in enumerate(train_batches):
        args.update_num += 1
        dynet.renew_cg()
        result = lm.process_batch(batch, training=True)
        nancheck = result["loss"].value()
        while numpy.any(numpy.isnan(nancheck)):  # loop while the loss contains any NaN
Example #36
##  Heartbeat
L2B1 = Heartbeat(300)
L2B2 = FtpLocalSave()
L2B1.connect_output(L2B2)

## Temperature sensor
L3B1 = TimedCallback(delay=600)
L3B2 = ReadSensorDht('Box_temperature', 21)
L3B3 = FtpLocalSave()
L3B2.connect_output(L3B3)
L3B1.callback = L3B2.read
L3B1.start()

## Humidity sensor
L4B1 = TimedCallback(delay=600)
L4B2 = ReadSensorDht('Box_humidity', 21, read_humid=True)
L4B3 = FtpLocalSave()
L4B2.connect_output(L4B3)
L4B1.callback = L4B2.read
L4B1.start()

##  Fortis natural gas pulse detection
L1B1 = PulseDetect('WLK_NatGas_MainMeter', 7)
L1B2 = Accumulator()
L1B3 = FtpLocalSave()
L1B1.connect_output(L1B2)
L1B2.connect_output(L1B3)

while True:
    sleep(1)
Example #37
    def setUp(self):
        self.option_accumulator = Accumulator(OPTION1)
        self.subject_accumulator = Accumulator(ASSET)
        self.exercise = copy.deepcopy(EXERCISE_OPERATION5)
Example #38
    with tf.Session(config=config) as sess:
        model = create_model(sess, args, vocab)

        if args.beam > 1:
            decoder = beam_search.Decoder(sess, args, vocab, model)
        else:
            decoder = greedy_decoding.Decoder(sess, args, vocab, model)

        if args.train:
            batches, _, _ = get_batches(train0, train1, vocab.word2id,
                args.batch_size, noisy=True)
            random.shuffle(batches)

            start_time = time.time()
            step = 0
            losses = Accumulator(args.steps_per_checkpoint,
                ['loss', 'rec', 'adv', 'd0', 'd1'])
            best_dev = float('inf')
            learning_rate = args.learning_rate
            rho = args.rho
            gamma = args.gamma_init
            dropout = args.dropout_keep_prob

            #gradients = Accumulator(args.steps_per_checkpoint,
            #    ['|grad_rec|', '|grad_adv|', '|grad|'])

            for epoch in range(1, 1+args.max_epochs):
                print('--------------------epoch %d--------------------' % epoch)
                print('learning_rate:', learning_rate, '  gamma:', gamma)

                for batch in batches:
                    feed_dict = feed_dictionary(model, batch, rho, gamma,
Example #39
            decoder = greedy_decoding.Decoder(sess, args, vocab, model)

        if args.train:
            batches, _, _ = get_batches(train0,
                                        train1,
                                        vocab.word2id,
                                        args.batch_size,
                                        noisy=True,
                                        unparallel=False,
                                        max_seq_len=args.max_seq_length)
            random.shuffle(batches)

            start_time = time.time()
            step = 0
            losses = Accumulator(
                args.steps_per_checkpoint,
                ['loss', 'rec', 'adv', 'd0', 'd1', "loss_rec_cyc", 'loss_kld'])
            best_dev = float('inf')
            learning_rate = args.learning_rate
            rho = args.rho
            epsilon = args.epsilon
            gamma = args.gamma_init
            dropout = args.dropout_keep_prob
            anneal = args.anneal
            C = args.C

            # gradients = Accumulator(args.steps_per_checkpoint,
            #    ['|grad_rec|', '|grad_adv|', '|grad|'])
            # print("***SCHEDULING C FROM 0.0 to 25.0 ***")

            # C_increase = float(args.C) / (args.max_epochs * len(batches))
Example #40
    def setUp(self):
        self.operation = copy.deepcopy(OPTION_OPERATION3)
        self.accumulator = Accumulator(OPTION1)
        self.accumulator.accumulate(self.operation)
Example #41
def train():
    if args.model == 'softmax':
        loss = net['cent'] + net['wd']
    else:
        loss = net['cent'] + net['wd'] + net['kl'] + net['aux'] + net['neg_ent']

    global_step = tf.train.get_or_create_global_step()
    lr_step = n_train_batches * args.n_epochs // 3
    lr = tf.train.piecewise_constant(tf.cast(global_step,
                                             tf.int32), [lr_step, lr_step * 2],
                                     [1e-3, 1e-4, 1e-5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 1)  # line-buffered; Python 3 rejects unbuffered text mode

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    val_logger = Accumulator('cent', 'acc')
    val_to_run = [tnet['cent'], tnet['acc']]

    for i in range(args.n_epochs):
        # shuffle the training data every epoch
        xytr = np.concatenate((xtr, ytr), axis=1)
        np.random.shuffle(xytr)
        xtr_, ytr_ = xytr[:, :784], xytr[:, 784:]

        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = xtr_[j * bs:(j + 1) * bs, :], ytr_[j * bs:(j + 1) * bs, :]
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

        val_logger.clear()
        for j in range(n_val_batches):
            bx, by = xva[j * bs:(j + 1) * bs, :], yva[j * bs:(j + 1) * bs, :]
            val_logger.accum(sess.run(val_to_run, {x: bx, y: by}))
        val_logger.print_(header='val',
                          epoch=i + 1,
                          time=time.time() - start,
                          logfile=logfile)
        print()
        logfile.write('\n')

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))