Example #1
    def fit(self,
            Xs,
            ys,
            Xt,
            yt,
            Xval=None,
            yval=None,
            epochs=None,
            epochs_finetune=None,
            batch_size=None,
            verbose=None):

        if epochs is None: epochs = self.epochs
        if epochs_finetune is None: epochs_finetune = self.epochs_finetune
        if batch_size is None: batch_size = self.batch_size
        if Xval is None:
            Xval = self.Xval
            yval = self.yval
        if verbose is None: verbose = self.verbose

        S_batches = batch_gen([Xs, ys], batch_size=batch_size)
        T_batches = batch_gen([Xt, yt], batch_size=batch_size)

        self.history = {'source_loss': [], 'target_loss': [], 'val_loss': []}
        print('Epoch  sloss tloss vloss')

        self._fit(S_batches, self.train_step, epochs, batch_size, Xs, ys, Xt,
                  yt, Xval, yval)
        print('Fine tuning on the target data...')
        self._fit(T_batches, self.train_step_finetune, epochs_finetune,
                  batch_size, Xs, ys, Xt, yt, Xval, yval)
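Example #1 (like the DANN-style Examples #11 and #20 further down) draws from batch_gen([X, y], batch_size) indefinitely, so this variant of the helper behaves as an endless shuffling iterator over aligned arrays. A minimal sketch of such a generator; this is an assumption from the call sites, not the projects' actual implementation:

import numpy as np

def batch_gen(arrays, batch_size):
    # Endlessly yield aligned mini-batches from a list of equal-length
    # arrays, reshuffling once per full pass over the data.
    n = arrays[0].shape[0]
    while True:
        idx = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            take = idx[start:start + batch_size]
            yield [a[take] for a in arrays]

Each draw unpacks as a pair, e.g. Xb, yb = next(batch_gen([Xs, ys], 32)).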
Example #2
    def train(self, sess):
        # Dataset iterator
        train_gen, _, _ = utils.load_dataset(self.BATCH_SIZE, self.data_func)

        noise_size = (self.BATCH_SIZE, self.get_latent_dim())
        train_gen = utils.batch_gen(train_gen)

        # Train loop
        for iteration in range(self.ITERS):
            batch_xs, _ = next(train_gen)
            adver_xs = self.add_noise(batch_xs)     # adversarial examples

            _,zstar = self.autoencode_dataset(sess,adver_xs)    # projection of adversarial examples onto range of GAN

            _, rz_loss = sess.run(
                (self.en_train_op, self.res_loss_z),
                feed_dict={self.z_in: self.noise_gen(noise_size),
                           self.x_hat: adver_xs,
                           self.x: batch_xs,
                           self.z_projection: zstar})

            # Generate samples every 10 iterations
            if iteration % 10 == 0:
                self.test_generate(sess, train_gen, filename='images/train_samples.png')
            print('at iteration:', iteration, 'z_loss:', rz_loss)

            if iteration % 10000 == 9999:
                print('Saving model...')
                self.saver.save(sess, self.MODEL_SAVE_DIRECTORY + 'checkpoint-' + str(iteration))
                self.saver.export_meta_graph(self.MODEL_SAVE_DIRECTORY + 'checkpoint-' + str(iteration) + '.meta')
Example #3
    def train(self, batch_size, lr, epochs):
        self.optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        train_op = self.optimizer.minimize(self.loss)

        gen = batch_gen(batch_size)

        # Determine number of iterations
        data_path = Path.cwd().joinpath('data', 'hdf5')
        files = [x for x in data_path.glob('*.hdf5')]
        iters = int(np.floor(epochs * len(files) / batch_size))

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)

            print('Training...')
            e = 1
            for i in range(iters):
                batch_vid, batch_aud = next(gen)
                sess.run(train_op,
                         feed_dict={
                             self.vid_in: batch_vid,
                             self.aud_in: batch_aud,
                             self.is_training: True
                         })
                if (i + 1) % (len(files) // batch_size) == 0:
                    print('Epoch {} finished'.format(e))
                    e += 1
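For instance, 1,000 HDF5 files with batch_size = 16 and epochs = 10 give int(np.floor(10 * 1000 / 16)) = 625 training iterations, and the epoch marker fires every len(files) // batch_size = 62 iterations; the division must be integer for the modulo check to work.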
Example #4
    def train(self, session):
        # Dataset iterator
        train_gen, _, _ = utils.load_dataset(self.BATCH_SIZE, self.data_func)
        train_gen = utils.batch_gen(train_gen)

        # cache variables
        disc_cost, gen_train_op, disc_train_op = self.disc_cost, self.gen_train_op, self.disc_train_op

        # Train loop
        noise_size = (self.BATCH_SIZE, self.get_latent_dim())
        start_time = time.time()
        for iteration in range(self.ITERS):
            if iteration > 0:
                _ = session.run(
                    gen_train_op,
                    feed_dict={self.z_in: self.noise_gen(noise_size)})

            # Run discriminator
            disc_iters = self.CRITIC_ITERS
            for i in range(disc_iters):
                _data, label = next(train_gen)
                _disc_cost, _ = session.run([disc_cost, disc_train_op],
                                            feed_dict={
                                                self.z_in:
                                                self.noise_gen(noise_size),
                                                self.data:
                                                _data
                                            })

                if (iteration % 100 == 10):
                    print('disc_cost:', -_disc_cost)

            if (iteration % 100 == 10):
                print('---------------')
                elapsed_time = time.time() - start_time
                avg_time = elapsed_time / iteration
                iter_left = self.ITERS - iteration
                time_left = int(avg_time * iter_left / 60)
                now = datetime.datetime.now()
                print('time left (minutes): ' + str(time_left))
                print('ETA: ' + str(now + datetime.timedelta(minutes=time_left)))
                print('-------------------------------------')

            # Calculate dev loss and generate samples every 100 iters
            if iteration % 100 == 10:
                self.test_generate(session,
                                   filename='images/train_samples.png')

            # Checkpoint
            if (iteration % 1000 == 999):
                print('Saving model...')
                self.saver.save(
                    session,
                    self.MODEL_DIRECTORY + 'checkpoint-' + str(iteration))
                self.saver.export_meta_graph(self.MODEL_DIRECTORY +
                                             'checkpoint-' + str(iteration) +
                                             '.meta')
Example #5
    def eval_model(self, sess):
        accuracy_op = self.get_accuracy_op(self.logits, self.y)
        batch_gen = utils.batch_gen(self.test_epoch, True, self.y.shape[1], num_iter=1)
        iteration = 0
        normal_avr = 0
        for points, labels in batch_gen:
            iteration += 1
            avr = sess.run(accuracy_op, feed_dict={self.x: points, self.y: labels})
            normal_avr += avr
        normal_accuracy = normal_avr / iteration
        print("Normal Accuracy:", normal_accuracy)

        return normal_accuracy
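Examples #5, #7, #8, #15, #17 and #18 share another utils.batch_gen signature: (epoch, flag, num_classes, num_iter). Judging only from the call sites (the epoch object comes from utils.load_dataset and each item unpacks as a (points, labels) pair), one plausible reading is an epoch-replaying wrapper. Both the meaning of the flag and the one-hot handling below are guesses, not the library's actual code:

import numpy as np

def batch_gen(epoch_batches, one_hot, num_classes, num_iter=1):
    # Replay a sequence of (x, y) batches for num_iter epochs.
    # The flag is read here as "labels need one-hot encoding", since the
    # call sites pass True together with y.shape[1].
    batches = list(epoch_batches)
    for _ in range(num_iter):
        for x, y in batches:
            if one_hot and np.asarray(y).ndim == 1:
                y = np.eye(num_classes)[y]
            yield x, y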
Example #6
def train_model(model, X_train, X_valid, y_train, y_valid):
    # Save a checkpoint whenever validation loss improves (save_best_only=True).
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')

    # MSE + adaptive gradient descent
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=learning_rate),
                  metrics=['accuracy'])

    # Fit the model on generated, augmented data in batches
    model.fit_generator(batch_gen(data_dir, X_train, y_train, batch_size,
                                  True),
                        samples_per_epoch,
                        nb_epoch,
                        max_q_size=1,
                        validation_data=batch_gen(data_dir, X_valid, y_valid,
                                                  batch_size, False),
                        nb_val_samples=len(X_valid),
                        callbacks=[checkpoint, tensorboard],
                        verbose=1)
Example #7
    def eval_model(self):
        # logits, y, x, sess and load_func are taken from the enclosing scope in this excerpt
        accuracy_op = get_accuracy_op(logits, y)
        batch_size = 100
        train_epoch, _, test_epoch = utils.load_dataset(batch_size, load_func)
        batch_gen = utils.batch_gen(test_epoch, True, y.shape[1], num_iter=1)
        iteration = 0
        normal_avr = 0
        for points, labels in batch_gen:
            iteration += 1
            avr = sess.run(accuracy_op, feed_dict={x: points, y: labels})
            normal_avr += avr

        normal_accuracy = normal_avr / iteration
        print("Normal Accuracy:", normal_accuracy)

        return normal_accuracy
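get_accuracy_op itself is not shown in these excerpts; the conventional TensorFlow 1.x construction it presumably wraps compares argmax predictions against argmax one-hot labels (an assumption, but a standard pattern):

import tensorflow as tf

def get_accuracy_op(logits, labels):
    # Fraction of samples whose predicted class matches the label.
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))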
Example #8
 def get_train_gen(self, sess, num_epochs=10):
     train_gen, _, _ = utils.load_dataset(self.BATCH_SIZE, self.data_func)
     return utils.batch_gen(train_gen, True, self.y.shape[1], num_epochs)
Example #9
def gen():
    yield from utils.batch_gen(VOCAB_SIZE, BATCH_SIZE, SKIP_WINDOW, SUB_SAMPLE,
                               VISUAL_FLD)
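A gen() wrapper of this shape (see also Example #22) is what tf.data expects from a Python generator; a typical hookup, with the dtypes and shapes below assumed from the skip-gram setting rather than taken from the source:

import tensorflow as tf

dataset = tf.data.Dataset.from_generator(
    gen,
    (tf.int32, tf.int32),               # assumed dtypes: (center words, target words)
    (tf.TensorShape([BATCH_SIZE]),      # assumed shape of a batch of center words
     tf.TensorShape([BATCH_SIZE, 1])))  # assumed shape of their context targets
iterator = dataset.make_initializable_iterator()  # run iterator.initializer before use
center_words, target_words = iterator.get_next()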
Example #10
    print('Total Files:', N)
    print('Sample File Name:', image_files[100])

    for layer in feature_layers:
        print('Processing Layer: ' + layer)

        file = image_files[0]
        f0 = caffe.io.load_image(os.path.join(utils.img_dir, file))
        prediction = net.predict([f0], oversample=False)  # one forward pass to size the feature blob
        features = net.blobs[layer].data[0]

        X = np.zeros((N, features.size), dtype='float32')
        ids = []

        count = 0
        for files in utils.batch_gen(image_files, batch_size=batch_size):

            if count % 1000 == 0:
                print('Processing Layer : ' + layer + ' Count : ', count)

            images = []
            for file in files:
                file_image = caffe.io.load_image(
                    os.path.join(utils.img_dir, file))
                images.append(file_image)

            prediction = net.predict(images, oversample=False)

            # save out all the features; the last batch may be short,
            # so iterate over the files actually returned
            for i in range(len(files)):
                ids.append(files[i])
            count += len(files)
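Here, and in the packet decoders of Examples #12, #14, #21 and #23, batch_gen receives a plain sequence (a filename list or a bit string) instead of arrays: yet another variant, a fixed-size chunker over any sliceable sequence. A minimal sketch; the align_right flag seen in the packet parsers is accepted but not reconstructed, since its exact semantics are not visible in the source:

def batch_gen(seq, batch_size, align_right=False):
    # Yield consecutive length-batch_size slices of a list or string.
    # Callers validate or pad lengths beforehand, so a final short chunk
    # (if any) is yielded as-is.
    for i in range(0, len(seq), batch_size):
        yield seq[i:i + batch_size]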
Example #11
File: dann.py Project: tmellan/ddan
    def fit(self, Xs, ys, Xt, yt=None, Xval=None, yval=None,
            validate_every=None, epochs=None, batch_size=None, l=None):

        print('Training on X {}, labels {}'.format(Xs.shape, ys.shape))
        if yt is not None:
            print('Target on X {}, labels {}'.format(Xt.shape, yt.shape))
        else:
            print('Target on X {}'.format(Xt.shape))

        if validate_every is None: validate_every = self.validate_every
        if epochs is None: epochs = self.epochs
        if batch_size is None: batch_size = self.batch_size
        if Xval is None:
            Xval, yval = self.Xval, self.yval
        if Xval is not None and validate_every > 0:
            print('Validating on X {}, labels {}, every {} iterations'.format(
                Xval.shape, yval.shape, validate_every))

        S_batches = batch_gen([Xs, ys], batch_size // 2)
        if yt is None: yt = np.ones(Xt.shape[0])
        T_batches = batch_gen([Xt, yt], batch_size // 2)

        self.history = {'source_loss': [], 'target_loss': [], 'val_loss': [], 
            'source_consist': [], 'target_consist': [], 'val_consist': []}

        for i in range(epochs):

            p = i / float(epochs)
            lp = 2. / (1. + np.exp(-10*p)) - 1

            if l is not None: lp = l

            Xsource, ysource = next(S_batches)
            Xtarget, ytarget = next(T_batches)
            Xbatch = np.vstack([Xsource, Xtarget])
            ybatch = np.hstack([ysource, ytarget])
            # first half of the batch comes from the source domain (=0),
            # the second half from the target domain (=1)
            Dbatch = np.hstack(
                [np.zeros(batch_size // 2, dtype=np.int32),
                 np.ones(batch_size // 2, dtype=np.int32)])

            # train step, also get training and domain losses
            _, tloss, dloss, xeloss = self.sess.run(
                [self.train_step, self.total_loss, self.domain_loss, self.xe_loss],
                feed_dict={self.X: Xbatch, self.domain: Dbatch.reshape(-1, 1), self.y: ybatch.reshape(-1, 1),
                    self.train: True, self.l: lp, K.learning_phase(): 1}
                )

            if validate_every > 0 and i % validate_every == 0:

                if i == 0:
                    print('Epoch grl  sloss tloss vloss')
                
                self.history['source_loss'] += [self.evaluate(Xs, ys)]
                self.history['target_loss'] += [self.evaluate(Xt, yt)]
                self.history['val_loss'] += [self.evaluate(Xval, yval)]

                print('{:04d} {:.2f}  {:.5f} {:.5f} {:.5f}'.format(
                    i, lp,
                    self.history['source_loss'][-1],
                    self.history['target_loss'][-1],
                    self.history['val_loss'][-1]))
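The schedule lp = 2 / (1 + exp(-10p)) - 1 ramps the gradient-reversal weight from 0 on the first epoch toward 1 (about 0.96 at p = 1), the warm-up used in the original DANN paper; passing l pins it to a constant instead.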
Example #12
    def decode_packet(self, packet):

        raw = packet
        kw = {}

        if (len(packet) < 56) or (len(packet) > 89):
            print("!invalid packet length:", len(packet))
            #return

        nibbles = [int(utils.invert(s[::-1]), 2) for s in utils.batch_gen(packet, 4)]

        if len(nibbles) < 14:
            #~ logging.info("!invalid nibbles length: %s" % len(nibbles))
            return

        if len(nibbles) > 24:
            pass  # print("!invalid nibbles length:", len(nibbles))
        else:
            pass  # print("nibbles len=", len(nibbles))

        if nibbles[0] != 10:
            # print("!syncrobyte", hex(nibbles[0]), "does not match 0xa")
            return

        sensor_type = ((nibbles[1] << 12)+(nibbles[2] << 8)+(nibbles[3] << 4) + nibbles[4])
        channel = nibbles[5]
        rolling_code = (nibbles[7] << 4) + nibbles[6]
        status = nibbles[8]

#        print "sensor type: ", hex(sensor_type)[2:], "code: ", hex(rolling_code)[2:], " channel: ", str(channel), "status: ", hex(status)[2:]

        if (sensor_type in self.SENSOR_BLOCKTYPE) :
            bt = self.SENSOR_BLOCKTYPE.get(sensor_type)
            bl = self.BLOCK_LEN.get(bt)
        else:
            bt = self.BLOCKTYPE_UNKN;
            bl = 0
            kw['error'] = "Unknown sensor type " + hex(sensor_type)[2:] + "! ";

        if (bl != 0) and (bl + 3 < len(nibbles)):
            # print("fix message len", len(nibbles), "to", bl)
            del nibbles[bl + 3:]

        expected_checksum = sum(nibbles[:-4]) - 0xA
        checksum = nibbles[-3] * 16 + nibbles[-4]

        #~ print checksum, expected_checksum
        if checksum != expected_checksum:
            # print("!invalid checksum:", hex(checksum)[2:], "expected:", hex(expected_checksum)[2:])
            return
        else:
            # print("checksum matched:", hex(checksum)[2:])
            pass

        #TODO: !!! if sensor_type == 1d20 or 1d30 => only 3 channels (bitcoded) change channel 4 to channel 3

        kw['raw'] = raw
        kw['channel'] = str(channel)
        kw['type'] = hex(sensor_type)[2:]
        kw['code'] = hex(rolling_code)[2:]
        kw['lowbat'] = str(1 if (status & 0b0100) else 0)
        kw['forced'] = str(1 if (status & 0b1000) else 0)

        if bt == self.BLOCKTYPE_T:
            self.decode_temp(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_TH:
            self.decode_temp(nibbles, 9, kw)
            self.decode_humidity(nibbles, 13, kw)
        elif bt == self.BLOCKTYPE_UV:
            self.decode_UV(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_UV2:
            self.decode_UV(nibbles, 12, kw)
        elif bt == self.BLOCKTYPE_W:
            self.decode_wind(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_R:
            self.decode_rain(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_R2:
            self.decode_rain2(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_THB:
            self.decode_temp(nibbles, 9, kw)
            self.decode_humidity(nibbles, 13, kw)
        else:
            kw['UNKN'] = 'Ok'

        return kw
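The checksum logic reads directly off the nibble layout: the expected value is the sum of all payload nibbles minus the 0xA sync nibble, and the transmitted checksum arrives low nibble first, hence nibbles[-3] * 16 + nibbles[-4].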
Example #13
            results[c_type][layer][n_components] = {'similarity_dist': [], 'avg_time': []}

for c_type in compression_types:
    for layer in feature_layers:
        scalar = utils.load_scalar(layer=layer)

        for n_components in pca_dimensions:
            X, ids, classes = utils.load_tsne_features(layer, n_components, tsne_dim)
            tree = KDTree(X, leaf_size=10)

            compressor = utils.load_compressor(layer=layer,
                                               dimension=n_components,
                                               compression='pca')

            count = 0
            for t_files in utils.batch_gen(test_files, batch_size=batch_size):

                if count % 50 == 0:
                    mean_dist = np.mean(results[c_type][layer][n_components]['similarity_dist'])
                    mean_time = np.mean(results[c_type][layer][n_components]['avg_time'])
                    print('Evaluate Script :: C Type :', c_type, ' // Layer :', layer, ' // Dim :', n_components, ' // Count :', count)
                    print('Evaluate Script :: Similarity Distance :', mean_dist, ' // Avg Time :', mean_time)

                count += batch_size

                images = []
                for t_file in t_files:
                    image_path = os.path.join(utils.test_dir, t_file)
                    images.append(caffe.io.load_image(image_path))

                # predict takes any number of images, and formats them for the Caffe net automatically
Example #14
    def parsePacket(self, packet):
        #~ print len(packet)
        if len(packet) < 38:
            return
        remainder = (len(packet) - 6) % 4
        if remainder != 0:
            packet += '0' * (4 - remainder)

        crc = int(packet[-8:][::-1], 2)
        fmt = int(packet[-16:-8][::-1], 2)
        addr_lo = int(packet[-32:-24][::-1], 2)
        addr_hi = int(packet[-24:-16][::-1], 2)
        addr = (addr_hi << 8) + addr_lo

        if fmt < 4:
            sextet_1 = packet[:6]
            flip_bit = int(sextet_1[1], 2)
            cmd = int(sextet_1[2:][::-1], 2)
            args_data = packet[6:-32]
        else:
            dectet_1 = packet[:10]
            flip_bit = int(dectet_1[1], 2)
            cmd = int(dectet_1[2:][::-1], 2)
            args_data = packet[10:-32]

        #~ print "fmt=", fmt, len(args_data)
        #~ print args_data
        if fmt == 0:
            if len(args_data) != 0:
                return
        elif fmt == 1:
            if len(args_data) != 8:
                return
        elif fmt == 3:
            if len(args_data) != 32:
                return
        elif fmt == 4:
            if len(args_data) != 0:
                return
        elif fmt == 5:
            if len(args_data) != 8:
                return
        elif fmt == 6:
            if len(args_data) != 16:
                return
        elif fmt == 7:
            if len(args_data) != 32:
                return
        else:
            return

        if args_data:
            args = [int(x[::-1], 2) for x in utils.batch_gen(args_data, 8, align_right=True)]
        else:
            args = []

        return flip_bit, cmd, addr, fmt, crc, args
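As a quick sanity check of the LSB-first bit layout, a minimal fmt-0 packet of exactly 38 bits decodes as follows (decoder here stands for any instance exposing parsePacket):

pkt = ('010100'        # header sextet: flip bit '1'; cmd bits '0100' reversed = '0010' = 2
       '10000000'      # addr_lo, LSB first -> 1
       '00000000'      # addr_hi -> 0, so addr = 1
       '00000000'      # fmt -> 0, which requires empty args_data
       '00000000')     # crc -> 0
decoder.parsePacket(pkt)  # returns (1, 2, 1, 0, 0, [])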
Example #15
 def train_model(self, sess, x, y, train_epoch, train_op, num_epochs):
     train_gen = utils.batch_gen(train_epoch, True, y.shape[1], num_epochs)
     for x_train, y_train in train_gen:
         sess.run(train_op, feed_dict={x: x_train, y: y_train})
Example #16
for c_type in compression_types:
    for layer in feature_layers:
        scalar = utils.load_scalar(layer=layer)

        for n_components in pca_dimensions:
            X, ids, classes = utils.load_tsne_features(layer, n_components,
                                                       tsne_dim)
            tree = KDTree(X, leaf_size=10)

            compressor = utils.load_compressor(layer=layer,
                                               dimension=n_components,
                                               compression='pca')

            count = 0
            for t_files in utils.batch_gen(test_files, batch_size=batch_size):

                if count % 50 == 0:
                    mean_dist = np.mean(results[c_type][layer][n_components]
                                        ['similarity_dist'])
                    mean_time = np.mean(
                        results[c_type][layer][n_components]['avg_time'])
                    print('Evaluate Script :: C Type :', c_type, ' // Layer :', layer, ' // Dim :', n_components, ' // Count :', count)
                    print('Evaluate Script :: Similarity Distance :', mean_dist, ' // Avg Time :', mean_time)

                count += batch_size

                images = []
                for t_file in t_files:
                    image_path = os.path.join(utils.test_dir, t_file)
                    images.append(caffe.io.load_image(image_path))
Example #17
def eval_mnist_model(sess,
                     x,
                     y,
                     dropout_rate,
                     logits,
                     adv_logits,
                     test_epoch,
                     accuracy_op,
                     adv_acc_op,
                     myVAE,
                     image_path=None,
                     name=None):
    test_gen = utils.batch_gen(test_epoch, True, y.shape[1], 1)
    it = 0

    loss_1 = tf.losses.softmax_cross_entropy(y, logits=logits)
    loss_adv = tf.losses.softmax_cross_entropy(y, logits=adv_logits)
    loss_z_l2 = tf.reduce_mean(tf.nn.l2_loss(myVAE.z - myVAE.z_in))

    # One attack must be active for the evaluation below; the plain PGD attack
    # on the classifier loss is enabled here (alternative variants left commented).
    #attack = attacks.LinfPGDAttack(loss_z_l2, myVAE.x_hat, 0.3, 30, 0.01, True)
    attack = attacks.LinfPGDAttack(loss_1, x, 0.3, 30, 0.01, True)
    #attack = attacks.LinfPGDAttack(loss_adv, myVAE.x_hat, 0.3, 30, 0.01, True)

    normal_avr, adv_avr_ref, adv_avr, reconstr_avr = 0, 0, 0, 0
    for x_test, y_test in test_gen:
        it = it + 1
        #adversarial_x = get_adv_dataset(sess, logits, x, y, x_test, y_test)
        #adversarial_x = get_adv_dataset(sess, adv_logits, myVAE.x_hat, y, x_test, y_test)

        #_, z_in = myVAE.autoencode_dataset(sess, x_test)
        #adversarial_x = attack.perturb(x_test, y_test, myVAE.x_hat, y, sess, myVAE.z_in, z_in)
        adversarial_x = attack.perturb(x_test, y_test, x, y, sess)
        #adversarial_x = attack.perturb(x_test, y_test, myVAE.x_hat, y, sess)
        cleaned_x, z_res = myVAE.autoencode_dataset(sess, adversarial_x)

        #print ('compare z vs z : ', (z_in[0] - z_res[0]), np.linalg.norm(z_in[0] - z_res[0]))

        normal_avr += sess.run(accuracy_op,
                               feed_dict={
                                   x: x_test,
                                   y: y_test,
                                   dropout_rate: 0.0
                               })
        adv_avr_ref += sess.run(accuracy_op,
                                feed_dict={
                                    x: adversarial_x,
                                    y: y_test,
                                    dropout_rate: 0.0
                                })

        adv_avr += sess.run(adv_acc_op,
                            feed_dict={
                                myVAE.x_hat: adversarial_x,
                                y: y_test,
                                dropout_rate: 0.0
                            })
        reconstr_avr += sess.run(accuracy_op,
                                 feed_dict={
                                     x: cleaned_x,
                                     y: y_test,
                                     dropout_rate: 0.0
                                 })

        if (it % 10 == 3):
            test_pred = sess.run(logits,
                                 feed_dict={
                                     x: cleaned_x,
                                     y: y_test,
                                     dropout_rate: 0.0
                                 })

            i1 = np.argmax(test_pred, 1)
            i2 = np.argmax(y_test, 1)
            index = np.where(np.not_equal(i1, i2))[0]

            p_size = len(index)
            p_size = x_test.shape[0]
            wrong_x = x_test[:, :]
            wrong_adv = adversarial_x[:, :]
            wrong_res = cleaned_x[:, :]

            utils.save_images(wrong_x.reshape(p_size, 28, 28),
                              'images/cl_original.png')
            utils.save_images(wrong_adv.reshape(p_size, 28, 28),
                              'images/cl_adversarial.png')
            utils.save_images(wrong_res.reshape(p_size, 28, 28),
                              'images/cl_reconstr.png')

    print("------------ Test ----------------")
    print(name, "Normal Accuracy:", normal_avr / it)
    print(name, "Normal Adversarial Accuracy:", adv_avr_ref / it)
    print(name, "Adversarial Accuracy:", adv_avr / it)
    print(name, "Reconstructed Accuracy:", reconstr_avr / it)
Example #18
 def get_test_gen(self, sess):
     _, _, test_gen = utils.load_dataset(self.BATCH_SIZE, self.data_func)
     return utils.batch_gen(test_gen, True, self.y.shape[1], 1)
Example #19
 def get_train_gen(self, sess):
     train_gen, _, _ = utils.load_dataset(self.BATCH_SIZE, self.data_func)
     return utils.batch_gen(train_gen)
Example #20
    def fit(self,
            Xs,
            ys,
            Xt,
            yt=None,
            Xval=None,
            yval=None,
            epochs=None,
            batch_size=None,
            verbose=None):

        if epochs is None: epochs = self.epochs
        if batch_size is None: batch_size = self.batch_size
        if Xval is None:
            Xval = self.Xval
            yval = self.yval
        if verbose is None: verbose = self.verbose

        S_batches = batch_gen([Xs, ys], batch_size=batch_size)
        if yt is None: yt = np.ones(Xt.shape[0])
        T_batches = batch_gen([Xt, yt], batch_size=batch_size)

        self.history = {
            'source_loss': [],
            'target_loss': [],
            'val_loss': [],
            'domain_loss': []
        }
        for i in range(epochs):

            Xsource, ysource = next(S_batches)
            Xtarget, ytarget = next(T_batches)

            feed_dict = {
                self.inp_a: Xsource,
                self.inp_b: Xtarget,
                self.labels_a: ysource.reshape(-1, 1),
                K.learning_phase(): 1
            }
            if self.supervised:
                feed_dict[self.labels_b] = ytarget.reshape(-1, 1)

            # train
            _, _, confusion, xeloss, dloss, tloss = self.sess.run(
                [
                    self.train_step, self.increment_confusion, self.confusion,
                    self.xe_loss, self.domain_loss, self.total_loss
                ],
                feed_dict=feed_dict)

            if self.validate_every > 0 and i % self.validate_every == 0:

                if i == 0:
                    print('Epoch confusion  dloss  total  sloss tloss vloss')

                self.history['source_loss'] += [self.evaluate(Xs, ys)]
                self.history['target_loss'] += [self.evaluate(Xt, yt)]
                self.history['val_loss'] += [self.evaluate(Xval, yval)]
                self.history['domain_loss'] += [dloss]

                print('{:04d} {:.2f}  {:.4f}  {:.4f}  {:.5f} {:.5f} {:.5f}'.format(
                    i, confusion, dloss, tloss,
                    self.history['source_loss'][-1],
                    self.history['target_loss'][-1],
                    self.history['val_loss'][-1]))
Example #21
    def decode_packet(self, packet):

        raw = packet
        kw = {}

        if (len(packet) < 56) or (len(packet) > 89):
            print "!invalid packet length: ", len(packet)
            #return

        nibbles = [
            int(utils.invert(s[::-1]), 2) for s in utils.batch_gen(packet, 4)
        ]

        if len(nibbles) < 14:
            #~ logging.info("!invalid nibbles length: %s" % len(nibbles))
            return

        if len(nibbles) > 24:
            pass  # print("!invalid nibbles length:", len(nibbles))
        else:
            pass  # print("nibbles len=", len(nibbles))

        if nibbles[0] != 10:
            #            print "!syncrobyte ", hex(nibbles[0]), " does not match 0xa"
            return

        sensor_type = ((nibbles[1] << 12) + (nibbles[2] << 8) +
                       (nibbles[3] << 4) + nibbles[4])
        channel = nibbles[5]
        rolling_code = (nibbles[7] << 4) + nibbles[6]
        status = nibbles[8]

        #        print "sensor type: ", hex(sensor_type)[2:], "code: ", hex(rolling_code)[2:], " channel: ", str(channel), "status: ", hex(status)[2:]

        if (sensor_type in self.SENSOR_BLOCKTYPE):
            bt = self.SENSOR_BLOCKTYPE.get(sensor_type)
            bl = self.BLOCK_LEN.get(bt)
        else:
            bt = self.BLOCKTYPE_UNKN
            bl = 0
            kw['error'] = "Unknown sensor type " + hex(sensor_type)[2:] + "! "

        if (bl != 0) and (bl + 3 < len(nibbles)):
            #            print "fix message len ", len(nibbles), " to ", bl
            del nibbles[bl + 3:]

        expected_checksum = sum(nibbles[:-4]) - 0xA
        checksum = nibbles[-3] * 16 + nibbles[-4]

        #~ print checksum, expected_checksum
        if checksum != expected_checksum:
            #   print "!invalid checksum: ", hex(checksum)[2:], " expected: ", hex(expected_checksum)[2:]
            return
        else:
            #             print "checksum matched: ", hex(checksum)[2:]
            pass

        #TODO: !!! if sensor_type == 1d20 or 1d30 => only 3 channels (bitcoded) change channel 4 to channel 3

        kw['raw'] = raw
        kw['channel'] = str(channel)
        kw['type'] = hex(sensor_type)[2:]
        kw['code'] = hex(rolling_code)[2:]
        kw['lowbat'] = str(1 if (status & 0b0100) else 0)
        kw['forced'] = str(1 if (status & 0b1000) else 0)

        if bt == self.BLOCKTYPE_T:
            self.decode_temp(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_TH:
            self.decode_temp(nibbles, 9, kw)
            self.decode_humidity(nibbles, 13, kw)
        elif bt == self.BLOCKTYPE_UV:
            self.decode_UV(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_UV2:
            self.decode_UV(nibbles, 12, kw)
        elif bt == self.BLOCKTYPE_W:
            self.decode_wind(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_R:
            self.decode_rain(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_R2:
            self.decode_rain2(nibbles, 9, kw)
        elif bt == self.BLOCKTYPE_THB:
            self.decode_temp(nibbles, 9, kw)
            self.decode_humidity(nibbles, 13, kw)
        else:
            kw['UNKN'] = 'Ok'

        return kw
Example #22
def gen():
    yield from utils.batch_gen(VOCAB_SIZE, BATCH_SIZE, VISUAL_FLD)
Example #23
    def parsePacket(self, packet):
        #~ print len(packet)
        if len(packet) < 38:
            return
        remainder = (len(packet) - 6) % 4
        if remainder != 0:
            packet += '0' * (4 - remainder)

        crc = int(packet[-8:][::-1], 2)
        fmt = int(packet[-16:-8][::-1], 2)
        addr_lo = int(packet[-32:-24][::-1], 2)
        addr_hi = int(packet[-24:-16][::-1], 2)
        addr = (addr_hi << 8) + addr_lo

        if fmt < 4:
            sextet_1 = packet[:6]
            flip_bit = int(sextet_1[1], 2)
            cmd = int(sextet_1[2:][::-1], 2)
            args_data = packet[6:-32]
        else:
            dectet_1 = packet[:10]
            flip_bit = int(dectet_1[1], 2)
            cmd = int(dectet_1[2:][::-1], 2)

            args_data = packet[10:-32]

        #~ print "fmt=", fmt, len(args_data)
        #~ print args_data
        if fmt == 0:
            if len(args_data) != 0:
                return
        elif fmt == 1:
            if len(args_data) != 8:
                return
        elif fmt == 3:
            if len(args_data) != 32:
                return
        elif fmt == 4:
            if len(args_data) != 0:
                return
        elif fmt == 5:
            if len(args_data) != 8:
                return
        elif fmt == 6:
            if len(args_data) != 16:
                return
        elif fmt == 7:
            if len(args_data) != 32:
                return
        else:
            return

        if args_data:
            args = [
                int(x[::-1], 2)
                for x in utils.batch_gen(args_data, 8, align_right=True)
            ]
        else:
            args = []

        return flip_bit, cmd, addr, fmt, crc, args