Beispiel #1
    def generate_keys(self):
        p = maurer_generator(key_length)
        g = generator(p)
        x = random.randint(1, p - 1)
        y = pow(g, x, p)
        self.public_key = (y, g, p)
        self.private_key = x
Beispiel #2
def make_gif(dirpath="../samples/exp0", save2path="../demo/test_imageio.gif"):
    import os
    import imageio
    import cv2
    dirname = "/".join(save2path.split("/")[:-1])
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    collections = []
    for file in generator(root_dir=dirpath,
                          file_type='png',
                          file_label_fun=None,
                          stop_after=None,
                          verbose=1):
        collections.append(file)
        # print(file)
    collections = sorted(collections,
                         key=lambda x: int(x.split("/")[-1].split(".")[0]))
    # print(collections)
    print("Reading images...")
    collections = [cv2.imread(x) for x in collections[100:-100]]
    # collections = list(map(lambda file:cv2.imread(file), collections))
    print("+ Done.")
    print("Making Gif...")
    imageio.mimsave(save2path, collections)
    print("+ Done.")
Beispiel #3
def get_accuracy(model, test_loader):
    num_samples = 0
    num_correct = 0
    for (labels, outputs) in generator(model, test_loader):
        outputs = outputs > 0
        num_samples += torch.numel(labels)
        num_correct += (labels == outputs).sum().item()
    return num_correct / num_samples
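generator(model, test_loader) is defined elsewhere in that project; a minimal sketch that would satisfy this loop, assuming a standard PyTorch model and a DataLoader yielding (inputs, labels) batches:

import torch

def generator(model, loader):
    # Yield (labels, outputs) one batch at a time, without tracking gradients.
    model.eval()
    with torch.no_grad():
        for inputs, labels in loader:
            yield labels, model(inputs)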
Beispiel #4
    def _input_fn():
        if partition == "train":
            dataset = tf.data.Dataset.from_generator(
                generator(x_train, y_train), (tf.float32, tf.int32),
                ((28 * 28), ()))
        else:
            dataset = tf.data.Dataset.from_generator(generator(x_test, y_test),
                                                     (tf.float32, tf.int32),
                                                     ((28 * 28), ()))

        if training:
            dataset = dataset.shuffle(10 * batch_size,
                                      seed=RANDOM_SEED).repeat()

        dataset = dataset.map(preprocess_image).batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return features, labels
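tf.data.Dataset.from_generator expects a callable, so generator(x, y) presumably returns one; a minimal sketch under that assumption, matching the declared output types (tf.float32, tf.int32) and shapes ((28 * 28), ()):

import numpy as np

def generator(images, labels):
    # Return a zero-argument callable that yields (flattened image, label) pairs.
    def _gen():
        for image, label in zip(images, labels):
            yield np.asarray(image, dtype=np.float32).reshape(28 * 28), np.int32(label)
    return _gen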
Beispiel #5
def train_model(model, train, valid, batch_size):
    model.compile(loss='mse', optimizer='adam')  # using adam optimizer

    # saving model at each epoch
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')

    model.fit_generator(generator(train, batch_size),
                        steps_per_epoch=math.ceil(len(train) / batch_size),
                        validation_data=generator(valid, batch_size),
                        validation_steps=math.ceil(len(valid) / batch_size),
                        epochs=1,
                        verbose=1,
                        callbacks=[checkpoint])

    model.save('model.h5')
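fit_generator requires generator(data, batch_size) to yield (X, y) batches indefinitely; a minimal sketch, assuming each element of train and valid is an (input, target) pair:

import numpy as np

def generator(samples, batch_size):
    # Loop over the samples forever, yielding (X, y) arrays of size batch_size.
    num = len(samples)
    while True:
        for offset in range(0, num, batch_size):
            batch = samples[offset:offset + batch_size]
            X = np.array([inp for inp, _ in batch])
            y = np.array([target for _, target in batch])
            yield X, y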
Beispiel #6
    def __init__(self, *args, **kwargs):
        xbmcgui.WindowXML.__init__(self, *args, **kwargs)
        #xbmcgui.WindowXMLDialog.__init__(self, *args, **kwargs)   #<--- what's the difference?
        self.subreddits_file = kwargs.get("subreddits_file")
        self.listing = kwargs.get("listing")
        self.main_control_id = kwargs.get("id")

        #self.gui_listbox.addItems(self.listing)

        listing_generator=generator(self.listing)

        tlc_id=0 #create id's for top-level-comments
        self.child_lists[:] = []  #a collection of child comments (non-tlc) tlc_children
        tlc_children=[]
        #for listing in self.listing:
        for listing in listing_generator:
            depth=int(listing.getProperty('comment_depth'))
            #add root comments and links on the listbox
            if not listing.getProperty('link_url'):
                if depth==0:
                    tlc_id+=1
                    listing.setProperty('tlc_id',str(tlc_id)) #assign an id to this top-level-comment
                    self.items_for_listbox.append(listing)    #this post will be on the listbox
                    #log('tlc: '+listing.getProperty('plot'))

                    #save the set of comments from previous top level comment
                    self.child_lists.append(tlc_children)

                    #begin a new list of child comments (this does not clear the old references)
                    tlc_children=[]
                    tlc_children.append( self.get_post_text_tuple(listing) ) #save the post_text of the top level comment
                else:
                    #collect the child comments. when depth=0 again, this list is reset.
                    child_comment=listing
                    #log('   : '+child_comment.getProperty('plot'))
                    tlc_children.append( self.get_post_text_tuple(child_comment) )
            else: #link in tlc
                if depth>0:
                    listing.setProperty('tlc_id',str(tlc_id))
                    listing.setProperty('non_tlc_link','true')

                listing.setProperty('tlc_id',str(tlc_id))
                self.items_for_listbox.append(listing)

        #don't forget to add the children of the last tlc
        self.child_lists.append(tlc_children)
        #log(pprint.pformat(self.child_lists))
        self.exit_monitor = ExitMonitor(self.close_gui)#monitors for abortRequested and calls close on the gui

        #can't dynamically create an auto-height textbox inside a grouplist
        #  so we make x of them in the xml and hope they're enough
        #  these are their id's
        self.x_controls=[x for x in range(1000, 1071)]
Beispiel #7
def nga_spider(id):
    url = f'https://bbs.nga.cn/read.php?tid={id}'
    try:
        options = webdriver.FirefoxOptions()
        options.add_argument('-headless')
        browser = webdriver.Firefox(options=options)
    except Exception:
        options = webdriver.ChromeOptions()
        options.add_argument('-headless')
        browser = webdriver.Chrome(options=options)
    # expand the collapsed sections
    click_button = """
    document.querySelectorAll(".collapse_btn button").forEach(function(each){each.click()})
    """
    browser.get(url)
    sleep(15)  # give NGA time to render the bbcode itself
    browser.execute_script(click_button)
    content = browser.execute_script('return document.querySelector("html").innerHTML;')
    sleep(5)  # avoid the error: Failed to decode response from marionette
    browser.close()
    generator('NGA', get_meta(content, url), get_posts(content), get_date(content))
Beispiel #8
    def fit(self,
            X,
            y,
            M=None,
            epochs=1,
            updates_per_batch=1,
            samples=30,
            callback=None):
        """Trains the network with the given in X and y data.
        epochs: The iteration count over the whole dataset
        M: The size of the batch that should be itertated over for optimization
        updates_per_batch: The count of consecetive interations over the same batch
        samples: samples drawn from the mdoels for calucating grading descent
        callback: a function to be called every 1000 epochs while training the model
        """
        latent_vars = {}
        N = y.shape[0]
        for var, q_var in zip(self.priorWs, self.qWs):
            latent_vars[var] = q_var
        for var, q_var in zip(self.priorBs, self.qBs):
            latent_vars[var] = q_var

        if M is None:
            M = N

        n_batch = int(N / M)
        n_epoch = epochs
        data = ut.generator([X, y], M)

        inference = ed.KLqp(latent_vars, data={self.y: self.y_ph})
        inference.initialize(n_iter=n_epoch * n_batch * updates_per_batch,
                             n_samples=samples,
                             scale={self.y: N / M})
        tf.global_variables_initializer().run()

        print("Total iterations: " + str(inference.n_iter))

        for i in range(n_epoch):
            total_loss = 0
            for _ in range(inference.n_iter // updates_per_batch // n_epoch):
                X_batch, y_batch = next(data)
                for _ in range(updates_per_batch):
                    info_dict = inference.update({
                        self.y_ph: y_batch,
                        self.X: X_batch
                    })
                total_loss += info_dict['loss']

            print("Epoch " + str(i) + " complete. Total loss: " +
                  str(total_loss))
            if i % 1000 == 0 and callback is not None:
                callback(self, i)
Beispiel #9
def gans(ginputs, dinputs, choice, freeze_D):
	gout = generator(ginputs).get_G()
	gout = tf.cond(
		freeze_D,
		lambda: gout,
		lambda: tf.stop_gradient(gout))
	din = tf.cond(
		choice,
		lambda: tf.concat([gout, ginputs], axis = -1),
		lambda: tf.concat([dinputs, ginputs], axis = -1)
	)
	dout = discriminator(din, freeze_D).get_D()
	return gout, dout
Beispiel #10
def get_public_params(len_p):
    aa = 2**len_p
    bb = 2**(len_p + 1)
    #while True:
    p = utils.get_big_prime(aa, bb)
    # if sympy.isprime((p - 1) // 2):
    #    break
    print('genned p')
    g = utils.generator(p)
    # print('Generated parameters \n g: {} \n p: {}'.format(g, p))
    with open('log.log', 'a') as f:
        f.write('Generated parameters \n\t g: {} \n\t p: {}\n'.format(g, p))
    return g, p
Beispiel #11
    def define_network(self):

        # Generators
        # This one is used to generate fake data
        self.gen_b_fake = generator(self.X_shoes,
                                    self.initializer,
                                    scope_name="generator_sb")
        self.gen_s_fake = generator(self.X_bags,
                                    self.initializer,
                                    scope_name="generator_bs")

        # Reconstruction Generators
        # Note that parameters are being used from previous layers
        self.gen_recon_s = generator(self.gen_b_fake,
                                     self.initializer,
                                     scope_name="generator_sb",
                                     reuse=True)
        self.gen_recon_b = generator(self.gen_s_fake,
                                     self.initializer,
                                     scope_name="generator_bs",
                                     reuse=True)

        # Discriminator for Shoes
        self.disc_s_real = discriminator(self.X_shoes,
                                         self.initializer,
                                         scope_name="discriminator_s")
        self.disc_s_fake = discriminator(self.gen_s_fake,
                                         self.initializer,
                                         scope_name="discriminator_s",
                                         reuse=True)

        # Discriminator for Bags
        self.disc_b_real = discriminator(self.X_bags,
                                         self.initializer,
                                         scope_name="discriminator_b")
        self.disc_b_fake = discriminator(self.gen_b_fake,
                                         self.initializer,
                                         reuse=True,
                                         scope_name="discriminator_b")
Beispiel #12
    def onInit(self):
        xbmc.executebuiltin( "Dialog.Close(busydialog)" )
        #important to reset the listbox. when control comes back to this GUI(after calling another gui).
        #  kodi will "onInit" this GUI again. we end up adding items in gui_listbox
        #self.gui_listbox.reset()
        #self.exit_monitor = ExitMonitor(self.close_gui)#monitors for abortRequested and calls close on the gui

        if self.title_bar_text:
            self.ctl_title_bar = self.getControl(1)
            self.ctl_title_bar.setText(self.title_bar_text)

        if self.poster:
            #self.getControl(2).setImage(self.poster)
            self.getControl(3).setImage(self.poster)

        listing_generator=generator(self.listing)

        for control_id in self.x_controls:
            tx_control=self.getControl(control_id)
            bn_control=self.getControl(control_id+1000)
            img_control=self.getControl(control_id+2000)

            try:
                #gather the listitem properties and prepare to put them on the button
                li=listing_generator.next()
                link_url=li.getProperty('link_url')
                label=li.getLabel()
                label=label.replace(link_url, "")  #remove the http:... part to avoid it looking duplicated because it is already put as a label in button.
                thumb=li.getArt('thumb')
                log('item_type='+repr(li.getProperty('item_type')) )
                log('onClick_action='+repr(li.getProperty('onClick_action')) )
            except StopIteration:
                li=label=link_url=thumb=None

            if label:
                tx_control.setText(label)
                #xbmcgui.ControlButton object has no attribute 'setProperty'
                if link_url=='http://blank.padding':  #the regex generating this list is not perfect. a padding was added to make it work. we ignore it here.
                    bn_control.setVisible(False)
                else:
                    bn_control.setLabel(label=link_url)
            else:
                tx_control.setText(None)
                tx_control.setVisible(False)
                bn_control.setVisible(False)

            if thumb:
                img_control.setImage(thumb)
            else:
                img_control.setVisible(False) #hide the unused controls so they don't occupy 'space' in the grouplist
Beispiel #13
def train(training_data: list, model, model_name: str):
    full = []
    for td in training_data:
        with open(td, 'rb') as file:
            full += pickle.load(file)
    track1_df = pd.DataFrame([(i, a[0]) for i, a in full], columns=['Image', 'Angle'])
    train_samples, valid_samples = train_test_split(track1_df, test_size=0.2)
    train_generator = generator(train_samples)
    valid_generator = generator(valid_samples)
    es = EarlyStopping(monitor='val_loss', mode='auto', verbose=1, patience=5)
    cp = ModelCheckpoint(model_name + '.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
    history = model.fit(train_generator,
                        steps_per_epoch=ceil(len(train_samples)/32),
                        validation_data=valid_generator,
                        validation_steps=ceil(len(valid_samples)/32),
                        epochs=60, callbacks=[es, cp])
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model mean squared error loss')
    plt.ylabel('mean squared error loss')
    plt.xlabel('epoch')
    plt.legend(['training', 'validation'], loc='upper right')
    plt.savefig(model_name + '-plot')
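generator(train_samples) and generator(valid_samples) are project-specific; a minimal sketch, assuming the DataFrame's 'Image' column already holds decoded image arrays and that the default batch size of 32 matches the steps_per_epoch/validation_steps computed above:

import numpy as np
from sklearn.utils import shuffle

def generator(samples, batch_size=32):
    # Yield (images, angles) batches from the DataFrame forever.
    num = len(samples)
    while True:
        samples = shuffle(samples)
        for offset in range(0, num, batch_size):
            batch = samples.iloc[offset:offset + batch_size]
            images = np.stack(batch['Image'].values)
            angles = batch['Angle'].values.astype(np.float32)
            yield images, angles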
Beispiel #14
def run_epoch(model, data_loader, criterion, optimizer=None, scheduler=None):
    if optimizer is None:
        assert (
            scheduler is None
        ), "If `scheduler` is provided, you must also specify an `optimizer`"
    total_loss = 0
    for (labels, outputs) in generator(model, data_loader):
        loss = criterion(outputs, labels)
        if optimizer:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # the scheduler may legitimately be None even when an optimizer is given
            if scheduler:
                scheduler.step()
        total_loss += loss.item()
    return total_loss / len(data_loader)
Beispiel #15
def get_prime_and_generator(key_length):
    while True:
        p = maurer_generator(key_length - 1)
        q = p * 2 + 1
        if millerrabin(q, 1):
            break
    print(p)
    print(q)
    a = set()
    while True:
        g = generator(q, a)
        a.add(g)
        print(g)
        if is_generator(p, q, g):
            break
    return q, g
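generator(q, a) here only has to propose a candidate generator of the multiplicative group mod q that has not been tried yet (the set a collects previously returned candidates); a minimal sketch under that assumption, the real selection strategy may differ:

import random

def generator(q, tried):
    # Return a random candidate g in [2, q-2] that has not been tried before.
    while True:
        g = random.randint(2, q - 2)
        if g not in tried:
            return g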
Beispiel #16
def train_step(video_real, video_wrong, text):
    num_clips, t, h, w, c = video_real.shape
    word, sentence = bert([text])
    word, sentence = tf.convert_to_tensor(word), tf.convert_to_tensor(sentence)
    word = tf.squeeze(word)
    z = tf.random.normal(shape=(1, 100))

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        video_fake = generator(video_real, word, z)

        # All frames put together with bs = 1
        video_real = tf.reshape(video_real, (1, num_clips * t, h, w, c))
        video_wrong = tf.reshape(video_wrong, (1, num_clips * t, h, w, c))
        video_fake = tf.reshape(video_fake, (1, num_clips * t, h, w, c))

        # Discriminator out
        disc_video_real, disc_frame_real, disc_motion_real = discriminator(
            video_real, sentence)
        disc_video_wrong, disc_frame_wrong, disc_motion_wrong = discriminator(
            video_wrong, sentence)
        disc_video_fake, disc_frame_fake, disc_motion_fake = discriminator(
            video_fake, sentence)

        # Losses
        total_video_loss = video_loss(disc_video_real, disc_video_wrong,
                                      disc_video_fake)
        total_frame_loss = frame_loss(disc_frame_real, disc_frame_wrong,
                                      disc_frame_fake)
        total_motion_loss = motion_loss(disc_motion_real, disc_motion_wrong,
                                        disc_motion_fake)

        disc_loss = discriminator_loss(total_video_loss, total_frame_loss,
                                       total_motion_loss)
        gen_loss = generator_loss(disc_video_fake, disc_frame_fake,
                                  disc_motion_fake)

    gradients_of_generator = gen_tape.gradient(gen_loss,
                                               generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(
        disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_discriminator, discriminator.trainable_variables))
Beispiel #17
    def train(self, train_data, operations, iter):

        loss = 0

        evaluator = self.getEvaluator()
        start_time = time.time()
        for x_train in utils.generator(train_data,
                                       operations.m_op,
                                       self.config,
                                       train=True):
            _, val, predicted_ner, actual_ner, predicted_rel, actual_rel, _, m_train = self.sess.run(
                [
                    operations.train_step, operations.obj,
                    operations.predicted_op_ner, operations.actual_op_ner,
                    operations.predicted_op_rel, operations.actual_op_rel,
                    operations.score_op_rel, operations.m_op
                ],
                feed_dict=x_train
            )  # sess.run(embedding_init, feed_dict={embedding_placeholder: wordvectors})

            if self.config.evaluation_method == "relaxed":
                evaluator.add(predicted_ner, actual_ner, predicted_rel,
                              actual_rel, m_train['BIO'])
            else:
                evaluator.add(predicted_ner, actual_ner, predicted_rel,
                              actual_rel)

            loss += val

        print('****iter %d****' % (iter))
        print('-------Train-------')
        print('loss: %f ' % (loss))

        if self.config.evaluation_method == "relaxed":
            evaluator.computeInfoMacro()
        else:
            evaluator.printInfo()

        elapsed_time = time.time() - start_time
        print("Elapsed train time in sec:" + str(elapsed_time))
        print()
Beispiel #18
    def evaluate(self, eval_data, operations, set):

        print('-------Evaluate on ' + set + '-------')

        evaluator = self.getEvaluator()
        for x_dev in utils.generator(eval_data,
                                     operations.m_op,
                                     self.config,
                                     train=False):
            predicted_ner, actual_ner, predicted_rel, actual_rel, _, m_eval = self.sess.run(
                [
                    operations.predicted_op_ner, operations.actual_op_ner,
                    operations.predicted_op_rel, operations.actual_op_rel,
                    operations.score_op_rel, operations.m_op
                ],
                feed_dict=x_dev)

            if self.config.evaluation_method == "relaxed":
                evaluator.add(predicted_ner, actual_ner, predicted_rel,
                              actual_rel, m_eval['BIO'])
            else:
                evaluator.add(predicted_ner, actual_ner, predicted_rel,
                              actual_rel)

        if self.config.evaluation_method == "relaxed":
            evaluator.computeInfoMacro(printScores=True)
            if "other" in [
                    x.lower() for x in self.config.dataset_set_ec_tags
            ]:  # if other class exists report score without "Other" class, see previous work on the CoNLL04
                return evaluator.getMacroF1scoresNoOtherClass()[2]
            else:
                return evaluator.getMacroF1scores()[2]

        else:
            evaluator.printInfo(printScores=True)
            return evaluator.getChunkedOverallAvgF1()
Beispiel #19
def main(args):
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # Prepare output directory
    # output directory counter
    out_counter = len([None for out in os.listdir(args.outdir) if args.algo in out])
    out = os.path.join(args.outdir, args.algo + '_' + str(out_counter + 1))
    if not os.path.exists(out):
        os.makedirs(out)

    config = {'data': args.data_path,
            'algo': args.algo,
            'batch_size': args.batch_size,
            'obs_shape': args.obs_size,
            'iteration': args.iteration,
            'D_step': args.D_step,
            'G_step': args.G_step,
            'g_optimizer': args.g_optimizer,
            'd_optimizer': args.d_optimizer,
            'gamma': args.gamma,
            'initia_learning_rate': args.initial_lr,
            'lr_schedules': args.lr_schedules,
            'c_vf': args.c_vf,
            'c_entropy': args.c_entropy,
            'c_l1': args.c_l1,
            'vf_clip': args.vf_clip,
            'leaky': args.leaky,
            'g_sn': args.g_sn,
            'd_sn': args.d_sn}
    with open(os.path.join(out, 'config.json'), 'w') as f:
        f.write(json.dumps(config, indent=4))

    # Load moving mnist
    data = np.load(args.data_path)
    obs_shape = [3, args.obs_size, args.obs_size, 1]
    # Start generator
    gen = generator(data,
            batch_size=args.batch_size, img_size=args.obs_size)
Beispiel #20
    def predict(self, eval_data, operations, set):

        print('-------Prediction on ' + set + '-------')

        evaluator = self.getEvaluator()
        for x_dev in utils.generator(eval_data,
                                     operations.m_op,
                                     self.config,
                                     train=False):
            predicted_ner, actual_ner, predicted_rel, actual_rel, _, m_eval = self.sess.run(
                [
                    operations.predicted_op_ner, operations.actual_op_ner,
                    operations.predicted_op_rel, operations.actual_op_rel,
                    operations.score_op_rel, operations.m_op
                ],
                feed_dict=x_dev)

            if self.config.evaluation_method == "relaxed":
                evaluator.add_test(predicted_ner, actual_ner, predicted_rel,
                                   actual_rel, m_eval['BIO'])
            else:
                evaluator.add_test(predicted_ner, actual_ner, predicted_rel,
                                   actual_rel)
Beispiel #21
    def populate_tlc_children(self,tlc_id):
        #controls_generator=generator(controls)
        child_comments_tuple_generator=generator(self.child_lists[tlc_id])

        for control_id in self.x_controls:
            control=self.getControl(control_id)

            try:
                post_text,author,depth=child_comments_tuple_generator.next()
            except StopIteration:
                post_text,author,depth=None,None,0

            if post_text:
                #control.setText( ("[B]"+repr(control_id-1000)+"[/B] " + post_text) if post_text else None)
                #control.setText(post_text+' '+author)
                #log(('.'*depth)+repr(post_text))
                control.setText(post_text)
            else:
                control.setText(None)
            #use animation to stagger the comments according to how deep they are
            control.setAnimations( [ animation_format(0,100,'slide', 0, (20*depth), 'sine', 'in' ) ] )

        #either there are no more child comments or we have run out of controls
        return
Beispiel #22
    def build_model(self):
        noise_dim = self.config.noise_dim
        image_dim = self.config.image_dim
        learning_rate = self.config.learning_rate
        #create placeholders
        self.gen_input = tf.placeholder(tf.float32, shape=[None, noise_dim], name="input_noise")
        self.disc_input = tf.placeholder(tf.float32, shape=[None, image_dim], name="disc_input")

        self.parameters = give_me_parameters(self.config)
        #build generator network
        with tf.name_scope('generator') as scope:
            self.gen_sample = generator(self.gen_input, self.parameters)
        with tf.name_scope('discriminator') as scope:
            disc_real           = discriminator(self.disc_input, self.parameters)
            disc_fake           = discriminator(self.gen_sample, self.parameters)

        with tf.name_scope('loss') as scope:
            #make losses
            self.gen_loss = -tf.reduce_mean(tf.log(disc_fake))
            self.disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))

        with tf.name_scope('summaries') as scope:
            gen_images = tf.reshape(self.gen_sample, [-1, 28, 28, 1])
            tf.summary.scalar('Generative Loss', self.gen_loss)
            tf.summary.scalar('Discriminator Loss', self.disc_loss)
            #tf.summary.image("Generated image", gen_images)
            tf.summary.image("Generated image", gen_images, 100, family='generated_images')
            self.merged_summary = tf.summary.merge_all()
        with tf.name_scope('optimizers') as scope:
            #create Optimizers
            optimizer_gen  = tf.train.AdamOptimizer(learning_rate=learning_rate)
            optimizer_disc = tf.train.AdamOptimizer(learning_rate=learning_rate)
            gen_vars  = give_gen_vars(self.parameters)
            disc_vars = give_disc_vars(self.parameters)
            self.train_gen = optimizer_gen.minimize(self.gen_loss, var_list=gen_vars)
            self.train_disc = optimizer_disc.minimize(self.disc_loss, var_list=disc_vars)
Beispiel #23
def train(gpu_num, epochs=15, load_path=None, if_test=False):
    """网络训练函数
    gpu_num:使用的gpu个数(>1)
    epochs:
    load_path:不为None时,将从该路径导入模型参数,在该参数基础再训练
    if_test:为True时,只进行测试。此时load_path不能为None
    """
    batch_size = 3 * gpu_num # 3 * gpu_num
    learning_rate = 1e-3
    print('load data>>>>')
    config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True
    sess = tf.Session(graph=tf.get_default_graph(), config=config)
    # (z,y,x)
    # window_size=(72,80,80), moving_size=(20,24,24),test_moving_size=(40,48,48)  
    data_loader = LobeData(window_size=(32, 224, 224), moving_size=(8, 56, 56), test_moving_size=(16, 112, 112),
                           train_path='./lobe_data2/train/', test_path='./lobe_data2/test/')
    if not if_test:
        train_images, train_labels, train_edges, train_coors = data_loader.load_train_data()
    #train_images, train_labels, train_dists,train_coors, val_images, val_labels, val_dists,val_coors  = data_loader.load_train_data(validation = True)
    test_images, test_labels, test_edges, test_coors, test_names = data_loader.load_test_data()
    train_coor_num = 0
    if not if_test:
        for temp_coors in train_coors:
            train_coor_num += len(temp_coors)
        print ("train_coor_num:", train_coor_num)
    # val_coor_num = 0
    # for temp_coors in val_coors:
    #   val_coor_num += len(temp_coors)
    # print ("val_coor_num:", val_coor_num)
    print('data loading complete!')

    print('model loaded>>>>')
    print('fitting model>>>>')


    K.set_session(sess)
    sess.run(tf.global_variables_initializer())

    result_folder=time.strftime("%Y%m%d%H%M", time.localtime())
    save_path=os.path.join('./results', 'unet_res_aug_data2_edge2_FA_GP_fissureAtt_fintune_attpredict' + result_folder)
    log_path = os.path.join(save_path, 'logs')
    mid_path = os.path.join(save_path, 'mid_res')
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    if not os.path.exists(mid_path):
        os.mkdir(mid_path)

    tb = TensorBoard(log_path)
    predict_callback = PredictCases(test_images, test_labels, test_coors, test_names, batch_size, save_path=mid_path,
                                    run_epoch=[0, 2, 5, 8, 10, 12, epochs-1])

    #stop = EarlyStopping(patience=4)  
    if if_test:
        model = unet_residual_fissureAtt(input_shape=(32, 224, 224, 1))
        model.load_weights(load_path)
        predict_cases(model, test_images, test_labels, test_coors, test_names, epoch=0, batch_size=batch_size, save_path=save_path)
        # predict_cases(model, test_images, test_labels, test_coors, test_names, epoch=0, batch_size=batch_size, save_path=save_path, save_only=True)
        return
    with tf.device('/cpu:0'):
        model = unet_residual_fissureAtt(input_shape=(32, 224, 224, 1))  #(32, 224, 224, 1)
    if load_path != None:
        model.load_weights(load_path, by_name=True)
    model.summary()
    checkpoint = MyCbk(model, path=save_path)
    if gpu_num == 1:
        with tf.device('/gpu:0'):
            model = unet_residual_fissureAtt(input_shape=(32, 224, 224, 1))
        if load_path != None:
            model.load_weights(load_path, by_name=True)
        checkpoint = MyCbk(model, path=save_path)
        parallel_model = model
    else:
        parallel_model = multi_gpu_model(model, gpus=gpu_num)
    f_loss1 = focal_loss(num_classes = 6)
    f_loss2 = focal_loss(num_classes = 2)
    dice_loss1 = dice_coef_loss([1, 1, 1, 1, 1, 1]) # [0.606, 0.0831, 0.0346, 0.0944, 0.0955, 0.0862]
    dice_loss2 = dice_coef_loss(None, classes_w_t2=[0., 1.])
    loss_weights = [0.7, 0.3] # [0.7, 0.3]  [1.0, 0.0]
    print("save_path:",save_path)
    print("load_path:",load_path)
    print("learing_rate:", learing_rate)
    print("loss_weights:", loss_weights)
    print("dice_loss1:", "dice_loss2")

    parallel_model.compile(optimizer=Adam(lr=learning_rate), loss=[dice_loss1, dice_loss2], loss_weights=loss_weights, metrics=[dice_coef]) #lr=1e-3
    # model.fit(x=image_train,y=label_train,batch_size=2,epochs=20,validation_split=0.1,callbacks=[tb])
    parallel_model.fit_generator(generator=generator(train_images, train_labels, train_edges, train_coors, batch_size=batch_size),
                                 steps_per_epoch=train_coor_num / batch_size,
                                 epochs=epochs,
                                 #validation_data=generator(val_images, val_labels, val_dists,val_coors, batch_size = batch_size),
                                 #validation_steps=val_coor_num / batch_size,
                                 verbose=1,
                                 max_queue_size=20,
                                 # callbacks=[tb, checkpoint, predict_callback]
                                 callbacks=[tb, checkpoint])
    model.save_weights(os.path.join(save_path, 'weight.h5'))
    del train_images, train_labels, train_edges, train_coors, model
    gc.collect()
    model = unet_residual_fissureAtt(input_shape=(32, 224, 224, 1))
    # batch_size = gpu_num * 2
    for ep in [0, 2, 5, 8, 10, 12, epochs-1]:
        model_path = os.path.join(save_path, "model_at_epoch_%d.h5"%ep)
        model.load_weights(model_path)
        predict_cases(model, test_images, test_labels, test_coors, test_names, epoch=ep, batch_size=batch_size, save_path=mid_path)
Beispiel #24
def get_x_generator():
    netG = utils.generator(params.uncon_shape[0], params.nz, params.uncon_ngf,
                           params.uncon_shape[2])
    state_dict = torch.load(params.uncon_cp_path)
    netG.load_state_dict(state_dict)
    return netG
Beispiel #25
def hello():
    form = SpeachForm()
    filename = ''
    if request.method == "POST":
        if form.validate_on_submit():
            filename = generator()
            #wit_response = client.message(str(form.text.data))
            c = conversation(form.text.data)
            response = nlu(form.text.data)
            #print(response)
            asd = json.loads(response)
            if (asd['entities'][0]['disambiguation']['subtype'][0]) == 'City':
                if asd['concepts'][0]['text'] == 'Weather' and asd['concepts'][
                        0]['relevance'] >= .9:
                    #print(asd['entities'][0]['text'])
                    r = requests.get(
                        'https://{}:{}@twcservice.eu-gb.mybluemix.net/api/weather/v3/location/search?query={}&language=en-US'
                        .format(USERNAME, PASSWORD,
                                asd['entities'][0]['text']))
                    res = r.json()
                    #print(res['location'], res['location']['latitude'], res['location']['longitude'])
                    #print(res['location']['latitude'][0], res['location']['longitude'][0])
                    g = geocoder.google([
                        res['location']['latitude'][0],
                        res['location']['longitude'][0]
                    ],
                                        method='reverse')
                    #print(g.json)
                    #r = requests.get('https://{}:{}@twcservice.eu-gb.mybluemix.net/api/weather/v1/geocode/{}/{}/observations.json'.format(USERNAME, PASSWORD, res['location']['latitude'][0], res['location']['longitude'][0]))
                    r = requests.get(
                        'https://{}:{}@twcservice.eu-gb.mybluemix.net/api/weather/v1/geocode/{}/{}/forecast/daily/7day.json'
                        .format(USERNAME, PASSWORD,
                                res['location']['latitude'][0],
                                res['location']['longitude'][0]))
                    res = r.json()
                    fields = [
                        'temp', 'pop', 'uv_index', 'narrative',
                        'phrase_12char', 'phrase_22char', 'phrase_32char'
                    ]
                    print(res['forecasts'][0]['narrative'])
                    #print(res['forecasts'][0]['night'])
                    print(res['forecasts'][0]['night']['narrative'])
                    text_to_speach(
                        'by day: ' + str(res['forecasts'][0]['narrative']) +
                        ', at night: ' +
                        str(res['forecasts'][0]['night']['narrative']),
                        filename)
                    #for i in res['metadata']:
                    #	print('METADATA : ' + str(i))
                    #r = requests.get('https://{}:{}@dda5b6bb-a242-4438-b1c9-9ed7edc4f2a6:[email protected]:443/'.format(USERNAME, PASSWORD))
            else:
                answer = json.loads(c)
                answer = answer['output']['text'][0]
                #flash('I heard you say: ' + str(form.text.data))
                #flash('Yay, got Wit.ai response: ' + str(wit_response))
                #flash('I understand: ' + response)
                #flash('Answer from bot: ' + str(c))
                flash('You: ' + str(form.text.data))
                flash('Julia: ' + str(answer))
                #text_to_speach(answer, filename)
            return render_template('index.html',
                                   form=form,
                                   play=True,
                                   filename=filename)
    return render_template('index.html',
                           form=form,
                           play=True,
                           filename=filename)
Beispiel #26
def get_hamming_accuracy(model, test_loader):
    scores = []
    for (labels, outputs) in generator(model, test_loader):
        score = _hamming_accuracy(labels, outputs)
        scores.append(score)
    return sum(scores) / len(scores)
Beispiel #27
    def train(self, train_data, operations, iter):
        self.is_train = True

        loss = 0

        evaluator = self.getEvaluator()
        if self.config.ner_classes == "BIO":
            tagset = self.config.dataset_set_bio_tags
            tag2id = {k: v for v, k in enumerate(tagset)}
            id2tag = {v: k for v, k in enumerate(tagset)}

            relset = self.config.dataset_set_bio_relation_ners
            rel2id = {k: v for v, k in enumerate(relset)}
            id2rel = {v: k for v, k in enumerate(relset)}

        start_time = time.time()
        for x_train in utils.generator(train_data,
                                       operations.m_op,
                                       self.config,
                                       train=True):
            _, val, m_train, transition_params1, entity1Scores, predEntity1, transition_params2, entity2Scores, predEntity2 = self.sess.run(
                [
                    operations.train_step, operations.obj, operations.m_op,
                    operations.transition_params1, operations.entity1Scores,
                    operations.predEntity1, operations.transition_params2,
                    operations.entity2Scores, operations.predEntity2
                ],
                feed_dict=x_train)

            for i in range(len(predEntity1)):
                trueNer, trueRel, predNer, predRel = [], [], [], []
                trueSeq = m_train['entity1_tags_ids'][i]
                predSeq = predEntity1[i]
                k1 = m_train['k1'][i]
                k2 = m_train['k2'][i]

                trueNer += utils.ConvertToEntity(trueSeq, tag2id)
                predNer += utils.ConvertToEntity(predSeq, tag2id)

                tners1 = utils.ConvertToEntity(trueSeq[k1:k2 + 1], tag2id)
                pners1 = utils.ConvertToEntity(predSeq[k1:k2 + 1], tag2id)

                trueSeq2 = m_train['entity2_tags_ids'][i]
                predSeq2 = predEntity2[i]
                tners2, trels = utils.getRel(trueSeq2, rel2id, id2rel)
                pners2, prels = utils.getRel(predSeq2, rel2id, id2rel)

                trueNer += tners2
                predNer += pners2

                trueRel = utils.collectNerAndRel(tners1, tners2, trels)
                predRel = utils.collectNerAndRel(pners1, pners2, prels)

                evaluator.add(predNer, trueNer, predRel, trueRel)
            loss += val

        print('****iter %d****' % (iter))
        print('-------Train-------')
        print('loss: %f ' % (loss))

        # if self.config.evaluation_method == "relaxed":
        #     evaluator.computeInfoMacro()
        # else:
        evaluator.printInfo()

        elapsed_time = time.time() - start_time
        print("Elapsed train time in sec:" + str(elapsed_time))
        return loss
Beispiel #28
import os
import numpy as np
from skimage.measure import label, regionprops

np.random.seed(1)

CLF_SIZE=256
parent_path="E:/Kaggle/RSNA Pneumonia Detection Challenge/"
model_dir=os.path.join(parent_path,"seg_models/COORD_ASPP_UNET/")
models=["model_0.h5","model_1.h5","model_2.h5"]

data_list=get_opacity_list()

chosen_idxs=np.random.choice(len(data_list),5,replace=False)

val_list=data_list[chosen_idxs]

avg_preds=np.zeros((len(val_list),CLF_SIZE,CLF_SIZE,1))
gen=generator(len(val_list),val_list,CLF_SIZE)

val_x,val_y=next(gen)
for model_p in models:

  model_path=os.path.join(model_dir,model_p) 
  model=MODEL(CLF_SIZE)
  
  model.load_weights(model_path)
  preds=model.predict(val_x,verbose=1)
  avg_preds+=preds

avg_preds/=len(models)

avg_preds=avg_preds>0.2
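generator(len(val_list), val_list, CLF_SIZE), MODEL and get_opacity_list come from the surrounding project and are not shown; a highly simplified sketch of a compatible batch generator, assuming each entry of the list is an (image_path, mask_path) pair readable by OpenCV:

import cv2
import numpy as np

def generator(batch_size, data_list, size):
    # Yield (images, masks) batches resized to size x size and scaled to [0, 1].
    while True:
        for offset in range(0, len(data_list), batch_size):
            batch = data_list[offset:offset + batch_size]
            images, masks = [], []
            for image_path, mask_path in batch:
                img = cv2.resize(cv2.imread(image_path, cv2.IMREAD_GRAYSCALE), (size, size))
                msk = cv2.resize(cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE), (size, size))
                images.append(img[..., None] / 255.0)
                masks.append((msk[..., None] > 127).astype(np.float32))
            yield np.array(images), np.array(masks)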
Beispiel #29
    def evaluate(self, eval_data, operations, set):
        import tensorflow as tf
        self.is_train = False

        print('-------Evaluate on ' + set + '-------')

        evaluator = self.getEvaluator()
        if self.config.ner_classes == "BIO":
            tagset = self.config.dataset_set_bio_tags
            tag2id = {k: v for v, k in enumerate(tagset)}
            id2tag = {v: k for v, k in enumerate(tagset)}

            relset = self.config.dataset_set_bio_relation_ners
            rel2id = {k: v for v, k in enumerate(relset)}
            id2rel = {v: k for v, k in enumerate(relset)}

        # lstm_out = self.lstm(reuse=True)

        for x_dev, doc in utils.generator(eval_data,
                                          operations.m_op,
                                          self.config,
                                          train=False):
            # print(doc.docId)

            trueNer, trueRel = utils.getNerAndRel(doc)
            predNer, predRel = [], []

            entity1Scores, predEntity1s = self.sess.run(
                [operations.entity1Scores, operations.predEntity1],
                feed_dict=x_dev)

            # lstm_out = tf.convert_to_tensor(lstm_out[0], dtype=tf.float32)

            for k in range(len(predEntity1s)):
                predEntity1 = predEntity1s[k]

                if self.config.ner_loss == "crf":
                    i = 0
                    while i < len(predEntity1):
                        tagid = predEntity1[i]
                        default = tag2id['O']
                        if tagid != default:
                            ner1 = utils.getNer(predEntity1, i, id2tag)
                            if ner1[0] == 'Other' or ner1[0] == 'other':
                                end = ner1[2]
                                i = end + 1
                                continue
                            predNer.append(ner1)
                            end = ner1[2]
                            k1 = np.asarray([i])
                            k2 = np.asarray([end])
                            x_dev[operations.m_op['k1']] = k1
                            x_dev[operations.m_op['k2']] = k2
                            predEntity2s = self.sess.run(
                                operations.predEntity2, feed_dict=x_dev)
                            predEntity2 = predEntity2s[0]
                            # print(len(predEntity2))
                            ners2, rels = utils.getRel(predEntity2, rel2id,
                                                       id2rel)
                            predNer += ners2
                            assert len(ners2) == len(rels)
                            for j in range(len(rels)):
                                r = rels[j]
                                ner2 = ners2[j]
                                e1 = (ner1[1], ner1[2])
                                e2 = (ner2[1], ner2[2])
                                predRel.append((e1, r, e2))
                            i = end + 1

                        else:
                            i += 1

            # elif self.config.ner_loss == "softmax":
            #     _, entity1Scores, label_matrix = self.sess.run([self.model1(lstm_out)])
            #     predEntity1 = self.sess.run([tf.cast(tf.arg_max(entity1Scores, 2), tf.int32)])
            #     predEntity1 = predEntity1[0]
            #     i = 0
            #     while i < len(predEntity1):
            #         tagid = predEntity1[i]
            #         default = tag2id['O']
            #         if tagid != default:
            #             ner1 = utils.getNer(predEntity1, i, id2tag)
            #             predNer.append(ner1)
            #             end = ner[2]
            #             fd = {m_op['k1']:np.asarray([i]), m_op['k2']:np.asarray([end])}
            #             entity2Scores = self.sess.run([self.model2(lstm_out, None, entity1Scores, label_matrix)], feed_dict=fd)
            #             predEntity2 = tf.cast(tf.arg_max(entity2Scores, 2), tf.int32)
            #             ners2, rels = utils.getRel(predEntity2, rel2id, id2rel)
            #             print(ners2)
            #             predNer += ners2
            #             assert len(ners2) == len(rels)
            #             for j in range(len(rels)):
            #                 r = rels[i]
            #                 ner2 = ners2[i]
            #                 e1 = (ner1[1], ner1[2])
            #                 e2 = (ner2[1], ner2[2])
            #                 predRel.append((e1, r, e2))
            #             i = end + 1
            #         else:
            #             i += 1

            # print(predNer)
            evaluator.add(predNer, trueNer, predRel, trueRel)
        evaluator.printInfo()
        return evaluator.getChunkedOverallAvgF1()
Beispiel #30
import tensorflow as tf
import os
import utils

batch_size = 100
iterations = 10000
im_size = 64
layers = 3
z_size = 100

z_in = tf.placeholder(shape=[batch_size, z_size], dtype=tf.float32)
real_in = tf.placeholder(shape=[batch_size, im_size, im_size, layers], dtype=tf.float32)

g = utils.generator(z_in, im_size, layers, batch_size)
d = utils.discriminator(real_in, batch_size)
d_fake = utils.discriminator(g, batch_size, reuse=True)

# discriminator loss: real images should be scored as ones, generated images as zeros
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d, labels=tf.ones_like(d)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.zeros_like(d_fake)))
d_loss = d_loss_real + d_loss_fake
Beispiel #31
	def __init__(self,batch_size=10,im_size=64,channels=3,dtype=tf.float32,analytics=True):
		self.analytics = analytics
		self.batch_size = batch_size

		self.x_a = tf.placeholder(dtype,[None,im_size,im_size,channels],name='xa')
		self.x_b = tf.placeholder(dtype,[None,im_size,im_size,channels],name='xb')

		#Generator Networks
		self.g_ab = utils.generator(self.x_a,name="gen_AB",im_size=im_size)
		self.g_ba = utils.generator(self.x_b,name="gen_BA",im_size=im_size)

		#Secondary generator networks, reusing params of previous two
		self.g_aba = utils.generator(self.g_ab,name="gen_BA",im_size=im_size,reuse=True)
		self.g_bab = utils.generator(self.g_ba,name="gen_AB",im_size=im_size,reuse=True)

		#Discriminator for input a
		self.disc_a_real = utils.discriminator(self.x_a,name="disc_a",im_size=im_size)
		self.disc_a_fake = utils.discriminator(self.g_ba,name="disc_a",im_size=im_size,reuse=True)

		#Discriminator for input b
		self.disc_b_real = utils.discriminator(self.x_b,name="disc_b")
		self.disc_b_fake = utils.discriminator(self.g_ab,name="disc_b",reuse=True)

		#Reconstruction loss for generators
		self.l_const_a = tf.reduce_mean(utils.huber_loss(self.g_aba,self.x_a))
		self.l_const_b = tf.reduce_mean(utils.huber_loss(self.g_bab,self.x_b))

		#Generation loss for generators 
		self.l_gan_a = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_a_fake,labels=tf.ones_like(self.disc_a_fake)))
		self.l_gan_b = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_b_fake,labels=tf.ones_like(self.disc_b_fake)))

		#Real example loss for discriminators
		self.l_disc_a_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_a_real,labels=tf.ones_like(self.disc_a_real)))
		self.l_disc_b_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_b_real,labels=tf.ones_like(self.disc_b_real)))

		#Fake example loss for discriminators
		self.l_disc_a_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_a_fake,labels=tf.zeros_like(self.disc_a_fake)))
		self.l_disc_b_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_b_fake,labels=tf.zeros_like(self.disc_b_fake)))

		#Combined loss for individual discriminators
		self.l_disc_a = self.l_disc_a_real + self.l_disc_a_fake
		self.l_disc_b = self.l_disc_b_real + self.l_disc_b_fake

		#Total discriminator loss
		self.l_disc = self.l_disc_a + self.l_disc_b

		#Combined loss for individual generators
		self.l_ga = self.l_gan_a + self.l_const_b
		self.l_gb = self.l_gan_b + self.l_const_a

		#Total GAN loss
		self.l_g = self.l_ga + self.l_gb

		#Parameter Lists
		self.disc_params = []
		self.gen_params = []

		for v in tf.trainable_variables():
			if 'disc' in v.name:
				self.disc_params.append(v)
			if 'gen' in v.name:
				self.gen_params.append(v)

		if self.analytics:
			self.init_analytics()

		self.gen_a_dir = 'generator a->b'
		self.gen_b_dir = 'generator b->a'
		self.rec_a_dir = 'reconstruct a'
		self.rec_b_dir = 'reconstruct b'
		self.model_directory = "models"	
	
		if not os.path.exists(self.gen_a_dir):
			os.makedirs(self.gen_a_dir)
		if not os.path.exists(self.gen_b_dir):
			os.makedirs(self.gen_b_dir)
		if not os.path.exists(self.rec_b_dir):
			os.makedirs(self.rec_b_dir)
		if not os.path.exists(self.rec_a_dir):
			os.makedirs(self.rec_a_dir)	

		self.sess = tf.Session()
		self.saver = tf.train.Saver()
Beispiel #32
def hupu_spider(id):
    url = f'https://bbs.hupu.com/{id}.html'
    generator('虎扑', get_meta(url), get_posts(url), get_date(url))
Beispiel #33
def tieba_spider(id):
    url = f'https://tieba.baidu.com/p/{id}?see_lz=1'
    generator('贴吧', get_meta(url), get_posts(url), get_date(url))