Example #1
    def train(self, data, train_arguments):
        [cache_prefix, epochs, sample_interval] = train_arguments

        # Create a summary writer for TensorBoard logs
        train_summary_writer = tf.summary.create_file_writer(
            path.join('.', 'summaries', 'train'))

        # Adversarial ground truths (WGAN uses +1/-1 rather than 1/0 labels)
        valid = np.ones((self.batch_size, 1))
        fake = -np.ones((self.batch_size, 1))

        with train_summary_writer.as_default():
            for epoch in tqdm.trange(epochs, desc='Epoch Iterations'):

                for _ in range(self.n_critic):
                    # ---------------------
                    #  Train the Critic
                    # ---------------------
                    batch_data = self.get_data_batch(data, self.batch_size)
                    noise = tf.random.normal((self.batch_size, self.noise_dim))

                    # Generate a batch of events
                    gen_data = self.generator(noise)

                    # Train the Critic
                    d_loss_real = self.critic.train_on_batch(batch_data, valid)
                    d_loss_fake = self.critic.train_on_batch(gen_data, fake)
                    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                    # Clip critic weights after each update to enforce the
                    # Lipschitz constraint (vanilla WGAN weight clipping)
                    for layer in self.critic.layers:
                        weights = [
                            np.clip(w, -self.clip_value, self.clip_value)
                            for w in layer.get_weights()
                        ]
                        layer.set_weights(weights)

                # ---------------------
                #  Train Generator
                # ---------------------
                noise = tf.random.normal((self.batch_size, self.noise_dim))
                # Train the generator (to have the critic label samples as valid)
                g_loss = self._model.train_on_batch(noise, valid)
                # Plot the progress
                print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                      (epoch, d_loss[0], 100 * d_loss[1], g_loss))

                # If at the save interval, save model checkpoints
                if epoch % sample_interval == 0:
                    # os.makedirs with exist_ok avoids the explicit existence check
                    os.makedirs('./cache', exist_ok=True)
                    model_checkpoint_base_name = './cache/' + cache_prefix + '_{}_model_weights_step_{}.h5'
                    self.generator.save_weights(
                        model_checkpoint_base_name.format('generator', epoch))
                    self.critic.save_weights(
                        model_checkpoint_base_name.format('critic', epoch))
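
The weight clipping inside the critic loop is what makes this a vanilla WGAN: after each critic update, every weight is forced into [-clip_value, clip_value] to (crudely) enforce the Lipschitz constraint. A self-contained sketch of just that step, with a throwaway stand-in critic (layer sizes are arbitrary, not taken from this code):

import numpy as np
import tensorflow as tf

# Stand-in critic; the real one is built elsewhere in the class above
critic = tf.keras.Sequential([
    tf.keras.Input(shape=(16,)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(1),
])

clip_value = 0.01
for layer in critic.layers:
    # np.clip bounds every weight and bias into [-clip_value, clip_value]
    clipped = [np.clip(w, -clip_value, clip_value) for w in layer.get_weights()]
    layer.set_weights(clipped)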
Example #2
 def get_unique_label(self, lbl_list):
     uniq_label = []
     for i in tqdm.trange(len(lbl_list)):
         lbl_file = lbl_list[i]
         # Convert to a numpy array first: PIL images have no .flatten()
         lbl = np.array(Image.open(lbl_file))
         for l in set(lbl.flatten()):
             if l not in uniq_label:
                 uniq_label.append(l)
     return uniq_label
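
For comparison, the same scan can be written with np.unique, which avoids the per-pixel membership test; the 'masks/*.png' pattern is a placeholder, not a path from the original project:

import glob
import numpy as np
from PIL import Image

uniq_label = set()
for lbl_file in glob.glob('masks/*.png'):  # placeholder pattern
    # np.unique collapses each mask to its distinct label values
    uniq_label.update(np.unique(np.array(Image.open(lbl_file))).tolist())
print(sorted(uniq_label))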
Example #3
    def get_coeffs(self, predicts, coefficients, alpha=0.001, iterations=100):
        best_coefficients = coefficients.clone()
        best_score = self._compute_score_with_coefficients(predicts, coefficients)

        for _ in tqdm.trange(iterations):
            # Down-weight the class that currently receives the most
            # argmax predictions, then keep the best-scoring coefficients
            counter = self._get_labels_distribution(predicts, coefficients)
            label = int(torch.argmax(counter).cpu())
            coefficients[label] -= alpha
            score = self._compute_score_with_coefficients(predicts, coefficients)
            if score > best_score:
                best_score = score
                best_coefficients = coefficients.clone()

        return best_coefficients.numpy()
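
The loop is a greedy search: each step lowers the coefficient of whichever class currently receives the most argmax predictions, keeping the coefficients that score best. A self-contained toy version of the same loop (random data and an accuracy score stand in for the class's private helpers):

import torch

torch.manual_seed(0)
predicts = torch.rand(500, 4)          # stand-in class probabilities
targets = torch.randint(0, 4, (500,))  # stand-in labels, illustrative only

def score(p, c):
    # Accuracy of coefficient-scaled argmax predictions
    return (torch.argmax(p * c, dim=1) == targets).float().mean().item()

coefficients = torch.ones(4)
best_c, best_s = coefficients.clone(), score(predicts, coefficients)
for _ in range(100):
    counts = torch.bincount(torch.argmax(predicts * coefficients, dim=1),
                            minlength=4)
    coefficients[int(torch.argmax(counts))] -= 0.001  # alpha
    s = score(predicts, coefficients)
    if s > best_s:
        best_s, best_c = s, coefficients.clone()
print(best_c.numpy())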
Example #4
def getEveryVideoCaptureImgDarkChannel(base_dir):
    count = 0
    # Progress-bar total is taken from the hard-coded capture directory,
    # which is presumably the same set of files the glob below matches
    with tqdm.trange(len(os.listdir('../data/airport/capture'))) as tbar:
        disp_dict = {}
        for name in glob('{}/*.jpg'.format(base_dir)):
            count += 1
            m = deHaze(cv2.imread(name) / 255.0) * 255
            # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is
            # the usual drop-in replacement (clip and cast to 8-bit explicitly)
            imageio.imwrite(
                '../data/airport/defog/defog_{}.jpg'.format(
                    name.split('/')[4].split('.')[0]),
                np.clip(m, 0, 255).astype(np.uint8))

            disp_dict.update({'num': count})
            tbar.update()
            tbar.set_description(desc='num')
            tbar.set_postfix(disp_dict)
            tbar.refresh()
Example #5
def get_dark_channel_img(base_dir):
    with tqdm.trange(len(os.listdir(base_dir))) as tbar:
        count = 0
        disp_dict = {}
        for name in glob('{}/*'.format(base_dir)):
            count += 1
            haze = np.array(Image.open(name))[:, :, 0:3] / 255
            dark = calculate_dark(haze)
            # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is
            # the usual drop-in replacement (scale and cast to 8-bit explicitly)
            imageio.imwrite(
                '../data/airport/dark_channel/darkchannel_{}.jpg'.format(
                    name.split('/')[4].split('.')[0]),
                (np.clip(dark, 0, 1) * 255).astype(np.uint8))

            disp_dict.update({'num': count})
            tbar.update()
            tbar.set_description(desc='num')
            tbar.set_postfix(disp_dict)
            tbar.refresh()
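
Both helpers are plumbing around the dark-channel prior used in single-image dehazing: per pixel, take the minimum over the RGB channels, then a local minimum filter over a small window. A minimal sketch of what calculate_dark presumably computes (the 15x15 patch size is a common default, not taken from this code):

import numpy as np
from scipy.ndimage import minimum_filter

def dark_channel(img, patch=15):
    # img: HxWx3 float array scaled to [0, 1]
    per_pixel_min = img.min(axis=2)                    # darkest of the three channels
    return minimum_filter(per_pixel_min, size=patch)   # local window minimum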
Example #6
def query2(uids, batch_size=400, n_tries=5, timeout=5):
    """
        query: verbose, batched, unique uids, retry on failure
    """
    uids = np.unique(uids)
    all_ok, all_errors = {}, []
    for i in tqdm.trange(0, len(uids), batch_size):
        _uids = uids[i:i + batch_size]
        for _ in range(n_tries):
            ok, errors = query(_uids)
            if ok is not None:
                break
            print("query failed, waiting {} seconds until next try...".format(
                timeout))
            time.sleep(timeout)
        if ok is None:
            raise RuntimeError(
                "query failed {} times, aborting...".format(n_tries))
        all_ok.update(ok)
        all_errors += errors
    return all_ok, all_errors
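
A usage sketch, assuming query() returns an (ok, errors) pair where ok is None on failure, as the retry loop implies; the uid values here are placeholders:

uids = ["A001", "B002", "A001"]  # duplicates are dropped by np.unique
all_ok, all_errors = query2(uids, batch_size=200, n_tries=3, timeout=2)
print(len(all_ok), "results,", len(all_errors), "errors")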
Example #7
def processData(data):
    import re
    import tqdm
    filteredData = data.copy()  # work on a copy so the caller's frame is untouched
    stemmer = PorterStemmer()
    stopWords = set(stopwords.words('english'))
    # Process each review individually
    for i in tqdm.trange(len(data)):
        # Replace the &#39; HTML entity with a literal apostrophe
        rev = filteredData.review[i].replace('&#39;', "'")
        # Drop everything except letters, spaces and apostrophes
        # (str.replace takes no regex, so use re.sub)
        rev = re.sub(r"[^a-zA-Z ']+", '', rev)
        # Translate to lower case
        # rev = rev.lower()
        # Tokenize using NLTK's tokenizer
        rev = nltk.word_tokenize(rev)
        # Stem the words
        rev = [stemmer.stem(word) for word in rev]
        # Negate
        rev = negate(rev)
        # Remove stopwords
        rev = [word for word in rev if word not in stopWords]
        filteredData.at[i, 'review'] = rev
    return filteredData
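
A runnable sketch of calling processData on a tiny DataFrame, assuming the NLTK data is available; negate here is a pass-through stand-in for the project's real helper:

import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer

nltk.download('punkt')
nltk.download('stopwords')

def negate(tokens):
    return tokens  # stand-in; the original applies negation handling

data = pd.DataFrame({'review': ["It wasn&#39;t bad, I liked it!"]})
print(processData(data).review[0])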
Example #8
def get_activations_from_ims(ims,
                             model,
                             batch_size=50,
                             dims=2048,
                             device=None,
                             verbose=False,
                             tqdm_position=None):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- ims         : Array of images, NHWC or NCHW layout
    -- model       : Instance of inception model
    -- batch_size  : Batch size of images for the model to process at once.
                     Make sure that the number of samples is a multiple of
                     the batch size, otherwise some samples are ignored. This
                     behavior is retained to match the original FID score
                     implementation.
    -- dims        : Dimensionality of features returned by Inception
    -- device      : torch device the image batches are moved to
    -- verbose     : If set to True, print a completion message when done
    -- tqdm_position : Position of the tqdm progress bar; pass a negative
                       value to disable the bar
    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()

    n_batches = (len(ims) + batch_size - 1) // batch_size
    n_used_imgs = len(ims)

    pred_arr = np.empty((n_used_imgs, dims))

    if tqdm_position is None or tqdm_position >= 0:
        import tqdm
        dataloader_tqdm = tqdm.trange(n_batches,
                                      desc='FID        ',
                                      position=tqdm_position,
                                      leave=False)
    else:
        dataloader_tqdm = range(n_batches)
    for i in dataloader_tqdm:
        start = i * batch_size
        end = start + batch_size
        if end > len(ims):
            end = len(ims)
        images = ims[start:end]
        if images.shape[1] != 3:
            # Convert NHWC to NCHW for the model
            images = images.transpose((0, 3, 1, 2))
        images /= 255  # in-place: assumes a float array scaled to [0, 255]

        batch = torch.from_numpy(images).type(torch.FloatTensor).to(device)
        with torch.no_grad():
            pred = model(batch)[0]

        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.shape[2] != 1 or pred.shape[3] != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))

        pred_arr[start:end] = pred.cpu().data.numpy().reshape(end - start, -1)

    if verbose:
        print(' done')

    return pred_arr
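
A smoke-test sketch with a stand-in model whose forward pass returns a list holding an (N, dims, 1, 1) tensor, the shape this function expects from the usual pytorch-fid Inception wrapper; everything here is illustrative:

import numpy as np
import torch

class DummyModel(torch.nn.Module):
    def forward(self, x):
        # Mimic the expected output: a list whose first element is (N, dims, 1, 1)
        return [torch.zeros(x.shape[0], 2048, 1, 1)]

ims = np.random.rand(10, 64, 64, 3) * 255   # NHWC float images in [0, 255]
acts = get_activations_from_ims(ims, DummyModel(), batch_size=4,
                                device='cpu', tqdm_position=0)
print(acts.shape)  # (10, 2048)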
Example #9
 elif a.startswith("rmdir "):
     os.rmdir(a[6:])
     print("directory removed!")
 elif a == "usb-devices":
     myblkd = blkinfo.BlkDiskInfo()
     all_my_disks = myblkd.get_disks()
     for i in all_my_disks:
         print(i, "\n")
 elif a == "sleep":
     while True:
         time.sleep(1)
 elif a.startswith("sleep "):
     # sleep expects a number, not the raw string slice
     time.sleep(float(a[6:]))
 elif a.startswith("compile "):
     os.system('nuitka3 ' + a[8:])
     # Cosmetic progress bar shown after compilation finishes
     for i in tqdm.trange(100):
         time.sleep(0.01)
 elif a == "hardware":
     hardware()
 elif a.startswith("pwd"):
     print(os.getcwd())
 elif a == "whoami":
     print(getpass.getuser())
 elif a.startswith("python"):
     os.system("python3.8")
 elif a == "myip":
     ip()
 elif a.startswith("ping "):
     while True:
         ping(a[5:], verbose=True, count=1)
 elif a.startswith("say "):