Example #1
def compute_distances(img_index, layers, sub_models, embeddings, image_type,
                      tensor_root, dataset):
    # For every probed layer, feed the cached embedding through the matching
    # sub-model and persist the resulting softmax tensor for this image.
    for index, layer in enumerate(layers):
        embd = embeddings[layer]
        sub_model = sub_models[index]
        embd_vector = embd.data
        layer_softmax = compute_distances_layer(img_index, embd_vector,
                                                sub_model, layer, image_type,
                                                dataset)
        utils.save_tensor(
            layer_softmax, tensor_root + '/' + layer + '/' + str(img_index) +
            '_' + image_type + '_' + layer + '_softmax.pt')
        del layer_softmax
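
The examples on this page call utils.save_tensor with slightly different signatures, and the project's own implementation is not shown here. As a rough orientation only, a minimal sketch of the variant used in Examples #1, #2 and #4 (a tensor plus a filePath), assuming it is little more than a wrapper around torch.save:

import os
import torch


def save_tensor(tensor, filePath):
    # Hypothetical helper, not the library's actual code: create the target
    # folder if needed, then serialize the object with torch.save.
    folder = os.path.dirname(filePath)
    if folder:
        os.makedirs(folder, exist_ok=True)
    torch.save(tensor, filePath)
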
Example #2
    def save_history(self,
                     filePath=None,
                     savePredHist=False,
                     saveTrainHist=True,
                     saveResults=False,
                     results=None,
                     resultsLabel='',
                     historyLabel=''):

        sep = self.sep
        resultsLabel = resultsLabel if resultsLabel != '' else sep.join(
            (self.descr, 'results'))
        if filePath is not None:
            rootBasePath = filePath
            saveResFolder = os.path.join(rootBasePath, 'Logs', 'Results')
            saveHisFolder = os.path.join(rootBasePath, 'Logs', 'History')
        else:
            rootBasePath = self.rootSaveFolder
            saveResFolder = self.saveLogsFolder
            saveHisFolder = self.saveHisFolder
        folders = [saveResFolder, saveHisFolder]
        # Create the target directories if they do not exist.
        for f in folders:
            if not os.path.exists(f):
                os.makedirs(f)
        if saveResults:
            saveResFile = os.path.join(
                saveResFolder,
                sep.join((self.defSavePrefix, resultsLabel, ".txt")))
        saveFile = os.path.join(saveHisFolder,
                                sep.join((self.defSavePrefix, "log1.txt")))

        # Save training history or predHistory as required.
        if saveTrainHist:
            utils.save_log(saveFile, self.history)
        if savePredHist:
            utils.save_log(saveFile, self.predHistory)
        # Save Results if required
        if saveResults:
            if results is not None:
                try:
                    utils.save_tensor(results, filePath=saveResFile)
                except (AttributeError, TypeError):
                    raise AssertionError(
                        'Input results variable should be a Tensor.')
            else:
                print("No results tensor was given to save.")
        return saveFile
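
A hypothetical call site for save_history; the logger object and the result values are placeholders, and only the keyword names come from the signature above:

import torch

# 'logger' stands for whatever object defines save_history; its class is not
# shown in this snippet, so this is purely an illustrative call.
log_path = logger.save_history(savePredHist=True,
                               saveTrainHist=True,
                               saveResults=True,
                               results=torch.tensor([0.91, 0.93]),
                               resultsLabel='val-accuracy')
print('Training history written to', log_path)
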
Example #3
                    input_fixed_length=params_extract['audio_len_samples'],
                    params_extract=params_extract)
                y = modify_file_variable_length(
                    data=y,
                    input_fixed_length=params_extract['audio_len_samples'],
                    params_extract=params_extract)

                # compute log-scaled mel spec. row x col = time x freq
                # this is done only for the length specified by loading mode (fix, varup, varfull)
                mel_spectrogram = get_mel_spectrogram(
                    audio=y, params_extract=params_extract)

                # save the T_F rep to a binary file (only the considered length)
                utils.save_tensor(var=mel_spectrogram,
                                  out_path=os.path.join(
                                      params_path.get('featurepath_tr'),
                                      f_name.replace('.wav', '.data')),
                                  suffix='_mel')

                # save also label
                utils.save_tensor(var=np.array([file_to_int[f_path]],
                                               dtype=float),
                                  out_path=os.path.join(
                                      params_path.get('featurepath_tr'),
                                      f_name.replace('.wav', '.data')),
                                  suffix='_label')

                if os.path.isfile(
                        os.path.join(
                            params_path.get('featurepath_tr'),
                            f_name.replace('.wav', suffix_in + '.data'))):
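
In this example utils.save_tensor takes var, out_path and suffix keywords and inserts the suffix into the file name before writing (the isfile check above looks for f_name with suffix_in + '.data'). A minimal sketch of such a helper, assuming the suffix goes in front of the extension and the array is pickled to disk; the project's real implementation may differ:

import os
import pickle

import numpy as np


def save_tensor(var, out_path='', suffix=''):
    # Hypothetical variant of utils.save_tensor used in Example #3: write a
    # NumPy array to out_path with the suffix inserted before the extension,
    # e.g. 'foo.data' with suffix '_mel' becomes 'foo_mel.data'.
    assert isinstance(var, np.ndarray), 'var must be a NumPy array'
    root, ext = os.path.splitext(out_path)
    with open(root + suffix + ext, 'wb') as f:
        pickle.dump(var, f)
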
Example #4
def anatomy(model, sub_models, test_loader, root, dataset, tensor_folder, net,
            layers):
    dataset_root = root + '/' + dataset + '_' + net
    img_root = dataset_root + '/img'
    tensor_root = dataset_root + '/' + tensor_folder
    index = -1

    results = []

    python_version = utils.python_version()  # check python version

    bar = Bar('Processing', max=len(test_loader))
    for data_origin, target_origin in test_loader:
        index += 1
        # Send the data and label to the device
        if python_version >= 3:
            target_origin = target_origin.cuda(
                non_blocking=True)  # non_blocking is the Python 3 keyword
        else:
            # 'async' became a reserved word in Python 3.7, so pass it via
            # dict unpacking to keep this module importable under Python 3
            # while preserving the Python 2.7 behaviour.
            target_origin = target_origin.cuda(**{'async': True})

        data = torch.autograd.Variable(data_origin).cuda()
        target = torch.autograd.Variable(target_origin).cuda()

        # Forward pass the data through the model
        output, embeddings = model(data)

        init_pred = output.max(
            1, keepdim=True)[1]  # get the index of the max log-probability
        correct = 'correct'
        if init_pred.item() != target.item():
            correct = 'incorrect'

        # extract log softmax for each sub model
        compute_distances(index, layers, sub_models, embeddings, 'clean',
                          tensor_root, dataset)

        # extract log softmax of final output from target model
        if dataset == 'mnist' and net == 'lenet5':
            out_values = embeddings['fc3']
        else:  # models for cifar10, cifar100, imagenet
            out_values = embeddings['out']
        out_softmax = utils.softmax(out_values, dim=1)
        utils.save_tensor(
            out_softmax,
            tensor_root + '/out/' + str(index) + '_clean_out_softmax.pt')
        del embeddings, out_softmax
        # print('Clean pred:', init_pred.item(), 'Label:', target.item(), 'Result:', correct)
        line = [str(index), str(init_pred.item()), str(target.item()), correct]
        results.append(line[1:])
        line = ','.join(line)
        torch.cuda.empty_cache()
        bar.suffix = '({index}/{size}) | Total: {total:} | ETA: {eta:}'.format(
            index=index,
            size=len(test_loader),
            total=bar.elapsed_td,
            eta=bar.eta_td,
        )
        bar.next()
    bar.finish()

    utils.save_tensor(results, tensor_root + '/results.pt')
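
A hypothetical invocation of anatomy: model, sub_models and test_loader must come from the surrounding project, and the layer names below are illustrative guesses for the LeNet-5 branch rather than names taken from the original code (only 'fc3' appears above):

# Illustrative sketch only: model, sub_models and test_loader are assumed to
# be built elsewhere; the layer list is a guess matching the LeNet-5 branch.
layers = ['conv1', 'conv2', 'fc1', 'fc2', 'fc3']
anatomy(model, sub_models, test_loader,
        root='./output',
        dataset='mnist',
        tensor_folder='tensors',
        net='lenet5',
        layers=layers)
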
Example #5
        for i, (real, _) in enumerate(loader):
            _, real_feats = get_inception_features(
                inception,
                real.to("cpu"),
                batch_size=EVAL_BATCH_SIZE,
                device=device,
                hook=get_avgpool,
            )
            real_features.append(real_feats)
            if EVAL_BATCH_SIZE * (i + 1) >= MAX_EVAL_SAMPLES:
                break
        real_features = torch.cat(real_features, dim=0)
        mu_real = real_features.mean(dim=0)
        sigma_real = get_covariance(real_features)
        print(f"number of real samples for evaluation: {len(real_features)}")
        save_tensor(mu_real, "mu", logdir)
        save_tensor(sigma_real, "sigma", logdir)
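        # The cached mu/sigma are the real-data statistics of the Fréchet
        # Inception Distance,
        # FID = ||mu_r - mu_f||^2 + Tr(Sigma_r + Sigma_f - 2 (Sigma_r Sigma_f)^(1/2)),
        # and are compared against the fake-sample statistics computed below.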

    # get statistics on fake samples
    fake_features = []  # required for FID
    fake_softmax = []  # required for inception score

    for _ in range(MAX_EVAL_SAMPLES // EVAL_BATCH_SIZE + 1):
        eval_noise = gen_noise(32, NOISE_DIM, device=device)
        fakes = gen(eval_noise)

        fake_logits, fake_feats = get_inception_features(
            inception,
            fakes.detach().cpu(),
            batch_size=EVAL_BATCH_SIZE,
            device=device,