Example #1
def load(cont_path, style_path, device, img_size):
    '''
    A small function for loading and preparing the
    necessities.\n
    `cont_path`: Path/Link to the content image.\n
    `style_path`: Path/Link to the style image.\n
    `device`: The device for the model and the images.\n
    `img_size`: The desired size for the image.
    '''

    content_image = load_image(cont_path, device, img_size)
    _, _, w, h = content_image.shape
    style_image = load_image(style_path, device, (w, h))

    target = content_image.clone().requires_grad_(True).to(device)

    vgg = models.vgg19(pretrained=True).features.eval().to(device)

    content_features = get_features(content_image, vgg, layers)
    style_features = get_features(style_image, vgg, layers)

    style_grams = {
        layer: gram_matrix(style_features[layer])
        for layer in style_features
    }

    return content_features, style_grams, target, vgg
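A minimal usage sketch for the helper above; the paths and size are placeholders, and `load_image`, `get_features`, `gram_matrix` and the global `layers` mapping are assumed to come from the same module as this snippet:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
content_features, style_grams, target, vgg = load(
    'content.jpg',  # hypothetical content image path
    'style.jpg',    # hypothetical style image path
    device,
    img_size=512)   # placeholder size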
Example #2
 def __get_features(self):
     features_data, X, y, y_bin = get_features(self.data.copy())
     self.features_data = features_data
     self.X = X
     self.y = y
     self.y_bin = y_bin
     print(X.shape)
Example #3
 def predict(self, X):
     features = [
         get_features(x, is_using_pos_chunk=self.is_using_pos_chunk)
         for x in X
     ]
     tags = self.model.predict(features)
     return tags
Example #4
def main(args):
    session_dir = create_session_dir(args.output_supdir)
    test_ae_loader = None
    ae_model = None
    if args.ae_model_path is None:
        train_ae_loader = get_data_loader(args.data_dir, args.src_image_x, 
                args.src_image_y, args.src_image_size, args.batch_size)
        test_ae_loader = get_data_loader(args.test_data_dir, args.test_image_x,
                args.test_image_y, args.test_image_size, args.batch_size)
        ae_model = make_ae_model(args.network, 19, 3) # TODO
        ae_trainer = AETrainer(
                model=ae_model,
                loaders=(train_ae_loader, test_ae_loader),
                session_dir=session_dir)
        ae_trainer.train()

    if not args.ae_model_only:
        cats = get_cats(args.cdl_file_path)
        num_cats = len(cats)
        print("Number of land cover categories above threshold: %d" \
                % (num_cats))
        if ae_model is None:
            ae_model = load_ae_model(args.ae_model_path, args.network, 
                    chip_size=19, bneck_size=3) # TODO
        if test_ae_loader is None:
            test_ae_loader = get_data_loader(args.data_dir, args.src_image_x, 
                    args.src_image_y, args.src_image_size, args.batch_size)
        features = get_features(ae_model, test_ae_loader) # TODO this should
        # operate over an entire directory
        feats_path = pj(session_dir, "feats.npy")
        np.save(feats_path, features)
        print("Features saved to %s" % (feats_path))

    retain_session_dir(session_dir)
Example #5
    def compute_match(self, image, database):
        image_hist_eq = utils.histogram_equalization(image)
        try:
            kp, des = utils.get_features(image_hist_eq)
        except Exception:
            return False, None, None

        goodImages = []
        if self.entry is None:
            for entry in database.entries:
                #if AugmentedMaps.debug:
                print(f"Matching features with {entry.name}")
                self.entry = entry
                matches = utils.match_descriptors(entry.descriptors, des)
                if AugmentedMaps.debug:
                    print(f"Found {len(matches)} descriptor matches")

                if len(matches) >= 80:
                    print(f"Found a match: {entry.name}")
                    goodImages.append((matches, entry))
                #goodImages.append((matches, entry))
        else:
            matches = utils.match_descriptors(self.entry.descriptors, des)
            if len(matches) >= 80:
                goodImages.append((matches, self.entry))

        if not goodImages:
            return False, None, None

        return True, kp, sorted(goodImages, key=lambda x: len(x[0]))
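utils.get_features here is expected to return keypoints and descriptors; a plausible OpenCV-based sketch (an assumption; the project's actual detector may differ):

import cv2

def get_features(image):
    # detect keypoints and compute binary descriptors with ORB
    orb = cv2.ORB_create()
    kp, des = orb.detectAndCompute(image, None)
    if des is None:
        raise ValueError('no descriptors found')  # lets the caller's except-branch fire
    return kp, des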
Example #6
def train(raw_data,
          with_features,
          raw_dev=None,
          classi='DT',
          use_bin=False,
          use_all_liwc=False,
          name='',
          file='',
          quiet=False):
    if not quiet:
        if name != '':
            name = ' ' + utils.COLORS['blue'] + name + utils.RESET
        if file != '':
            file = ' using ' + utils.BOLD + file + utils.RESET
        add = name + file
        spin.set_strings('Training{0}...'.format(add),
                         'Trained{0}.'.format(add))
        spin.start()

    label0, label1, label2 = utils.get_reviews(raw_data)

    reviews = [(text, 'non_biased') for text in label0] + \
        [(text, 'moderated_biased') for text in label1] + \
        [(text, 'biased') for text in label2]
    print("with {} data".format(len(reviews)))

    train_data = [((utils.get_features(text,
                                       with_features,
                                       get_bin=use_bin,
                                       get_all_liwc=use_all_liwc)), label)
                  for text, label in reviews]

    if classi == 'DT':
        classifier = nltk.classify.DecisionTreeClassifier.train(
            train_data,
            entropy_cutoff=0.05,
            depth_cutoff=100,
            support_cutoff=10)
    elif classi == 'SciDT':
        # SklearnClassifier.train only accepts the labelled feature sets
        classifier = SklearnClassifier(DecisionTreeClassifier()).train(
            train_data)
    elif classi == 'NB':
        classifier = nltk.classify.NaiveBayesClassifier.train(train_data)
    elif classi == 'SciNB':
        classifier = SklearnClassifier(BernoulliNB()).train(train_data)
    elif classi == 'SVM':
        classifier = SklearnClassifier(LinearSVC()).train(train_data)
    elif classi == 'LR':
        classifier = SklearnClassifier(LogisticRegression()).train(train_data)
    else:
        raise ValueError('Unknown classifier: {}'.format(classi))

    if not quiet:
        spin.stop()

    return classifier
Example #7
def process_frame(frame, feature_set_type='all', labels=False):
    if len(frame.hands) != 1:
        return []
    if feature_set_type == 'all':
        return utils.get_features(frame.hands[0], labels=labels)
    if feature_set_type == 'fingers_only':
        return utils.get_finger_features(frame.hands[0], labels=labels)
    if feature_set_type == 'hands_only':
        return utils.get_hand_features(frame.hands[0], labels=labels)
Example #8
def process_frame(frame, feature_set_type='all', labels=False):
    if len(frame.hands) != 1:
        #        print("Bad frame: Incorrect number of hands " + str(len(frame.hands)))
        return []
    if feature_set_type == 'all':
        return utils.get_features(frame.hands[0], labels=labels)
    if feature_set_type == 'fingers_only':
        return utils.get_finger_features(frame.hands[0], labels=labels)
    if feature_set_type == 'hands_only':
        return utils.get_hand_features(frame.hands[0], labels=labels)
Example #9
    def _get_dataset():
        dataset = get_features()

        train_data = dataset[dataset['score'] > 0.0]
        test_data = dataset[dataset['score'] < 0.0]

        train_data.reset_index(inplace=True, drop=True)
        test_data.reset_index(inplace=True, drop=True)

        return train_data, test_data
Example #10
def main():

    # Parameters
    data_directory = '../../data/generated-data-r-10-n-6-4/'
    features_path = '../../data/features-generated-data-r-10-n-6-4'
    booking_file = '../../data/booking.csv'
    users_file = '../../data/user.csv'
    rating_thresholds = []
    true_objects_indexes = [0, 1, 2, 3, 4, 5]
    false_objects_indexes = [6, 7, 8, 9]

    file_names = os.listdir(data_directory)
    img_ids_vector = [int(name.split('-')[0]) for name in file_names]
    ratings_vector = [int(name.split('-')[-2]) for name in file_names]
    name_vector = [data_directory + name for name in file_names]
    images_indexes = [name.split('-')[3].split('.')[0] for name in file_names]

    ratings_matrix, images_indexes_for_id, ids_indexes, users_matrix = load_data(
        data_directory, booking_file, users_file, rating_thresholds)

    features = get_features(features_path, name_vector)

    fa = FeatureAgglomeration(n_clusters=50)
    fa.fit(features)
    features = fa.transform(features)

    scores_auc = []
    scores_rmse = []
    for i in range(10):
        cv_results_file = '../results/cv-generated-data-r-10-n-6-4-rf-fa-' + str(
            i) + '.csv'
        selection = ObjectSelection(show_selection_results=False,
                                    selection_algorithm='rf')
        selection.transform(ids=img_ids_vector,
                            features=features,
                            ratings=ratings_vector,
                            users_ratings=ratings_matrix,
                            users=users_matrix,
                            cv_results_file=cv_results_file,
                            images_indexes=images_indexes,
                            true_objects_indexes=true_objects_indexes,
                            false_objects_indexes=false_objects_indexes,
                            paths=name_vector,
                            z_score=False)
        selection.evaluate(evaluation_metric='auc')
        selection.evaluate(evaluation_metric='rmse')
        print('\n\n-----\n\n')
        score_auc, score_rmse = selection.evaluate(evaluation_metric='auc')
        scores_auc.append(score_auc)
        scores_rmse.append(score_rmse)

    results_file = '../scores/generated-data-r-10-n-6-4-rf-fa-auc.csv'
    save_scores(scores_auc, results_file)
    results_file = '../scores/generated-data-r-10-n-6-4-rf-fa-rmse.csv'
    save_scores(scores_rmse, results_file)
Example #11
def main():
    args = get_args()

    data = utils.load_data(args.data)
    X = utils.get_features(data)

    transformation = get_transformation(args.transformation, args.n_components)

    with open(args.output, 'w') as f:
        output = transformation.fit_transform(X)
        output.to_csv(f, sep=",")
Example #12
def build_docs(repo_name, temp_dir):
    path = join(temp_dir, repo_name)
    features = get_features(join(path, 'Cargo.toml'))
    command = [
        'bash', '-c',
        'cd {} && cargo doc --no-default-features --features "{}"'.format(
            path, features)
    ]
    if not exec_command_and_print_error(command):
        input(
            "Couldn't generate docs! Try to fix it and then press ENTER to continue..."
        )
    doc_folder = join(path, 'target/doc')
    try:
        file_list = ' '.join([
            '"{}"'.format(f) for f in listdir(doc_folder)
            if isfile(join(doc_folder, f))
        ])
    except Exception as e:
        write_error('Error occurred in build docs: {}'.format(e))
        input(
            "It seems like the \"{}\" folder doesn't exist. Try to fix it then press ENTER..."
            .format(doc_folder))
    command = [
        'bash', '-c', 'cd {} && cp -r "{}" src/{} {} "{}"'.format(
            doc_folder, repo_name.replace('-', '_'),
            repo_name.replace('-', '_'), file_list,
            join(temp_dir, consts.DOC_REPO))
    ]
    if not exec_command_and_print_error(command):
        input(
            "Couldn't copy docs! Try to fix it and then press ENTER to continue..."
        )
    lines = get_file_content(join(path,
                                  'target/doc/search-index.js')).split('\n')
    before = True
    fill_extras = len(SEARCH_INDEX_BEFORE) == 0
    for line in lines:
        if line.startswith('searchIndex['):
            before = False
            # We need to be careful in here if we're in a sys repository (which should never be the
            # case!).
            if line.startswith('searchIndex["{}"]'.format(
                    repo_name.replace('-', '_'))):
                SEARCH_INDEX.append(line)
        elif fill_extras is True:
            if before is True:
                SEARCH_INDEX_BEFORE.append(line)
            else:
                SEARCH_INDEX_AFTER.append(line)
    input(
        "Couldn't find \"{}\" in `searchIndex.js`! Try to fix it and then press ENTER to \
          continue...".format(repo_name.replace('-', '_')))
Example #13
def write_model_results(model, input_file, repr, tags, outpath):
    """
    Output model results on the test set
    """
    input, input_data = read_input(input_file)

    if repr == "c":
        x = utils.get_features(input, ixs=3)
    else:
        x = utils.get_features(input, chars=True)

    w_batcher = utils.AutoBatcher(x, x, batch_size=1, shuffle=False)
    labels = []
    for inputs, _ in w_batcher.get_batches():
        output = torch.max(model(inputs), 1)[1]
        labels += output.cpu().data.numpy().tolist()

    predictions = utils.NEWLINE.join(["{} {}".format(input_data[i], tags[labels[i]])\
                                 for i in range(len(input_data))])
    with open(outpath, "w") as outfile:
        outfile.write(predictions)
Example #14
    def read_event(self, path, eta_cut=3.2):
        hits, cells, particles, truth = load_event(path)

        hits_features = get_features(hits)
        # apply the eta cuts on hits
        hits_features = hits_features[(hits_features['eta'] > eta_cut) |
                                      (hits_features['eta'] < -1 * eta_cut)]

        hits_with_truth = hits_features.merge(filter_truth(truth), on='hit_id')
        particles = pd.Series(np.unique(hits_with_truth['particle_id']))

        return hits_with_truth, particles
Example #15
def main():
    if not os.path.exists('myData.h5py'):
        # prepare the data
        stereo_to_mono(stereo_folder, groundtruth_folder)
        compress(groundtruth_folder, input_folder)
        stereo_to_mono(eval_stereo_folder, eval_groundtruth_folder)
        compress(eval_groundtruth_folder, eval_input_folder)

        # extract features
        gt_features, _ = get_features(groundtruth_folder)
        input_features, _ = get_features(input_folder)
        eval_gt_features, _ = get_features(eval_groundtruth_folder)
        eval_input_features, _ = get_features(eval_input_folder)

        # shuffle features
        gt_features, input_features = unison_shuffled_copies(
            gt_features, input_features)
        eval_gt_features, eval_input_features = unison_shuffled_copies(
            eval_gt_features, eval_input_features)

        # save features
        save_features('myData.h5py', input_features, eval_input_features,
                      gt_features, eval_gt_features)
Example #16
def run_for_game(game, model, offset, method, store_clusters):
    train_images = read_sorted_train_image_data(game, offset)
    test_images, gt_clusters, test_img_names = read_test_image_data(
        game, offset)

    if (method == 'hist'):
        train_features = utils.get_hist_features(train_images)
        test_features = utils.get_hist_features(test_images)
    elif (method == 'bag'):
        train_features, test_features = get_bag_features(
            train_images, test_images, game)
    else:
        train_features = utils.get_features(train_images, model)
        test_features = utils.get_features(test_images, model)

    kmeans = KMeans(n_clusters=2).fit(train_features)
    if store_clusters:
        dump(
            kmeans,
            utils.trained_models_dir + game + '_colors_kmeans_clusters.joblib')
    labels = kmeans.predict(test_features)

    return test_img_names, labels, gt_clusters
Example #17
    def classify(self, text):
        """
        :param text: str
        :return: str
        """
        good_text = process_tweet(text)
        features = get_features(good_text)
        if not features:
            return None
        pos_feature_matrix = [self.pos_feature_prob.get(feature, 1/self.num_positive) for feature in features]
        neg_feature_matrix = [self.neg_feature_prob.get(feature, 1/self.num_negative) for feature in features]
        positive_prob = self.pos_prob * functools.reduce(lambda x, y: x*y, pos_feature_matrix)
        negative_prob = self.neg_prob * functools.reduce(lambda x, y: x*y, neg_feature_matrix)

        # what if positive_prob = negative_prob
        return 'Positive' if positive_prob > negative_prob else 'Negative'
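The chained product above can underflow to zero for long feature lists; a log-space variant of the same decision rule (a sketch, not the author's code) avoids that:

import math

def classify_log(self, text):
    # same decision rule as classify() above, but summing logs to avoid underflow
    features = get_features(process_tweet(text))
    if not features:
        return None
    log_pos = math.log(self.pos_prob) + sum(
        math.log(self.pos_feature_prob.get(f, 1 / self.num_positive))
        for f in features)
    log_neg = math.log(self.neg_prob) + sum(
        math.log(self.neg_feature_prob.get(f, 1 / self.num_negative))
        for f in features)
    return 'Positive' if log_pos > log_neg else 'Negative'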
Example #18
def training_data_with_eta_cut(train_dir='input/train_1',
                               event_prefix="event000001000",
                               eta_cut=3.2):
    hits, cells, particles, truth = load_event(
        os.path.join(train_dir, event_prefix))

    hits_features = get_features(hits)
    # high_eta_hits = hits_features[(hits_features['eta'] > eta_cut) | (hits_features['eta'] < -1 * eta_cut)]
    high_eta_hits = hits_features[(hits_features['eta'] > eta_cut)]
    uID_for_higheta = make_uID(high_eta_hits)
    high_eta_hits_uID = pd.merge(high_eta_hits,
                                 uID_for_higheta,
                                 on=['volume_id', 'layer_id', 'module_id'])
    train_data_higheta = high_eta_hits_uID.merge(
        filter_truth(truth), on='hit_id')[['uID', 'particle_id']]
    return train_data_higheta, uID_for_higheta
Example #19
def main(content_features, style_grams, target, model, learning_rate, alpha,
         beta, steps, result_path):
    '''
    A function which handles the forward and backward pass as well
    as the optimisation of the generated image.\n
    `content_features`: Dictionary which stores the content image features.\n
    `style_grams`: Dictionary which stores the style image gram matrices.\n
    `target`: Tensor which is to be optimised to get the stylised image.\n
    `model`: Model which is used for the style transfer.\n
    `learning_rate`: The learning rate for the optimizer.\n
    `alpha`: The weight of the content loss.\n
    `beta`: The weight of the style loss.\n
    `steps`: The number of optimisation steps.\n
    `result_path`: Path where the image is supposed to be stored.
    '''

    optimizer = optim.Adam([target], lr=learning_rate)

    for step in range(1, steps + 1):

        total_loss = content_loss = style_loss = 0

        optimizer.zero_grad()

        target_features = get_features(target, model, layers)

        content_loss = torch.mean(
            (target_features['conv4_2'] - content_features['conv4_2'])**2)

        for layer in style_weights:
            target_feature = target_features[layer]
            _, c, w, h = target_feature.size()

            target_gram = gram_matrix(target_feature)
            style_gram = style_grams[layer]

            layer_style_loss = style_weights[layer] * torch.mean(
                (target_gram - style_gram)**2)

            style_loss += layer_style_loss / (c * w * h)

        total_loss = alpha * content_loss + beta * style_loss
        total_loss.backward(retain_graph=True)
        optimizer.step()

        if step % 400 == 0:
            print(f'Step({step}/{steps}) => Loss: {total_loss.item():.4f}')
            save_image(target, result_path)
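Several examples on this page call get_features and gram_matrix helpers that are not shown; below is a minimal sketch of the usual Gatys-style implementations (an assumption: the indices are torchvision's vgg19.features layer positions, and each repo's own helpers may differ):

import torch

def gram_matrix(tensor):
    # correlations between the channels of one (1, C, H, W) feature map
    _, c, h, w = tensor.size()
    features = tensor.view(c, h * w)
    return features @ features.t()

def get_features(image, model, layers=None):
    # run the image through the model, keeping the activations of named layers
    if layers is None:
        layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1',
                  '19': 'conv4_1', '21': 'conv4_2', '28': 'conv5_1'}
    features = {}
    x = image
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features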
Example #20
    def visualize(self, show='ratings', visualization_algorithm='tsne'):
        """
        Draw features in 2D space using pca or tsne, color for categories or ratings

        :param show: show categories or ratings with different colors on the graph
        :param visualization_algorithm: pca or tsne
        """
        print('\nVisualize\n')
        file_names = os.listdir(self.data_directory)
        name_vector = [self.data_directory + '/' + name for name in file_names]
        categories = [name.split("-")[1] for name in file_names]
        ratings = [
            int(name.split(".")[0].split("-")[2]) for name in file_names
        ]
        features = get_features(self.features_file, name_vector)

        if show == 'categories':
            colors = COLORS_CATEGORIES
            title = 'Kategorije'
        else:
            colors = COLORS_RATINGS
            title = 'Ocene'

        if visualization_algorithm == 'pca':
            pca = PCA(n_components=2)
            components = pca.fit_transform(features)
        else:
            tsne = TSNE(n_components=2)
            components = tsne.fit_transform(features)

        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(1, 1, 1)
        ax.set_title(title, fontsize=12)

        for index, point in enumerate(components):
            if show == 'categories':
                color = colors[categories[index]]
            else:
                color = colors[ratings[index]]
            ax.plot(point[0], point[1], marker='o', markersize=3, color=color)
        ax.grid()
        custom_lines = [
            Line2D([0], [0], color=value, lw=4) for value in colors.values()
        ]
        ax.legend(custom_lines, colors.keys())
        plt.show()
Example #21
    def __init__(self, num_state, num_action, branching_factor, num_features):
        self.num_state = num_state
        self.num_action = num_action
        self.branching_factor = branching_factor
        self.num_features = num_features

        self.behavior_policy = utils.get_uniform_policy(num_state, num_action)
        self.state_action_trans_kernel = utils.get_random_state_action_trans_kernel(
            num_state, num_action)
        self.trans_kernel = np.einsum(
            'iij->ij',
            self.behavior_policy.dot(self.state_action_trans_kernel))
        self.features = utils.get_features(num_action, num_state, num_features)

        self.state_space = np.arange(num_state)
        self.action_space = np.arange(num_action)
        self.current_state = self.state_space[0]
        self.reward = np.random.uniform(size=num_state)
Example #22
def main(args):
    bbox = [
        args.subregion_x, args.subregion_y,
        args.subregion_x + args.subregion_size,
        args.subregion_y + args.subregion_size
    ]
    data_loader, tb_chips = get_data_loader(args.data_npy_file, bbox)
    model = load_ae_model(args.model_path,
                          args.model_name,
                          chip_size=19,
                          bneck_size=3)

    rgb = get_rgb(tb_chips)
    features = get_features(model, data_loader, bbox)
    features = normalize_feats(features)
    cdl = get_cdl_chip(args.cdl_file_path, bbox)
    cdl, cat_dict = transform_cdl(cdl)
    write_cats_and_features(rgb, features, cdl, cat_dict)
Example #23
    def next_target(self, mode, cuda, device_id):
        if mode == TRAIN_MODE: target_id = self.train.next_items(1)[0]
        elif mode == DEV_MODE: target_id = self.dev.next_items(1)[0]
        elif mode == TEST_MODE: target_id = self.test.next_items(1)[0]

        _1d_feature, _2d_feature = get_features(target_id)
        contact_map = read_contact_map(target_id)

        # Convert to FloatTensors
        _1d_feature = FloatTensor(np.expand_dims(_1d_feature, 0))
        _2d_feature = FloatTensor(np.expand_dims(_2d_feature, 0))
        contact_map = LongTensor(np.expand_dims(contact_map, 0))

        if cuda:
            _1d_feature = _1d_feature.cuda(device_id)
            _2d_feature = _2d_feature.cuda(device_id)
            contact_map = contact_map.cuda(device_id)

        return target_id, _1d_feature, _2d_feature, contact_map
Example #24
    def __init__(self):
        state_trans_kernel = np.zeros((16, 16))
        state_trans_kernel[0, (1, 4)] = 0.25
        state_trans_kernel[0, 0] = 0.5
        state_trans_kernel[1, (0, 1, 2, 5)] = 0.25
        state_trans_kernel[2, (1, 2, 3, 6)] = 0.25
        state_trans_kernel[3, (2, 7)] = 0.25
        state_trans_kernel[3, 3] = 0.5
        state_trans_kernel[4, (0, 4, 5, 8)] = 0.25
        state_trans_kernel[5, 0] = 1.0
        state_trans_kernel[6, (2, 5, 7, 10)] = 0.25
        state_trans_kernel[7, 0] = 1.0
        state_trans_kernel[8, (4, 8, 9, 12)] = 0.25
        state_trans_kernel[9, (5, 8, 11, 13)] = 0.25
        state_trans_kernel[10, (6, 9, 11, 14)] = 0.25
        state_trans_kernel[11, 0] = 1.0
        state_trans_kernel[12, 0] = 1.0
        state_trans_kernel[13, (9, 12, 13, 14)] = 0.25
        state_trans_kernel[14, (10, 13, 14, 15)] = 0.25
        state_trans_kernel[15, 0] = 1.0

        reward = np.zeros(16)
        reward[-1] = 1.0

        self.num_state = 16
        self.num_action = 4
        self.num_features = 4

        self.behavior_policy = utils.get_uniform_policy(16, 4)

        self.state_action_trans_kernel = None

        self.trans_kernel = state_trans_kernel
        self.features = utils.get_features(4, 16, 4)

        self.state_space = np.arange(16)
        self.action_space = np.arange(4)
        self.current_state = self.state_space[0]
        self.reward = reward

        self._env = gym.make("FrozenLake-v0", is_slippery=False)
        self._env.reset()
Example #25
def extract_features(directory):

    cities = []
    filenames = []
    input_features = []
    output_features = []

    files = glob.glob(directory)

    for name in files:
        with open(name, 'r') as f:
            city = cityiograph.City(f.read())
            cities.append(city)
            input_features.append(get_features(city))
            output_features.append(get_results(city))

    input_features = np.array(input_features).astype('int32')
    output_features = np.array(output_features).astype('int32')

    return cities, input_features, output_features
Example #26
def load_data_helper(game, model, game_imgs, use_hist, threshold=0.9):
    # threshold default assumed; it is missing in the source snippet
    if use_hist:
        features = utils.get_hist_features(game_imgs)
    else:
        features = utils.get_features(game_imgs, model)

    # do clustering
    kmeans = KMeans(n_clusters=2).fit(features)

    # calculate soft clustering responsibilities
    probs = soft_clustering_weights(np.asarray(features), kmeans.cluster_centers_)

    # get a subset with high probability for each cluster
    indx1 = np.where(probs[:, 0] > threshold)
    indx2 = np.where(probs[:, 1] > threshold)
    size_cl1 = len(indx1[0])
    size_cl2 = len(indx2[0])

    # max_images is assumed to be a module-level global
    print('high confidence ratio: ' + str((size_cl1 + size_cl2) / max_images))

    return indx1, indx2
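soft_clustering_weights is not defined in this snippet; one plausible implementation (an assumption, not the snippet's actual helper) turns squared distances to the cluster centres into normalised, fuzzy-c-means style responsibilities:

import numpy as np

def soft_clustering_weights(data, cluster_centres, m=2):
    # squared Euclidean distance from every sample to every centre: shape (n, k)
    dist = np.array([np.sum((data - c) ** 2, axis=1) for c in cluster_centres]).T
    # inverse-distance responsibilities, normalised so each row sums to 1
    inv = 1.0 / np.maximum(dist, 1e-12) ** (1.0 / (m - 1))
    return inv / inv.sum(axis=1, keepdims=True)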
Example #27
def classify(classifier,
             raw_data,
             with_features,
             use_bin=False,
             use_all_liwc=False,
             name='',
             file='',
             quiet=False):
    if not quiet:
        if file != '':
            file = ' ' + utils.BOLD + file + utils.RESET
        if name != '':
            name = ' using ' + utils.COLORS['blue'] + name + utils.RESET
        add = file + name
        spin.set_strings('Classifying{0}...'.format(add),
                         'Classified{0}.'.format(add))
        spin.start()

    label0, label1, label2 = utils.get_reviews(raw_data)
    # print(len(pos))
    reviews = [(text, 'non_biased') for text in label0] + \
              [(text, 'moderated_biased') for text in label1] + \
              [(text, 'biased') for text in label2]

    class_data = [((utils.get_features(text,
                                       with_features,
                                       get_bin=use_bin,
                                       get_all_liwc=use_all_liwc)), label)
                  for text, label in reviews]

    acc = nltk.classify.accuracy(classifier, class_data)

    if not quiet:
        spin.set_strings(
            se='Classified{0}. Accuracy: {1:.2f}%'.format(add, acc * 100))
        spin.stop()

    return acc, classifier.classify_many([
        feat for feat, label in class_data
    ]), [label for feat, label in class_data]
Example #28
def train(optimizer, model, target, content_features, style_weights, style_grams, content_weight, style_weight, steps=2000, show_every=200):

    for ii in tqdm(range(1, steps+1)):
        
        # get the features from your target image
        target_features = get_features(target, model)

        # the content loss
        content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)
        
        # the style loss
        # initialize the style loss to 0
        style_loss = 0
        # then add to it for each layer's gram matrix loss
        for layer in style_weights:
            # get the "target" style representation for the layer
            target_feature = target_features[layer]
            target_gram = gram_matrix(target_feature)
            _, d, h, w = target_feature.shape
            # get the "style" style representation
            style_gram = style_grams[layer]
            # the style loss for one layer, weighted appropriately
            layer_style_loss = style_weights[layer] * torch.mean((target_gram - style_gram)**2)
            # add to the style loss
            style_loss += layer_style_loss / (d * h * w)
            
        # calculate the *total* loss
        total_loss = content_weight * content_loss + style_weight * style_loss
        
        # update your target image
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        
        # display intermediate images and print the loss
        if ii % show_every == 0:
            print('Total loss: ', total_loss.item())
            plt.imshow(tensor_to_image(target))
            plt.show()
    return target
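A usage sketch for the loop above; the hyperparameters are placeholders, and content, style_features, style_weights, vgg and device are assumed to be prepared as in the neighbouring examples:

import torch.optim as optim

target = content.clone().requires_grad_(True).to(device)
style_grams = {layer: gram_matrix(style_features[layer])
               for layer in style_features}
optimizer = optim.Adam([target], lr=0.003)
stylised = train(optimizer, vgg, target, content_features, style_weights,
                 style_grams, content_weight=1, style_weight=1e6)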
Example #29
    def transform(self, data):
        for sentiment, text in data:
            tweet = process_tweet(text)
            features = get_features(tweet)
            if sentiment.lower() == 'positive':
                self.positive_counter.update(features)
            elif sentiment.lower() == 'negative':
                self.negative_counter.update(features)
            else:
                print('Unknown label {label}'.format(label=sentiment))

        for word, frequency in self.positive_counter.items():
            yield Feature(
                word=word,
                sentiment='Positive',
                frequency=frequency,
            )

        for word, frequency in self.negative_counter.items():
            yield Feature(
                word=word,
                sentiment='Negative',
                frequency=frequency,
            )
Example #30
    'conv4_1': 0.2,
    'conv5_1': 0.2
}

content_weight = opt.content_weight  # alpha
style_weight = opt.style_weight  # beta

if opt.train:
    vgg = models.vgg19(pretrained=True).features
    vgg.to(device)

    for params in vgg.parameters():
        params.requires_grad_(False)

    # get content and style features only once before training
    content_features = get_features(content, vgg)
    style_features = get_features(style, vgg)

    # calculate the gram matrices for each layer of our style representation
    style_grams = {
        layer: gram_matrix(style_features[layer])
        for layer in style_features
    }

    # create a third "target" image and prep it for change
    # it is a good idea to start off with the target as a copy of our *content* image
    # then iteratively change its style
    target = content.clone().requires_grad_(True).to(device)

    optimizer = optim.Adam([target], lr=0.003)
Example #31
STEPS = 2500

model = model.getModel()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

content_image = utils.load_image(
    'https://www.rover.com/blog/wp-content/uploads/2019/01/6342530545_45ec8696c8_b-960x540.jpg'
).to(device)

style_image = utils.load_image(
    'https://images2.minutemediacdn.com/image/upload/c_crop,h_1595,w_2835,x_0,y_408/f_auto,q_auto,w_1100/v1556647489/shape/mentalfloss/62280-mona_lisa-wiki.jpg'
).to(device)

# get content and style features only once before training
content_features = utils.get_features(content_image, model)
style_features = utils.get_features(style_image, model)

# calculate the gram matrices for each layer of our style representation
style_grams = {
    layer: utils.gram_matrix(style_features[layer])
    for layer in style_features
}

# create a third "target" image and prep it for change
# it is a good idea to start off with the target as a copy of our *content* image
# then iteratively change its style
target = content_image.clone().requires_grad_(True).to(device)

# weights for each style layer
# weighting earlier layers more will result in *larger* style artifacts
Example #32
 def test(self):
     if self._test is None:
         self._test = utils.get_features(data_set='test')
     return self._test
Example #33
 def train(self):
     if self._train is None:
         self._train = utils.get_features(data_set='train')
     return self._train
Example #34
    def transform(self, results_file=''):
        """
        Classify images for each provider and save predictions

        :param results_file: path to previously computed predictions
        """
        if path.exists(results_file):
            self.load_results(results_file)
            return

        file_names = os.listdir(self.data_directory)
        images_paths = [self.data_directory + '/' + name for name in file_names]
        ids_vector = [name.split('-')[0] for name in file_names]
        categories_vector = [name.split('-')[1] for name in file_names]
        ratings_vector = [int(name.split('-')[-2]) for name in file_names]
        features = get_features(self.features_path, images_paths)
        images_indexes = [name.split('-')[3].split('.')[0] for name in file_names]

        # Split data
        train_X, test_X, train_y, test_y, train_ids, test_ids, train_indexes, test_indexes = self.split_providers(ids_vector, ratings_vector, features)

        current_it = 0
        X = train_X
        y = train_y
        train_images_indexes = [x for index, x in enumerate(images_indexes) if index in train_indexes]

        # Test on all
        model = KNeighborsClassifier()
        model.fit(X, y)
        predicted = model.predict(test_X)
        ca = accuracy_score(test_y, predicted)

        print('- - - - -')
        print(X.shape)
        print('CA')
        print(ca)
        print('- - - - -')

        true_detected = 0
        false_detected = 0
        false_true_detected = 0
        false_false_detected = 0
        other = 0
        for i, index in enumerate(test_indexes):
            p = predicted[i] == test_y[i]
            if images_indexes[index] in str(self.true_objects_indexes) and p:
                true_detected += 1
            elif images_indexes[index] in str(self.false_objects_indexes) and not p:
                false_detected += 1
            elif images_indexes[index] in str(self.false_objects_indexes) and p:
                false_true_detected += 1
            elif images_indexes[index] in str(self.true_objects_indexes) and not p:
                false_false_detected += 1
            else:
                other += 1

        print('TP: ' + str(true_detected))
        print('TN: ' + str(false_detected))
        print('FP: ' + str(false_true_detected))
        print('FN: ' + str(false_false_detected))
        print('- - - - -\n')

        print(X.shape)
        print(len(y))
        print()
        ca = 0
        ca_new = 1

        # draw
        data = copy.deepcopy(train_images_indexes)
        data.sort()
        n_bins = list(set(images_indexes)).sort()  # NOTE: list.sort() returns None, so plt.hist below falls back to its default bins
        print(n_bins)

        print()
        print('N TRUE: ')
        print(len([x for x in train_images_indexes if int(x) in self.true_objects_indexes]))
        print('N FALSE: ')
        print(len([x for x in train_images_indexes if int(x) in self.false_objects_indexes]))
        print()

        SMALL_SIZE = 16
        MEDIUM_SIZE = 16
        BIGGER_SIZE = 22

        rcParams.update({'figure.autolayout': True})
        plt.tight_layout()
        plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
        plt.rc('axes', titlesize=SMALL_SIZE)  # fontsize of the axes title
        plt.rc('axes', labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
        plt.rc('xtick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
        plt.rc('ytick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
        plt.rc('legend', fontsize=SMALL_SIZE)  # legend fontsize
        plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
        plt.xlabel('Indeks slike')
        plt.ylabel('Število slik')
        plt.title('Pred izbiranjem')
        plt.ylim(0, 250)

        _, _, patches = plt.hist(data, bins=n_bins)
        # plt.show()

        # fig, ax = plt.subplots()
        # data = train_images_indexes

        # N, bins, patches = ax.hist(data, edgecolor='white', linewidth=1)

        print(len(self.true_objects_indexes))
        print(len(self.false_objects_indexes))
        for i in range(0, len(self.true_objects_indexes)):
            patches[i].set_facecolor('b')
        for i in range(len(self.true_objects_indexes), 10):
            patches[i].set_facecolor('r')

        plt.show()

        while current_it < MAX_IT and abs(ca - ca_new) > E:
            correct_indexes = []
            kf = KFold(n_splits=3)
            ca = ca_new
            kfolds_ca = []
            print('- \n')
            for train_index, test_index in kf.split(X):
                current_train_X, current_test_X = X[train_index], X[test_index]
                current_train_y = [x for index, x in enumerate(y) if index in train_index]
                current_test_y = [x for index, x in enumerate(y) if index in test_index]
                #current_images_indexes = [x for index, x in enumerate(train_images_indexes) if index in test_index]

                model = KNeighborsClassifier()
                model.fit(current_train_X, current_train_y)
                predicted = model.predict(current_test_X)
                current_ca = accuracy_score(current_test_y, predicted)
                kfolds_ca.append(current_ca)
                for index, p in enumerate(predicted):
                    pass
                    #print(p == current_test_y[index])
                    #print(current_images_indexes[index])
                    #print('-')
                correct_indexes = correct_indexes + [test_index[index] for index, p in enumerate(predicted) if p == current_test_y[index]]
            ca_new = sum(kfolds_ca) / len(kfolds_ca)
            print('CA')
            print(ca_new)
            print()

            X = X[correct_indexes]
            y = [x for index, x in enumerate(y) if index in correct_indexes]
            train_images_indexes = [x for index, x in enumerate(train_images_indexes) if index in correct_indexes]
            current_it += 1
            print('Shape')
            print(X.shape)
            print(len(y))
            print()
            print('- \n')

            data = copy.deepcopy(train_images_indexes)
            data = [int(x) for x in data]
            data.sort()
            n_bins = list(set(images_indexes)).sort()
            print(n_bins)

            print()
            print('N TRUE: ')
            print(len([x for x in train_images_indexes if int(x) in self.true_objects_indexes]))
            print('N FALSE: ')
            print(len([x for x in train_images_indexes if int(x) in self.false_objects_indexes]))
            print()

            SMALL_SIZE = 16
            MEDIUM_SIZE = 16
            BIGGER_SIZE = 22

            rcParams.update({'figure.autolayout': True})
            plt.tight_layout()
            plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
            plt.rc('axes', titlesize=SMALL_SIZE)  # fontsize of the axes title
            plt.rc('axes', labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
            plt.rc('xtick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
            plt.rc('ytick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
            plt.rc('legend', fontsize=SMALL_SIZE)  # legend fontsize
            plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
            plt.xlabel('Indeks slike')
            plt.ylabel('Število slik')
            plt.title(str(current_it) + '. iteracija')
            plt.ylim(0, 250)

            _, _, patches = plt.hist(data, bins=n_bins)
            #plt.show()

            #fig, ax = plt.subplots()
            #data = train_images_indexes

            #N, bins, patches = ax.hist(data, edgecolor='white', linewidth=1)

            for i in range(0, len(self.true_objects_indexes)):
                patches[i].set_facecolor('b')
            for i in range(len(self.true_objects_indexes), 10):
                patches[i].set_facecolor('r')

            plt.show()


        train_images_indexes.sort()
        n_bins = list(set(images_indexes)).sort()
        plt.hist(train_images_indexes, bins=n_bins)
        plt.show()

        fig, ax = plt.subplots()
        data = train_images_indexes

        print()
        print('N TRUE: ')
        print(len([x for x in train_images_indexes if int(x) in self.true_objects_indexes]))
        print('N FALSE: ')
        print(len([x for x in train_images_indexes if int(x) in self.false_objects_indexes]))
        print()

        SMALL_SIZE = 16
        MEDIUM_SIZE = 16
        BIGGER_SIZE = 22

        plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
        plt.rc('axes', titlesize=SMALL_SIZE)  # fontsize of the axes title
        plt.rc('axes', labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
        plt.rc('xtick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
        plt.rc('ytick', labelsize=SMALL_SIZE)  # fontsize of the tick labels
        plt.rc('legend', fontsize=SMALL_SIZE)  # legend fontsize
        plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
        plt.xlabel('Indeks slike')
        plt.ylabel('Število slik')
        plt.title(str(current_it + 1) + '. iteracija')
        plt.ylim(0, 250)

        N, bins, patches = ax.hist(data, edgecolor='white', linewidth=1)

        print(len(self.true_objects_indexes))
        print(len(self.false_objects_indexes))
        for i in range(0, len(self.true_objects_indexes)):
            patches[i].set_facecolor('b')
        for i in range(len(self.true_objects_indexes), 10):
            patches[i].set_facecolor('r')

        plt.show()

        model = KNeighborsClassifier()
        model.fit(X, y)
        predicted = model.predict(test_X)
        ca = accuracy_score(test_y, predicted)

        print('- - - - -')
        print(X.shape)
        print('CA')
        print(ca)
        print('- - - - -')

        true_detected = 0
        false_detected = 0
        false_true_detected = 0
        false_false_detected = 0
        other = 0
        for i, index in enumerate(test_indexes):
            p = predicted[i] == test_y[i]
            if images_indexes[index] in str(self.true_objects_indexes) and p:
                true_detected += 1
            elif images_indexes[index] in str(self.false_objects_indexes) and not p:
                false_detected += 1
            elif images_indexes[index] in str(self.false_objects_indexes) and p:
                false_true_detected += 1
            elif images_indexes[index] in str(self.true_objects_indexes) and not p:
                false_false_detected += 1
            else:
                other += 1

        print('TP: ' + str(true_detected))
        print('TN: ' + str(false_detected))
        print('FP: ' + str(false_true_detected))
        print('FN: ' + str(false_false_detected))
        print('- - - - -\n')
Example #35
def main(args):
    i_path = args.input_path
    m_path = args.mask_path
    bg_path = args.bg_path
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    camouflage_dir = args.output_dir
    os.makedirs(camouflage_dir, exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    VGG = models.vgg19(pretrained=True).features
    VGG.to(device)

    for parameter in VGG.parameters():
        parameter.requires_grad_(False)

    style_net = HRNet.HRNet()
    style_net.to(device)

    transform = Compose([
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ])

    # try to give the earlier conv layers more weight so that we get more detail in the output image
    style_weights = args.style_weight_dic

    mask = cv2.imread(m_path, 0)
    mask = scaling(mask, scale=args.mask_scale)

    if args.crop:
        idx_y, idx_x = np.where(mask > 0)
        x1_m, y1_m, x2_m, y2_m = np.min(idx_x), np.min(idx_y), np.max(
            idx_x), np.max(idx_y)
    else:
        x1_m, y1_m = 0, 0
        y2_m, x2_m = mask.shape
        x2_m, y2_m = 8 * (x2_m // 8), 8 * (y2_m // 8)

    x1_m = 8 * (x1_m // 8)
    x2_m = 8 * (x2_m // 8)
    y1_m = 8 * (y1_m // 8)
    y2_m = 8 * (y2_m // 8)

    fore_origin = cv2.cvtColor(cv2.imread(i_path), cv2.COLOR_BGR2RGB)
    fore_origin = scaling(fore_origin, scale=args.mask_scale)
    fore = fore_origin[y1_m:y2_m, x1_m:x2_m]

    mask_crop = mask[y1_m:y2_m, x1_m:x2_m]
    mask_crop = np.where(mask_crop > 0, 255, 0).astype(np.uint8)
    kernel = np.ones((15, 15), np.uint8)
    mask_dilated = cv2.dilate(mask_crop, kernel, iterations=1)

    origin = cv2.cvtColor(cv2.imread(bg_path), cv2.COLOR_BGR2RGB)
    h_origin, w_origin, _ = origin.shape
    h, w = mask_dilated.shape
    assert h < h_origin, "mask height must be smaller than the background height; lower the mask_scale parameter"
    assert w < w_origin, "mask width must be smaller than the background width; lower the mask_scale parameter"

    print("mask size,height:{},width:{}".format(h, w))
    if args.hidden_selected is None:
        y_start, x_start = recommend(origin, fore, mask_dilated)
    else:
        y_start, x_start = args.hidden_selected

    x1, y1 = x_start + x1_m, y_start + y1_m
    x2, y2 = x1 + w, y1 + h
    if y2 > h_origin:
        y1 -= (y2 - h_origin)
        y2 = h_origin
    if x2 > w_origin:
        x1 -= (x2 - w_origin)
        x2 = w_origin

    print("hidden region...,height-{}:{},width-{}:{}".format(y1, y2, x1, x2))
    mat_dilated = fore * np.expand_dims(
        mask_crop / 255, axis=-1) + origin[y1:y2, x1:x2] * np.expand_dims(
            (mask_dilated - mask_crop) / 255, axis=-1)
    bg = origin.copy()
    bg[y1:y2,
       x1:x2] = fore * np.expand_dims(mask_crop / 255, axis=-1) + origin[
           y1:y2, x1:x2] * np.expand_dims(1 - mask_crop / 255, axis=-1)

    content_image = transform(image=mat_dilated)["image"].unsqueeze(0)
    style_image = transform(image=origin[y1:y2, x1:x2])["image"].unsqueeze(0)
    content_image = content_image.to(device)
    style_image = style_image.to(device)

    style_features = get_features(style_image, VGG, mode="style")
    if args.style_all:
        style_image_all = transform(
            image=origin)["image"].unsqueeze(0).to(device)
        style_features = get_features(style_image_all, VGG, mode="style")

    style_gram_matrixs = {}
    style_index = {}
    for layer in style_features:
        sf = style_features[layer]
        _, _, h_sf, w_sf = sf.shape
        mask_sf = (cv2.resize(mask_dilated, (w_sf, h_sf))).flatten()
        sf_idxes = np.where(mask_sf > 0)[0]
        gram_matrix = gram_matrix_slice(sf, sf_idxes)
        style_gram_matrixs[layer] = gram_matrix
        style_index[layer] = sf_idxes

    target = content_image.clone().requires_grad_(True).to(device)

    foreground_features = get_features(content_image, VGG, mode="camouflage")
    target_features = foreground_features.copy()
    attention_layers = [
        "conv3_1",
        "conv3_2",
        "conv3_3",
        "conv3_4",
        "conv4_1",
        "conv4_2",
        "conv4_3",
        "conv4_4",
    ]

    for u, layer in enumerate(attention_layers):
        target_feature = target_features[layer].detach().cpu().numpy(
        )  # output image's feature map after layer
        attention = attention_map_cv(target_feature)
        h, w = attention.shape
        if "conv3" in layer:
            attention = cv2.resize(attention, (w // 2, h // 2)) * 1 / 4
        if u == 0:
            all_attention = attention
        else:
            all_attention += attention
    all_attention /= 5
    max_att, min_att = np.max(all_attention), np.min(all_attention)
    all_attention = (all_attention - min_att) / (max_att - min_att)
    if args.erode_border:
        h, w = all_attention.shape
        mask_erode = cv2.erode(mask_crop, kernel, iterations=3)
        mask_erode = cv2.resize(mask_erode, (w, h))
        mask_erode = np.where(mask_erode > 0, 1, 0)
        all_attention = all_attention * mask_erode

    foreground_attention = torch.from_numpy(all_attention.astype(
        np.float32)).clone().to(device).unsqueeze(0).unsqueeze(0)
    b, ch, h, w = foreground_features["conv4_1"].shape
    mask_f = cv2.resize(mask_dilated, (w, h)) / 255
    idx = np.where(mask_f > 0)
    size = len(idx[0])
    mask_f = torch.from_numpy(mask_f.astype(
        np.float32)).clone().to(device).unsqueeze(0).unsqueeze(0)

    foreground_chi = foreground_features["conv4_1"] * foreground_attention
    foreground_chi = foreground_chi.detach().cpu().numpy()[0].transpose(
        1, 2, 0)
    foreground_cosine = cosine_distances(foreground_chi[idx])

    background_features = get_features(style_image, VGG, mode="camouflage")

    idxes = np.where(mask_dilated > 0)
    n_neighbors, n_jobs, reg = 7, None, 1e-3
    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    X_origin = origin[y1:y2, x1:x2][idxes] / 255
    nbrs.fit(X_origin)
    X = nbrs._fit_X
    Weight_Matrix = barycenter_kneighbors_graph(nbrs,
                                                n_neighbors=n_neighbors,
                                                reg=reg,
                                                n_jobs=n_jobs)

    idx_new = np.where(idxes[0] < (y2 - y1 - 1))
    idxes_h = (idxes[0][idx_new], idxes[1][idx_new])
    idx_new = np.where(idxes[1] < (x2 - x1 - 1))
    idxes_w = (idxes[0][idx_new], idxes[1][idx_new])

    mask_norm = mask_crop / 255.
    mask_norm_torch = torch.from_numpy(
        (mask_norm).astype(np.float32)).unsqueeze(0).unsqueeze(0).to(device)
    boundary = (mask_dilated - mask_crop) / 255
    boundary = torch.from_numpy(
        (boundary).astype(np.float32)).unsqueeze(0).unsqueeze(0).to(device)

    content_loss_epoch = []
    style_loss_epoch = []
    total_loss_epoch = []
    time_start = datetime.datetime.now()
    epoch = 0
    show_every = args.show_every
    optimizer = optim.Adam(style_net.parameters(), lr=args.lr)
    steps = args.epoch
    mse = nn.MSELoss()
    while epoch <= steps:
        #############################
        ### boundary conceal ########
        #############################
        target = style_net(content_image).to(device)
        target = content_image * boundary + target * mask_norm_torch
        target.requires_grad_(True)

        target_features = get_features(
            target, VGG)  # extract output image's all feature maps

        #############################
        ### content loss    #########
        #############################
        target_features_content = get_features(target, VGG, mode="content")
        content_loss = torch.sum((target_features_content['conv4_2'] -
                                  foreground_features['conv4_2'])**2) / 2
        content_loss *= args.lambda_weights["content"]

        #############################
        ### style loss      #########
        #############################
        style_loss = 0

        # compute each layer's style loss and add them
        for layer in style_weights:
            target_feature = target_features[
                layer]  # output image's feature map after layer
            #target_gram_matrix = get_gram_matrix(target_feature)
            target_gram_matrix = gram_matrix_slice(target_feature,
                                                   style_index[layer])
            style_gram_matrix = style_gram_matrixs[layer]
            b, c, h, w = target_feature.shape
            layer_style_loss = style_weights[layer] * torch.sum(
                (target_gram_matrix - style_gram_matrix)**2) / (
                    (2 * c * w * h)**2)
            #layer_style_loss = style_weights[layer] * torch.mean((target_gram_matrix - style_gram_matrix) ** 2)
            style_loss += layer_style_loss

        style_loss *= args.lambda_weights["style"]

        #############################
        ### camouflage loss #########
        #############################
        target_chi = target_features["conv4_1"] * foreground_attention
        target_chi = target_chi.detach().cpu().numpy()[0].transpose(1, 2, 0)
        target_cosine = cosine_distances(target_chi[idx])

        leave_loss = (np.mean(np.abs(target_cosine - foreground_cosine)) / 2)
        leave_loss = torch.Tensor([leave_loss]).to(device)

        remove_matrix = (1.0 - foreground_attention) * mask_f * (
            target_features["conv4_1"] - background_features["conv4_1"])
        r_min, r_max = torch.min(remove_matrix), torch.max(remove_matrix)
        remove_matrix = (remove_matrix - r_min) / (r_max - r_min)
        remove_loss = (torch.mean(remove_matrix**2) / 2).to(device)

        camouflage_loss = leave_loss + args.mu * remove_loss
        camouflage_loss *= args.lambda_weights["cam"]

        #############################
        ### regularization loss #####
        #############################

        target_renormalize = target.detach().cpu().numpy()[0, :].transpose(
            1, 2, 0)
        target_renormalize = target_renormalize * np.array(
            (0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
        target_renormalize = target_renormalize.clip(0, 1)[idxes]
        target_reconst = torch.from_numpy(
            (Weight_Matrix * target_renormalize).astype(np.float32))
        target_renormalize = torch.from_numpy(
            target_renormalize.astype(np.float32))
        reg_loss = mse(target_renormalize, target_reconst).to(device)
        reg_loss *= args.lambda_weights["reg"]

        #############################
        ### total variation loss ####
        #############################
        tv_h = torch.pow(target[:, :, 1:, :] - target[:, :, :-1, :],
                         2).detach().cpu().numpy()[0].transpose(1, 2, 0)
        tv_w = torch.pow(target[:, :, :, 1:] - target[:, :, :, :-1],
                         2).detach().cpu().numpy()[0].transpose(1, 2, 0)
        tv_h_mask = (tv_h[:, :, 0][idxes_h] + tv_h[:, :, 1][idxes_h] +
                     tv_h[:, :, 2][idxes_h])
        tv_w_mask = (tv_w[:, :, 0][idxes_w] + tv_w[:, :, 1][idxes_w] +
                     tv_w[:, :, 2][idxes_w])
        tv_loss = torch.from_numpy(
            (np.array(np.mean(np.concatenate([tv_h_mask,
                                              tv_w_mask]))))).to(device)
        tv_loss *= args.lambda_weights["tv"]

        total_loss = content_loss + style_loss + camouflage_loss + reg_loss + tv_loss
        total_loss_epoch.append(total_loss)

        style_loss_epoch.append(style_loss)

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        if epoch % show_every == 0:
            print("After %d criterions:" % epoch)
            print('Total loss: ', total_loss.item())
            print('Style loss: ', style_loss.item())
            print('camouflage loss: ', camouflage_loss.item())
            print('camouflage loss leave: ', leave_loss.item())
            print('camouflage loss remove: ', remove_loss.item())
            print('regularization loss: ', reg_loss.item())
            print('total variation loss: ', tv_loss.item())
            print('content loss: ', content_loss.item())
            print("elapsed time:{}".format(datetime.datetime.now() -
                                           time_start))
            canvas = origin.copy()
            fore_gen = im_convert(target) * 255.
            sub_canvas = np.vstack(
                [mat_dilated, fore_gen, origin[y1:y2, x1:x2]])
            canvas[y1:y2, x1:x2] = fore_gen * np.expand_dims(
                mask_norm, axis=-1) + origin[y1:y2, x1:x2] * np.expand_dims(
                    1.0 - mask_norm, axis=-1)
            canvas = canvas.astype(np.uint8)
            if args.save_process:
                new_path = os.path.join(
                    camouflage_dir, "{}_epoch{}.png".format(args.name, epoch))
                cv2.imwrite(new_path, cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR))
            cv2.rectangle(canvas, (x1, y1), (x2, y2), (255, 0, 0), 10)
            cv2.rectangle(canvas, (x1 - x1_m, y1 - y1_m), (x2, y2),
                          (255, 255, 0), 10)
            canvas = np.vstack([canvas, bg])
            h_c, w_c, _ = canvas.shape
            h_s, w_s, _ = sub_canvas.shape
            sub_canvas = cv2.resize(sub_canvas, (int(w_s * (h_c / h_s)), h_c))
            canvas = np.hstack([sub_canvas, canvas])
            canvas = canvas.astype(np.uint8)
            canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
            h_show, w_show, c = canvas.shape
            cv2.imshow(
                "now camouflage...",
                cv2.resize(
                    canvas,
                    (w_show // args.show_comp, h_show // args.show_comp)))

        epoch += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    time_end = datetime.datetime.now()
    print('totally cost:{}'.format(time_end - time_start))
    new_path = os.path.join(camouflage_dir, "{}.png".format(args.name))
    canvas = origin.copy()
    fore_gen = im_convert(target) * 255.
    canvas[y1:y2,
           x1:x2] = fore_gen * np.expand_dims(mask_norm, axis=-1) + origin[
               y1:y2, x1:x2] * np.expand_dims(1.0 - mask_norm, axis=-1)
    canvas = canvas.astype(np.uint8)
    canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
    cv2.imwrite(new_path, canvas)
Example #36
def searchForNeighbors(flann, imageFileName, numberOfNeighbors, layer, algorithm, dimensions):
    features = utils.get_features(imageFileName, layer, algorithm, dimensions)

    return flann.nn_index(features, numberOfNeighbors, checks=128)
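A usage sketch, assuming the pyflann binding (whose nn_index/checks signature matches the call above); the feature file, layer name, feature algorithm and dimensions are hypothetical placeholders:

import numpy as np
from pyflann import FLANN

flann = FLANN()
dataset = np.load('features.npy')  # hypothetical precomputed feature matrix
flann.build_index(dataset, algorithm='kdtree', trees=4)
neighbors, distances = searchForNeighbors(flann, 'query.jpg', 5,
                                          layer='fc7', algorithm='sift',
                                          dimensions=128)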
Example #37
def gen_models_to_run(experiment):
    # get values
    years_train = experiment['years_train']
    features = utils.get_features(experiment)
    grid_size = experiment['grid_size']
    #n_folds = experiment['n_folds']
    costs = experiment['costos']
    features_table_prefix = experiment['features_table_name']
    labels_table_prefix = experiment['labels_table_name']
    intersect_percent = experiment['intersect_percent']

    # get data
    train_index, train_x, train_y, train_costs = utils.get_data(years_train,
                                            features,
                                            grid_size,
                                            features_table_prefix,
                                            labels_table_prefix,
                                            intersect_percent)

    # Magic Loop
    for model in experiment["model"]:
        print('Using model: {}'.format(model))
        parameter_names = sorted(experiment["parameters"][model])
        parameter_values = [experiment["parameters"][model][p] for p in parameter_names]
        all_params = product(*parameter_values)

        for each_param in all_params:
            print('With parameters: {}'.format(each_param))
            print('-----------------------------')
            timestamp = datetime.datetime.now()
            parameters = {name: value for name, value
                              in zip(parameter_names, each_param)}
            # Train
            print('training')
            modelobj, importances = train(train_x,
                                          train_y,
                                          train_costs,
                                          model,
                                          parameters,
                                          2)
            # Store model
            model_id = utils.store_train(timestamp,
                                         model,
                                         parameters,
                                         features,
                                         years_train,
                                         grid_size,
                                         intersect_percent,
                                         costs,
                                         experiment['model_comment'])

            print('Model id: {}'.format(model_id))
            utils.store_importances(model_id, features, importances)

            for year_test in experiment['year_tests']:
                print('testing')
                print('For year {}'.format(year_test))
                test_index, test_x, test_y, test_costs = utils.get_data([year_test],
                                                                        features,
                                                                        grid_size,
                                                                        features_table_prefix,
                                                                        labels_table_prefix,
                                                                        intersect_percent)

                scores = predict_model(modelobj, test_x)
                utils.store_predictions(model_id,
                                        year_test,
                                        test_index,
                                        scores,
                                        test_y)
                print('scoring')
                metrics = scoring.calculate_all_evaluation_metrics(test_y.tolist(),
                                                                   scores,
                                                                   test_costs)

                utils.store_evaluations(model_id, [year_test], metrics)

                print('Cool')
                print('--------------------------')
                print('--------------------------')

        print('Done!')