Example no. 1
def find_neighbors(train_set, test_sample, radius):
    test_scaled = extract_features(test_sample)
    neighbors = {}
    mapping = {i: 0 for i in range(12)}  # one vote counter per class
    
    for img_class, class_item in enumerate(train_set):
        for img_add in class_item:
            img_scaled = extract_features(img_add)

            dist = np.linalg.norm(test_scaled - img_scaled)

            if dist < radius:
                neighbors[str(img_class) + '/' + img_add[img_add.rfind('/') + 1:-4]] = dist
        
    if neighbors:
        for key in neighbors.keys():
            idx = int(key[:key.find('/')])
            mapping[idx] = mapping[idx] + 1
            
        mapping = { k:v for k, v in mapping.items() if v }
        print('-------------')
        print(mapping)
        prediction = max(mapping.items(), key=operator.itemgetter(1))[0]
        print('test belongs to class ' + str(prediction))
        return prediction
    else:
        print('-------------')
        print('there is no neighbor in the given radius')
        return None  # None rather than False: class 0 is itself falsy
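A minimal usage sketch for the radius classifier above; it assumes extract_features returns a 1-D NumPy vector for an image path, and every file name below is purely illustrative:

import numpy as np   # dependencies of find_neighbors above
import operator

# Hypothetical layout: one list of image paths per class index.
train_set = [
    ['data/0/img_001.png', 'data/0/img_002.png'],
    ['data/1/img_010.png', 'data/1/img_011.png'],
]

prediction = find_neighbors(train_set, 'data/query.png', radius=0.5)
if prediction is None:
    print('no neighbor found; try a larger radius')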
Example no. 2
def sample_one_reward(theta, env):
    this_trajectory_reward = []
    this_trajectory_grads = []

    #first, initialize the observation
    observation = env.reset()
    current_feature = utils.extract_features(observation)

    for time_index in range(0, 200):

        #compute an action given current observation
        action = utils.compute_action(theta, current_feature)
        #print('the action is',action)

        #apply the action to the environment
        observation, reward = env.step(action)
        #print('the obs is',observation)
        #print('the reward is',reward)
        #print(' ')

        this_trajectory_reward.append(reward)
        log_policy_grad = utils.compute_log_policy_grad(
            theta, current_feature, action)
        this_trajectory_grads.append(log_policy_grad)

        current_feature = utils.extract_features(observation)

    return this_trajectory_reward, this_trajectory_grads
Example no. 3
    def think(self, game):
        import operator
        self._pattern = utils.extract_features(game.board.board, config.pattern_file_name)
        legal_moves = game.board.get_legal_moves()
        values_dict = {}
        tmp_board = game.board.board
        pattern_array = []
        for x, y in legal_moves:
            tmp_board[x][y] = game.current_player.stone_color
            pattern_array.append(utils.extract_features(tmp_board, config.pattern_file_name))
            #print(self._pattern)
            #print(value)
            tmp_board[x][y] = '.'

        values = self.CNN.run_value(pattern_array)
        for index, (x, y) in enumerate(legal_moves):
            #print(values[index])
            values_dict[(x, y)] = values[index]
        if game.current_player.stone_color == 'b':
            max_point = max(values_dict.items(), key=operator.itemgetter(1))[0]
        else:
            max_point = min(values_dict.items(), key=operator.itemgetter(1))[0]
        occurrence = utils.pattern_occurrence(game.board.board, utils.file_to_patterns("pattern.txt"))
        print(occurrence)
        print(max_point)
        print(values_dict[max_point])
        print(self.CNN.run_value([self._pattern])[0])

        return max_point

        # NOTE: the code below is unreachable because of the return above;
        # it preserves the original event-driven flow for reference.
        # wait until move event set
        self._move_event.clear()
        self._move_event.wait()
        self._move_event.clear()
        return self._next_move
Example no. 4
    def __init__(self, csv_file, root_dir, batch_size=32, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a image.
        """
        self.transform = transform
        self.root_dir = root_dir
        self.csv_file = os.path.join(self.root_dir, csv_file)
        with open(self.csv_file) as f:  # close the handle once the rows are read
            self.landmarks_frame = list(csv.DictReader(f))
        self.dictionary = {}
        for line in self.landmarks_frame:
            key = (line["img"], line["subject"])
            self.dictionary.setdefault(key, []).append(process_example(line))

        #create a list of keys
        self.keys = list(self.dictionary.keys())

        #computing features for all images
        self.feature_filename = os.path.join(self.root_dir, csv_file + ".pkl")
        if not os.path.exists(self.feature_filename):
            utils.extract_features(root_dir,
                                   keys=self.keys,
                                   layer="layer3",
                                   transform=self.transform,
                                   output_file=self.feature_filename)

        with open(self.feature_filename, 'rb') as f:
            self.features = pickle.load(f)
Example no. 5
def sample_one_trajectory(theta, env):
    this_trajectory_reward = []
    this_trajectory_grads = []

    #first, initialize the observation
    observation = env.reset()
    current_feature = utils.extract_features(observation, C.output_dim)

    for time_index in range(0, 200):

        #compute an action given current observation
        action = utils.compute_action_distribution(theta,
                                                   current_feature,
                                                   mode='train')

        #apply the action to the environment
        observation, reward, done, info = env.step(action[0])

        #record reward and grad
        this_trajectory_reward.append(reward)
        computed_grad_log_state_action = utils.compute_log_grad(
            theta, current_feature, action)
        this_trajectory_grads.append(computed_grad_log_state_action)

        current_feature = utils.extract_features(observation, C.output_dim)

        if done:
            break

    return this_trajectory_reward, this_trajectory_grads
Example no. 6
def sample_one_reward(theta, env, num_actions):
    this_trajectory_reward = []
    this_trajectory_grads = []

    #first, initialize the observation
    observation = env.reset()
    current_feature = utils.extract_features(observation, num_actions)

    for time_index in range(0, 200):
        #compute an action given current observation
        action_distribution = utils.compute_action_distribution(
            theta, current_feature)
        #print("the action distribution is",action_distribution)
        #action = np.argmax(action_distribution) This is not correct
        #action = np.random.binomial(1,action_distribution[0][1],1)[0]
        action = np.random.choice(num_actions, 1, p=action_distribution[0])[0]
        #print("the action is",action)
        #apply the action to the environment
        observation, reward, done, info = env.step(action)

        this_trajectory_reward.append(reward)
        log_softmax_grad = utils.compute_log_softmax_grad(
            theta, current_feature, action)
        this_trajectory_grads.append(log_softmax_grad)

        current_feature = utils.extract_features(observation, num_actions)

        if done:
            break

    return this_trajectory_reward, this_trajectory_grads
Example no. 7
def test_linear(args, extr, device, train_loader, test_loader, verbose=True):
    X_train, y_train = extract_features(extr, device, train_loader)
    X_test, y_test = extract_features(extr, device, test_loader)
    clf = LogisticRegression(C=1 / (X_train.size(0) * args.lam),
                             solver='saga',
                             multi_class='multinomial',
                             verbose=int(verbose))
    clf.fit(X_train.cpu().numpy(), y_train.cpu().numpy())
    acc = clf.score(X_test.cpu().numpy(), y_test.cpu().numpy())
    print('Test accuracy = %.4f' % acc)
    return acc
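The extract_features helper's contract is not shown here; the call sites above imply it returns torch tensors (note X_train.size(0) and .cpu().numpy()). A plausible stub under that assumption, with all names hypothetical. Note also the regularization choice: sklearn's LogisticRegression minimizes 0.5*||w||^2 + C*sum_i(loss_i), so C = 1/(n*lam) makes the fit proportional to the average loss plus (lam/2)*||w||^2.

import torch

def extract_features(extr, device, loader):
    """Run a frozen extractor over a loader and return (features, labels)."""
    xs, ys = [], []
    with torch.no_grad():
        for x, y in loader:
            xs.append(extr(x.to(device)).flatten(1).cpu())
            ys.append(y)
    return torch.cat(xs), torch.cat(ys)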
Example no. 8
 def think(self, game):
     max_point = random.choice(game.get_legal_nearby_moves(2))
     tmp_board = game.get_current_board()
     self._pattern = utils.extract_features(game.board.board, config.pattern_file_name)
     tmp_board[max_point[0]][max_point[1]] = game.current_player.stone_color
     new_pattern = utils.extract_features(tmp_board, config.pattern_file_name)
     #reward
     if new_pattern[10] == 1:
         print("learning...reward 1")
         print(self.CNN.run_learning([[1.]], [self._pattern], [new_pattern]))
     else:
         print("reward 0")
         print(self.CNN.run_learning([[0.]], [self._pattern], [new_pattern]))
     return max_point
Example no. 9
def main():
    img_ref = cv2.imread('../input/target.jpeg')
    overlay = cv2.imread('../input/overlay.jpg')
    cap = cv2.VideoCapture('../input/input.mp4')

    video_array = []

    # extracting descriptors for reference image
    kp1, kpo1, des1 = utils.extract_features(img_ref)

    frame_counter = 0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    while cap.isOpened():
        # reading each frame of the video
        ret, frame = cap.read()

        # save the video when reading is over
        if not ret or (cv2.waitKey(1) & 0xFF == ord('q')):
            out = cv2.VideoWriter(
                '../output/output.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15,
                (video_array[0].shape[1], video_array[0].shape[0]))

            # save each frame to video
            for i in range(len(video_array)):
                out.write(video_array[i])
            out.release()

            break

        # extracting descriptors for each frame of the video
        kp2, kpo2, des2 = utils.extract_features(frame)

        # matching the image descriptors
        matches, matches_pos = matching.match(des1, des2)

        # skip this frame if there are not at least three matches
        if len(matches) < 3:
            continue

        # obtaining the final affine matrix
        affine_matrix = affine.affine_transformation_estimation(
            kp1, kp2, matches_pos)

        # pasting the overlying image on each frame
        final_frame = utils.pasting_overlay(img_ref, frame, overlay,
                                            affine_matrix)

        video_array.append(final_frame)
        frame_counter += 1
        print('processing frame', frame_counter, 'of', total_frames)

    cap.release()  # free the capture device once processing ends
Example no. 10
def train():
    notcars = glob.glob('data/non-vehicles/*/*.png')
    cars = glob.glob('data/vehicles/*/*.png')

    print_stats(cars, notcars)

    features_car = []
    for car in cars:
        img = read_image(car)
        img_processed = process_image(img)
        features_car.append(extract_features(img_processed, parameters))

    features_notcar = []
    for notcar in notcars:
        img = read_image(notcar)
        img_processed = process_image(img)  # png
        features_notcar.append(extract_features(img_processed, parameters))

    features = np.vstack((features_car, features_notcar))
    # Fit a per-column scaler
    scaler = StandardScaler().fit(features)
    # Apply the scaler to X
    features_scaled = scaler.transform(features)
    # Define the labels vector
    labels = np.hstack(
        (np.ones(len(features_car)), np.zeros(len(features_notcar))))
    # Split up data into randomized training and test sets
    rand_state = np.random.randint(0, 100)
    out = train_test_split(features_scaled,
                           labels,
                           test_size=0.2,
                           random_state=rand_state)
    features_train, features_test, labels_train, labels_test = out

    # Initialize support vector machine object
    clf = SVC(kernel='linear', C=0.00001)
    # Check the training time for the SVC
    t = time.time()
    clf.fit(features_train, labels_train)
    print('{0:2.2f} seconds to train SVC...'.format(time.time() - t))

    # Accuracy score
    accuracy = clf.score(features_test, labels_test)
    print('Test Accuracy of SVC = {0:2.4f}'.format(accuracy))

    classifier = Classifier(clf, scaler)
    joblib.dump(classifier, 'classifier.pkl')

    return classifier
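Once serialized, the classifier can be restored for inference; a minimal sketch, assuming the Classifier wrapper exposes the clf and scaler passed to its constructor as attributes of the same name:

import joblib

classifier = joblib.load('classifier.pkl')
# new_features: array of raw feature vectors (illustrative name);
# scale with the training-time scaler before predicting
features_scaled = classifier.scaler.transform(new_features)
predictions = classifier.clf.predict(features_scaled)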
Example no. 11
def load_image_features(args):
    # Image preprocessing, normalization for the pretrained b7
    transform = transforms.Compose([
        transforms.Resize(args.crop_size),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # train split
    fts_file = os.path.join(args.save,
                            'b7_v2.{}.{}.th'.format(args.data_set, 'train'))
    if os.path.isfile(fts_file):
        print('[INFO] loading image features: {}'.format(fts_file))
        fts = torch.load(fts_file, map_location='cpu')
    else:
        print('[INFO] computing image features: {}'.format(fts_file))
        data_loader = Dataset(args.image_folder.format(args.data_set),
                              args.data_split_file.format(
                                  args.data_set, 'train'),
                              transform,
                              num_workers=args.num_workers)
        attr_file = args.attribute_file.format(args.data_set)

        fts = utils.extract_features(data_loader, attr_file,
                                     args.attr2idx_file, device,
                                     args.image_model)
        torch.save(fts, fts_file)

    # dev split
    fts_file_dev = os.path.join(args.save,
                                'b7_v2.{}.{}.th'.format(args.data_set, 'val'))
    if os.path.isfile(fts_file_dev):
        print('[INFO] loading image features: {}'.format(fts_file_dev))
        fts_dev = torch.load(fts_file_dev, map_location='cpu')
    else:
        print('[INFO] computing image features: {}'.format(fts_file_dev))
        data_loader_dev = Dataset(args.image_folder.format(args.data_set),
                                  args.data_split_file.format(
                                      args.data_set, 'val'),
                                  transform,
                                  num_workers=args.num_workers)
        attr_file_dev = args.attribute_file.format(args.data_set)
        fts_dev = utils.extract_features(data_loader_dev, attr_file_dev,
                                         args.attr2idx_file, device,
                                         args.image_model)
        torch.save(fts_dev, fts_file_dev)

    return fts, fts_dev
Example no. 12
def sample_one_trajectory(q,
                          env,
                          normalizer,
                          normalizer_data,
                          policy,
                          direction=None,
                          delta=None):
    state = env.reset()
    sum_rewards = 0

    normalizer.n = normalizer_data['n']
    normalizer.mean = normalizer_data['mean']
    normalizer.mean_diff = normalizer_data['mean_diff']
    normalizer.var = normalizer_data['var']

    while True:
        state = utils.extract_features(state)
        normalizer.observe(state)
        state = normalizer.normalize(state)
        action = policy.evaluate(state, delta, direction)
        state, reward, done, _ = env.step(action)
        sum_rewards += reward
        if done:
            break

    new_normalizer_data = {}
    new_normalizer_data['n'] = normalizer.n
    new_normalizer_data['mean'] = normalizer.mean
    new_normalizer_data['mean_diff'] = normalizer.mean_diff
    new_normalizer_data['var'] = normalizer.var

    q.put([sum_rewards, new_normalizer_data])
Example no. 13
def evaluate_text(text, dataset_type, language_model,
                  language_model_output_type, feature_type, classifier):
    """
    :param text: Input text for obfuscation detection
    :param dataset_type: The dataset used to train the obfuscation detection model.
    :param language_model: The Language Model used to train the obfuscation detection model.
    :param language_model_output_type: The Language Model output type used to train the obfuscation detection model.
    :param feature_type: The feature type used to train the obfuscation detection model.
    :param classifier: The classifier used to train the obfuscation detection model.
    :return: "Original" if the text is classified as unmodified, "Obfuscated" otherwise.
    """

    features = utils.extract_features(text, language_model,
                                      language_model_output_type, feature_type)

    req_model_path = 'models/' + '_'.join([
        dataset_type, language_model, language_model_output_type, classifier,
        feature_type
    ])
    clf = utils.load_model(req_model_path)
    prediction = (clf.predict([features]))[0]
    if prediction == 0:
        logging.warning('The input text is ORIGINAL ')
        return "Original"
    else:
        logging.warning('The input text is OBFUSCATED ')
        return "Obfuscated"
Example no. 14
def main():
    file_names = os.listdir(args.data_dir)
    result = dict()

    print('Loading model...')
    m = joblib.load(model_path)

    for i in tqdm(range(len(file_names))):
        _, wav_data = read_wav(file_names[i], 'test', silence=True)
        feature = extract_features(wav_data,
                                   task=args.task,
                                   frame_size=frame_size,
                                   frame_shift=frame_shift)

        if args.task == 1:
            pred = m.predict(feature).tolist()
        else:
            pred1 = m[0].score_samples(feature)
            pred2 = m[1].score_samples(feature)
            pred = (pred1 > pred2).astype(np.int64).tolist()
        pred = prediction_to_vad_label(pred,
                                       frame_size=frame_size,
                                       frame_shift=frame_shift)

        file_name = file_names[i].split('.')[0]
        result[file_name] = pred

    print('Saving results...')
    save_prediction_labels(result,
                           save_dir=args.save_dir,
                           file_name=args.save_name,
                           save_format='txt')
Example no. 15
    def resolve(self, img_in, patch_size=(9, 9), step=6, augmented=True):
        assert img_in.ndim == 2

        features = utils.extract_features(img_in, augmented=augmented)
        n_features = features.shape[-1]
        patches = self._split_into_patches(features, (*patch_size, n_features),
                                           step=step)  # use the step argument, not a hard-coded 6
        patches_arr_size = patches.shape[0:2]

        X = np.reshape(patches, (np.prod(patches_arr_size), -1))
        X = self.scaler.transform(X)
        X_comp = self.lsh.transform(X)
        Y_pred = self.rf.predict(X, X_comp)  # MOST EXPENSIVE ACTION

        patches = np.reshape(Y_pred, (*patches_arr_size, *patch_size))

        img_out_delta = np.zeros(img_in.shape)
        div_coef = np.zeros(img_in.shape)
        for i in range(patches_arr_size[0]):
            for j in range(patches_arr_size[1]):
                patch = patches[i, j]
                p = patch_size[0]
                img_out_delta[step * i:step * i + p,
                              step * j:step * j + p] += patch
                div_coef[step * i:step * i + p,
                         step * j:step * j + p] += np.ones(patch_size)

        with warnings.catch_warnings():  # do not warn on division by zero
            warnings.simplefilter("ignore")
            img_out_delta /= div_coef
        return img_in + img_out_delta
Example no. 16
    def main(self, query):
        # print("My name is Chatterbot and I'm a chatbot. If you want to exit, type Bye!")

        user_response = query.lower()
        print(user_response)

        # user_intent = self.intentClassifier.predict(user_response)
        # print("intent is: %s" % (user_intent))

        prediction = predict([query])
        user_intent = prediction[0][1]
        search_features = extract_features(prediction[0][0], prediction[0][1],
                                           prediction[0][2])
        # print(pprint([(X.text, X.label_) for X in search_features]))

        if search_features:
            self.infoExtractor.extractSearchParams(search_features)
            self.resp = self.complexResponse()
        elif user_intent:
            self.resp = self.simpleResponse(user_intent)
        else:
            self.resp = self.initiatorResponse()

        print("---------------" + self.resp[0])
        if self.reset:
            self.reset = None
            self.infoExtractor.resetParams()
        return self.resp
Example no. 17
    def load_data(self):
        log("LOADING IMAGES...")
        positive_files = []
        negative_files = []
        for pos_dir in self.pos_directories:
            positive_files += glob.glob(pos_dir + '*')
        for neg_dir in self.neg_directories:
            negative_files += glob.glob(neg_dir + '*')  # +=: accumulate instead of overwriting each iteration
        positive_images = [np.array(Image.open(x).convert('L')) for x in positive_files]
        negative_images = [np.array(Image.open(x).convert('L')) for x in negative_files]

        image_shape = positive_images[0].shape
        for i, im in enumerate(positive_images):
            if im.shape != image_shape:
                message = "Not all images have same shape. Image with different shape: %s" % positive_files[i]
                raise ValueError(message)
        for i, im in enumerate(negative_images):
            if im.shape != image_shape:
                message = "Not all images have same shape. Image with different shape: %s" % negative_files[i]
                raise ValueError(message)
        self.window_shape = (image_shape[0] - 2, image_shape[1] - 2)  # Account for convolution

        log("EXTRACTING FEATURES...")
        self.training_features = np.vstack(
            [utils.extract_features(im, self.cell_size, self.window_shape)[0]
             for im in positive_images + negative_images]
        )
        self.training_labels = [TRUE] * len(positive_images) + [FALSE] * len(negative_images)
        self.training_labels = np.array(self.training_labels)
Example no. 18
def audio_prediction(audio_file):
    feats = extract_features(
        audio_file, mel=True, mfcc=True, chroma=True, contrast=True)
    scaler = pickle.load(open(config.SCALAR_PATH, 'rb'))
    X = scaler.transform(feats.reshape(1, -1))
    pred = MODEL.predict_proba(X)
    return pred[0][1]
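A usage sketch, assuming MODEL and config.SCALAR_PATH are initialized at import time as the snippet implies; the file path is illustrative:

prob = audio_prediction('sample.wav')
print('P(positive class) = %.3f' % prob)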
Example no. 19
 def __init__(self, *args, **kwargs):
     super(GuiTestPlayer, self).__init__(*args, **kwargs)
     self._move_event = Event()
     self._next_move = None
     self._pattern = [0] * config.pattern_num
     self._feature = utils.extract_features(Board().board, config.pattern_file_name)
     self.CNN = cnn.CriticNN(len(self._feature))
Example no. 20
def calc_orientation_features(image, n_taps=5):
    """
    Calculates the orientation tensor features for each position in an image

    Parameters:
    -----------
    image : np array of floating point values; shape: [n_rows, n_cols]
    n_taps : specifies whether to use 5-tap or 7-tap derivative filters

    Returns:
    --------
    E : list of energy values for each pixel position in image
    O : list of orientedness values for each pixel position in image
    T : list of theta values for each pixel position in image
    """
    E = []  #energy values
    O = []  #orientedness values
    T = []  #theta values

    n_rows, n_cols = image.shape
    # the derivative images depend only on the input, so compute them once
    x_p = x_deriv(image, n_taps=n_taps)
    y_p = y_deriv(image, n_taps=n_taps)
    for x in range(n_cols):
        for y in range(n_rows):
            T_loc = gen_cov(x_p, y_p, x, y)
            energy, orientedness, theta = extract_features(T_loc)
            E.append(energy)
            O.append(orientedness)
            T.append(theta)

    return E, O, T
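A quick smoke test under the stated shape convention ([n_rows, n_cols] float array); x_deriv, y_deriv, gen_cov, and extract_features are assumed to come from the surrounding module:

import numpy as np

rng = np.random.default_rng(0)
image = rng.standard_normal((32, 32))
E, O, T = calc_orientation_features(image, n_taps=5)
assert len(E) == 32 * 32  # one (energy, orientedness, theta) triple per pixel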
Example no. 21
 def board_value_listener(self, event):
     patterns = file_to_patterns('pattern.txt')
     feature = extract_features(event.board.board, patterns)
     cnn = CriticNN(len(feature))
     children = []
     features = []
     for pos, next_board in event.board.enumerate_next_board():
         n = pos[0] * 15 + pos[1]
         stone = self.children[224-n]
         if stone.has_stone():
             continue
         else:
             children.append(stone)
             feature = extract_features(next_board, patterns)
             features.append(feature)
     for child, v in zip(children, cnn.run_value(features)):
         child.show_value(v[0])
Example no. 22
 def __init__(self, *args, **kwargs):
     super(ReinforceAIPlayer, self).__init__(*args, **kwargs)
     self._move_event = Event()
     self._next_move = None
     self.mul_values = [
         10000, 8000, 1000, 1000, 900, 100, 400, 110, 100, 60, 5, 5, 50, 50,
         -10000, -8000, -1000, -1000, -900, -100, -400, -110, -100, -60, -5, -5, -50, -50,
     ]
     self._feature = utils.extract_features(Board().board, config.pattern_file_name)
     self.CNN = cnn.CriticNN(len(self._feature))
     self.load_pattern = utils.file_to_patterns("pattern.txt")
Example no. 23
def evaluate_model(model, val_tweets):
    correct, total = 0, len(val_tweets)
    for val_set_X, val_set_y in utils.extract_features(
            val_tweets, feat_type=utils.FEAT_TYPE, test_file=False):
        prediction = model.predict_on_batch(val_set_X)
        prediction = np.round(prediction)
        correct += np.sum(prediction == val_set_y[:, None])
    return float(correct) / total
Example no. 24
	def run_preprocessing_pipeline(image_id):
	    image_path = os.path.join(base_dir, image_id)
	    image = skimage.io.imread(image_path)
	    image = utils.segment_lungs(color.rgb2gray(image), display=True)
	    image = utils.clean_noise(image)
	    feature_dictionary = utils.extract_features(image)
	    feature_dictionary['image_id'] = image_id
	    return feature_dictionary
Example no. 25
    def forward(self, F_a, F_b, positive_matches, iteration, train_or_val):
        '''
        F_a is a list containing 5 feature maps from different layers
        1: B x C X H/(scale*4) x W/(scale*4)
        2: B x C X H/(scale*8) x W/(scale*8)
        3: B x C X H/(scale*8) x W/(scale*8)
        4: B x C X H/(scale*16) x W/(scale*16)
        5: B x C X H/(scale*16) x W/(scale*16)
        positive_matches is the set of positive matches sampled by the dataloader.
        {'a':BxNx2,'b':BxNx2}
        '''
        self.max_size_x = F_a[0].shape[3]  # B x C x H x W
        self.max_size_y = F_a[0].shape[2]

        '''compute loss for each layer'''
        loss = 0
        contrasloss = 0
        gnloss = 0
        e1 = 0
        e2 = 0

        contrasloss_level = []
        gnloss_level = []
        loss_pos_mean_level = []
        loss_neg_mean_level = []

        N = positive_matches['a'].shape[1]  # the number of pos and neg matches
        # compute scaling w.r.t. the original size (i.e. RobotCar 1024x1024)
        scaling = [4*self.img_scale, 8*self.img_scale, 8*self.img_scale, 16*self.img_scale, 16*self.img_scale]
        for i in range(len(F_a)):
            # scaling for current layer
            level = scaling[i]
            # randomly select positive matches from dataset
            positive_matches_sampled = random_select_positive_matches(positive_matches['a'], positive_matches['b'], num_of_pairs=self.num_matches)
            # slice positive features
            fa_sliced_pos = extract_features(F_a[i], positive_matches_sampled['a'] / level)
            '''compute contrastive loss'''
            # sample from topM hardest negatives
            topM = np.clip(300*np.exp(-iteration*0.6/10000), a_min = 5, a_max=None)
            # progressive mining negative samples
            loss_contras, loss_pos_mean, loss_neg_mean = self.pair_selector.get_triplets(F_a[i], F_b[i], positive_matches_sampled, level, topM = int(topM), dist_threshold=0.2, train_or_val=train_or_val, level=i)            

            contrasloss_level.append(loss_contras) # check loss on all scales for debugging 
            loss_pos_mean_level.append(loss_pos_mean)
            loss_neg_mean_level.append(loss_neg_mean)

            '''compute gn loss'''
            loss_gn_all = self.compute_gn_loss(fa_sliced_pos, F_b[i], positive_matches_sampled['b'] / level, train_or_val)  # //4
            loss_gn = loss_gn_all[0]
            gnloss_level.append(loss_gn)
            loss = self.contrastive_lamda*loss_contras + (self.gn_lamda * loss_gn) + loss 
            gnloss = (self.gn_lamda * loss_gn) + gnloss # for visualization in trainer.py
        
            contrasloss = (self.contrastive_lamda * loss_contras) + contrasloss
            e1 = e1 + loss_gn_all[1]
            e2 = e2 + loss_gn_all[2]

        return loss, contrasloss, gnloss, contrasloss_level, gnloss_level, e1, e2, loss_pos_mean_level, loss_neg_mean_level
Example no. 26
 def board_value_listener(self, event):
     return  # listener disabled: everything below is intentionally skipped
     patterns = file_to_patterns('pattern.txt')
     feature = extract_features(event.board.board, patterns)
     cnn = CriticNN(len(feature))
     children = []
     features = []
     for pos, next_board in event.board.enumerate_next_board():
         n = pos[0] * 15 + pos[1]
         stone = self.children[224-n]
         if stone.has_stone():
             continue
         else:
             children.append(stone)
             feature = extract_features(next_board, patterns)
             features.append(feature)
     for child, v in zip(children, cnn.run_value(features)):
         child.show_value(v[0])
Example no. 27
def full_training():
  TRAIN_DIR = "./train-mails"
  TEST_DIR = "./test-mails"

  dictionary = make_dictionary(TRAIN_DIR)

  print("reading and processing emails from file.")
  features_matrix, labels = extract_features(TRAIN_DIR, dictionary)
  test_feature_matrix, test_labels = extract_features(TEST_DIR, dictionary)

  print("training model.")
  clf = svm.SVC()
  clf.fit(features_matrix, labels)

  predicted_labels = clf.predict(test_feature_matrix)

  print("FINISHED classifying. accuracy score: ")
  print(accuracy_score(test_labels, predicted_labels))
Example no. 28
	def classify_data(self, online_training: bool = False):
		filtered_data = self.filter_settings.apply(self.data_buffer[:, self.INTERNAL_BUFFER_EXTRA_SIZE:])

		feature_vector = \
			utils.extract_features([utils.EegData(filtered_data)], self.feature_extraction_info, self.feature_types)[0]

		feature_data = feature_vector.data

		print("Feature Vector extracted successfully...")

		label = self.classifier.classify(feature_data)

		while type(label) == np.ndarray:
			label = label[0]

		print(f"label = {label}")

		direction = None

		if online_training:
			correct_label = self.current_mental_task.label

			self.trial_count += 1

			if label != correct_label:
				self.log("Wrong classification!")
			else:
				self.log("Correct Classification")
				self.correct_count += 1

			# self.classifier.get_data_set().append_to(feature_data, np.array([correct_label]), data.DataSubSetType.TRAINING)
			# TODO: Might block execution for too long
			# self.classifier.train()
			# self.log("Training accuracy: " + self.classifier.training_set_accuracy())

		path = ""
		for trial_class in self.trial_classes:
			if trial_class.label == label:
				path = trial_class.get_image_path()
				direction = trial_class.direction
				break
		self.class_pixmap = QPixmap(path).scaledToHeight(self.CLASS_IMAGE_HEIGHT, Qt.FastTransformation)
		self.class_label.setPixmap(self.class_pixmap)

		if self.ROBOT_CONTROL:
			if direction == utils.Direction.LEFT:
				self.motor_control.turn_left_from_middle(90, self.DEFAULT_ROBOT_SPEED)
				print("left")
			elif direction == utils.Direction.RIGHT:
				self.motor_control.turn_right_from_middle(90, self.DEFAULT_ROBOT_SPEED)
				print("right")
			elif direction == utils.Direction.FORWARD:
				self.motor_control.forward(self.DEFAULT_ROBOT_SPEED)
				print("forward")
			elif direction == utils.Direction.BACKWARD:
				self.motor_control.backward(self.DEFAULT_ROBOT_SPEED)
				print("backward")
Example no. 29
def evaluate(gallery_loader, probe_loader, net, epoch, recorder, logger):

    stats = recorder.val_stats
    meters = {stat: AverageMeter() for stat in stats}
    net.eval()

    gallery_features, gallery_labels, gallery_views = extract_features(
        gallery_loader, net, index_feature=0, require_views=True)
    probe_features, probe_labels, probe_views = extract_features(
        probe_loader, net, index_feature=0, require_views=True)
    dist = cdist(gallery_features, probe_features, metric='euclidean')
    CMC, MAP = eval_cmc_map(dist, gallery_labels, probe_labels, gallery_views,
                            probe_views)
    rank1 = CMC[0]
    meters['acc'].update(rank1, 1)

    logger.print_log('  **Test**  ' + create_stat_string(meters))
    recorder.update(epoch=epoch, is_train=False, meters=meters)
Example no. 30
    def get_next_impression_block(self):
        # Obtain the first line of an impression block
        assert len(self.line_buffer) <= 1
        if len(self.line_buffer) == 0:
            line = self.get_next_line()
            if not line:
                return False
        else:
            line = self.line_buffer.pop()

        block_impression_id = utils.extract_impression_id(line)
        if self.id_map:
            block_impression_id = self.id_map(block_impression_id)

        if not self.isTest:
            cost, propensity = utils.extract_cost_propensity(
                line, inverse_propensity=self.inverse_propensity)

        candidate_features = [utils.extract_features(line, self.debug)]

        while True:
            line = self.get_next_line()
            if not line:  #EOF
                break

            line_impression_id = utils.extract_impression_id(line)
            if self.id_map:
                line_impression_id = self.id_map(line_impression_id)

            if line_impression_id != block_impression_id:
                # Save the line in the line_buffer
                self.line_buffer.append(line)
                break
            else:
                candidate_features.append(
                    utils.extract_features(line, debug=self.debug))

        _response = {}
        _response["id"] = block_impression_id
        _response["candidates"] = candidate_features
        if not self.isTest:
            _response["cost"] = cost
            _response["propensity"] = propensity
        return _response
Example no. 31
def find_neighbors(train_set, test_sample, test_class, k):
    test_scaled = extract_features(test_sample)
    knn = {}
    mapping = {i: 0 for i in range(12)}  # one vote counter per class
    for img_class, class_item in enumerate(train_set):
        for img_add in class_item:

            img_scaled = extract_features(img_add)
            dist = np.linalg.norm(test_scaled - img_scaled)
            key = str(img_class) + '/' + img_add[img_add.rfind('/') + 1:-4]
            if len(knn) < k:
                knn[key] = dist
            elif dist < max(knn.values()):
                knn[key] = dist
                del knn[max(knn.items(), key=operator.itemgetter(1))[0]]

    print('-------------')
    for key in knn.keys():
        idx = int(key[:key.find('/')])

        mapping[idx] = mapping[idx] + 1

    mapping = {k: v for k, v in mapping.items() if v}

    prediction = max(mapping.items(), key=operator.itemgetter(1))[0]
    print(mapping)
    print(test_sample)
    print('prediction is: ', prediction)
    print('true label is: ', test_class)
    return prediction
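The dict buffer above rescans for the maximum distance on every insertion, which is O(k) per training image. For larger k, a max-heap of (negated distance, key) pairs from the standard heapq module keeps each update O(log k); a sketch of just the buffer update, under the same key and distance scheme:

import heapq

def knn_push(heap, k, key, dist):
    """Keep the k smallest distances; heap entries are (-dist, key)."""
    if len(heap) < k:
        heapq.heappush(heap, (-dist, key))
    elif dist < -heap[0][0]:  # strictly closer than the current worst neighbor
        heapq.heapreplace(heap, (-dist, key))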
Example no. 32
    def __getitem__(self, idx):
        name = self.path + '/' + self.dataList[idx]

        features = utils.extract_features(name)

        if self.dataLabel[idx] == 'F':
            labels = np.asarray([0]).astype(float)
        elif self.dataLabel[idx] == 'M':
            labels = np.asarray([1]).astype(float)
        else:
            raise ValueError('unexpected label: %r' % self.dataLabel[idx])
        return features, labels
Example no. 33
 def think(self, game):
       if game.board.num_stone < len(game.test_move):
           max_point = game.test_move[game.board.num_stone]
           tmp_board = game.board.board
           self._pattern = utils.extract_features(tmp_board, config.pattern_file_name)
           #print("current pattern:", self._pattern)
           tmp_board[max_point[0]][max_point[1]] = game.current_player.stone_color
           new_pattern = utils.extract_features(tmp_board, config.pattern_file_name)
           #print("new pattern:", new_pattern)
           new_occurrence = utils.pattern_occurrence(tmp_board, utils.file_to_patterns("pattern.txt"))
           print("new occur:", new_occurrence)  # print the stored value instead of recomputing it
           #reward
           if new_occurrence[10] >= 1:
               print("learning...reward 1")
               print(self.CNN.run_learning([[1.]], [self._pattern], [new_pattern]))
           else:
               print("reward 0")
               print(self.CNN.run_learning([[0.]], [self._pattern], [new_pattern]))
           return max_point
Example no. 34
    def test(self, im):
        pool = None
        if utils.USE_THREADING:
            pool = multiprocessing.Pool(utils.NUM_THREADS)

        start_time = time.time()
        utils.log_since("Starting", start_time)
        all_features = []
        all_positions = []

        for scale in self.scales:
            if scale != 1:
                downsampled_image = ndimage.interpolation.zoom(im, scale)
            else:
                downsampled_image = im
            features, positions = utils.extract_features(downsampled_image, self.cell_size, self.window_shape)
            if features is None:
                continue
            positions /= scale
            all_features.append(features)
            all_positions.append(positions)

        all_features = np.vstack(all_features)
        all_positions = np.vstack(all_positions)
        for i, (classifier, threshold) in enumerate(zip(self.classifiers, self.thresholds)):  # zip: izip is Python 2 only
            utils.log_since("Testing cascade level %s" % i, start_time)
            output_probs = classifier.predict_proba(all_features)[:, 1]

            # print "non-zero", np.sum(output_probs != 0)
            # hist, edges = np.histogram(output_probs, 100, (0, 1))
            # for i, h in enumerate(hist):
            #     print "%.4f: %s" % (edges[i], "*" * h)

            meets_threshold = output_probs > threshold
            output_probs = output_probs[meets_threshold]
            all_positions = all_positions[meets_threshold]
            all_features = all_features[meets_threshold]

        self.positions = all_positions
        self.likelihoods = output_probs
        if self.positions.shape[0] > 0:
            best_activation = np.argmax(output_probs)
            self.best_position = self.positions[best_activation, :].tolist()
        else:
            self.best_position = None

        utils.log_since("Done", start_time)

        if utils.USE_THREADING:
            pool.close()

        return all_positions #self.best_position
Example no. 35
    def think(self, game):
        legal_moves = game.board.get_legal_nearby_moves(2) or [(7, 7)]
        values_dict = {}
        tmp_board = game.board.board
        pattern_array = []
        white_will_win = 0
        black_will_win = 0
        max_point = (-1, -1)
        max_eval_move = (-1, -1)
        if game.current_player.stone_color == 'b':
            max_eval = -10000
        else:
            max_eval = 10000
        occurrence = utils.pattern_occurrence(game.board.board, self.load_pattern)
        od_value = sum([a * b for a, b in zip(occurrence, self.mul_values)])
        for x, y in legal_moves:
            tmp_board[x][y] = game.current_player.stone_color
            pattern = utils.extract_features(tmp_board, config.pattern_file_name)
            pattern_array.append(pattern)
            state = utils.get_state(tmp_board)
            self_occurrence = utils.pattern_occurrence(tmp_board, self.load_pattern)
            self_value = sum([a * b for a, b in zip(self_occurrence, self.mul_values)])
            if game.current_player.stone_color == 'b':
                if self_value > max_eval:
                    max_eval = self_value
                    max_eval_move = (x, y)
                elif self_value == max_eval:
                    if random.randint(0,9) >= 4:
                        max_eval_move = (x, y)
            elif game.current_player.stone_color == 'w':
                if self_value < max_eval:
                    max_eval = self_value
                    max_eval_move = (x, y)
                elif self_value == max_eval:
                    if random.randint(0,9) >= 4:
                        max_eval_move = (x, y)

            if state == 1:
                print('b win')
                black_will_win = 1
                max_point = (x, y)
            elif state == 2:
                print('w win')
                white_will_win = 1
                max_point = (x, y)
            tmp_board[x][y] = '.'

        if max_eval_move == (-1, -1):
            max_eval_move = random.choice(legal_moves)

        values = self.CNN.run_value(pattern_array)
        value_set = set()
        for index, (x, y) in enumerate(legal_moves):
            values_dict[(x, y)] = values[index]
            value_set.add(values[index][0])

        if black_will_win == 0 and white_will_win == 0:
            if random.randint(0,9) >= 3 and len(value_set) >= 5:
                #print("set len:", len(value_set))
                if game.current_player.stone_color == 'b':
                    max_point = max(values_dict.items(), key=operator.itemgetter(1))[0]
                else:
                    max_point = min(values_dict.items(), key=operator.itemgetter(1))[0]
            else:
                max_point = max_eval_move
                #max_point = random.choice(legal_moves)
        tmp_board[max_point[0]][max_point[1]] = game.current_player.stone_color
        self._feature = utils.extract_features(game.board.board, config.pattern_file_name)
        new_pattern = utils.extract_features(tmp_board, config.pattern_file_name)
        print(max_point)
        #print(values_dict[max_point])
        #print("new_pattern", new_pattern)
        #reward
        if black_will_win == 1:
            print("learning...reward 1")
            print(self.CNN.run_learning([[1.]], [self._feature], [new_pattern]))
        elif white_will_win == 1:
            print("learning...reward -1")
            print(self.CNN.run_learning([[-1.]], [self._feature], [new_pattern]))
        else:
            new_occurrence = utils.pattern_occurrence(tmp_board, self.load_pattern)
            print("new_occur", new_occurrence)
            self_occurrence = utils.pattern_occurrence(game.board.board, self.load_pattern)
            self_value = sum([a * b for a, b in zip(self_occurrence, self.mul_values)])
            new_value = sum([a * b for a, b in zip(new_occurrence, self.mul_values)])
            print("self value:", self_value)
            print("new value:", new_value)
            if new_value > self_value:
                print("learning...reward 0.x")
                print(self.CNN.run_learning([[0.00001 * (new_value - self_value)]], [self._feature], [new_pattern]))
            elif new_value < self_value:
                print("learning...reward -0.x")
                print(self.CNN.run_learning([[0.00001 * (new_value - self_value)]], [self._feature], [new_pattern]))
            else:
                print("reward 0")
                print(self.CNN.run_learning([[0.]], [self._feature], [new_pattern]))
        return max_point
Example no. 36
 def __init__(self, *args, **kwargs):
     super(LearningTestPlayer, self).__init__(*args, **kwargs)
     self._feature = utils.extract_features(Board().board, config.pattern_file_name)
     self.CNN = cnn.CriticNN(len(self._feature))
     self._pattern = [0] * len(self._feature)
Example no. 37
from __future__ import division  # Python 2 users only
import nltk
import utils
from pprint import pprint

articles = utils.load_data()

# other values: 'content', 'summary_en', 'headline_en'
content_type = 'summary_en'
test_set_size = 300
corpus_size = len(articles)

articles = articles[:1500]  # cap the corpus; later slices past 1500 simply clip
feature_sets = utils.extract_features(articles, content_type)

print("Test set size:", test_set_size," ****************\n")

for train_set_size in range(500, corpus_size, 300):

    print("Train set size:", train_set_size)

    # Split in test - train
    train_set = feature_sets[test_set_size:train_set_size]
    test_set = feature_sets[:test_set_size]

    classifier = nltk.NaiveBayesClassifier.train(train_set)

    print("train set", nltk.classify.accuracy(classifier, train_set))
    print("test set", nltk.classify.accuracy(classifier, test_set))
    print()  # bare 'print' is a no-op expression in Python 3