Example #1
def register_a_face():
    if request.method == 'POST':

        name = request.form.get('name')

        # Save the uploaded image to the temp folder
        f = request.files['image']
        f.save(os.path.join(temp_filepath, f.filename))
        img_list = os.listdir(temp_filepath)

        image = face_recognition.load_image_file(
            os.path.join(temp_filepath, img_list[0]))

        # Encode the face; bail out if no face is detected (cf. Example #5)
        face_encodings = face_recognition.face_encodings(image)
        if not face_encodings:
            return 'nobody'
        encoded_image = face_encodings[0]

        # Append the new encoding to the stored list and persist it
        encoded_image_list = (util.load_list())["faces"]
        encoded_image_list.append({
            'name': name,
            'encoded_image': encoded_image.tolist()
        })
        util.save_list({"faces": encoded_image_list})

        util.flush_files('temp')

        return "registered"

    if request.method == 'GET':
        encoded_image_json = util.load_list()
        return encoded_image_json

    if request.method == 'DELETE':
        util.save_list({'faces': []})
        return 'deleted'
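Examples #1 and #5 treat `util.load_list()` as returning a dict with a `"faces"` key and `util.save_list()` as persisting that dict. The `util` module itself is not shown; a minimal JSON-backed sketch consistent with this usage might look like the following (the file name `faces.json` is an assumption, not from the original module):

def load_list():
    # Assumed sketch: return the stored structure,
    # e.g. {"faces": [{"name": ..., "encoded_image": [...]}]}
    import json
    try:
        with open('faces.json') as f:
            return json.load(f)
    except FileNotFoundError:
        return {"faces": []}

def save_list(data):
    import json
    with open('faces.json', 'w') as f:
        json.dump(data, f)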
Example #2
def main():
    """
        Main loop of program.
        Infinitely iterates over the list of subreddits
    """
    exit_if_already_started()
    while True:
        for timeframe in ['all', 'month', 'week']:
            subreddits = load_list('subs.txt')
            while subreddits:
                # Grab all images/comments from sub, remove from list
                parse_subreddit(subreddits.pop(0), timeframe)
Example #3
    def start(self):
        """
        Starts the blacklister process.
        """
        # Load blacklist
        print('Loading blacklist...')
        self.blacklist = util.load_list(util.BLACKLIST_FILE, ',')
        print(str(self.blacklist))

        # Log in to reddit
        self.reddit = util.login(MentionedBot.USER_AGENT)

        while True:
            try:
                self.tick()
            except Exception as e:
                print(e)
Example #4
    def run(self):
        for sub in load_list("subs.txt"):
            self._rabbitmq_channel.queue_bind(
                exchange='reddit',
                queue=self._rabbitmq_queue.method.queue,
                routing_key="*.%s" % sub)

        def msg_callback(ch, method, properties, body):
            self._q.put(body)

        self._rabbitmq_channel.basic_consume(
            queue=self._rabbitmq_queue.method.queue,
            on_message_callback=msg_callback,
            auto_ack=True)
        # Start 30 worker threads to drain the internal message queue
        for _ in range(30):
            t = Thread(target=self._message_callback_worker)
            t.start()
        self._rabbitmq_channel.start_consuming()
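The worker method `_message_callback_worker` is referenced but not shown. A plausible sketch is a loop that blocks on the shared queue and hands each message body to a processing routine; the `_process_message` handler below is hypothetical, introduced only for illustration:

def _message_callback_worker(self):
    # Hypothetical sketch: block on the shared queue filled by msg_callback
    while True:
        body = self._q.get()
        try:
            self._process_message(body)  # assumed handler, not in the original
        finally:
            self._q.task_done()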
Example #5
def open_a_locker():
    if request.method == 'POST':

        f = request.files['image']
        f.save(os.path.join(temp_filepath, f.filename))
        img_list = os.listdir(temp_filepath)

        image = face_recognition.load_image_file(
            os.path.join(temp_filepath, img_list[0]))

        # Encode the face once; bail out if no face is detected
        face_encodings = face_recognition.face_encodings(image)
        if not face_encodings:
            return 'nobody'
        encoded_image = face_encodings[0]

        encoded_image_list = (util.load_list())["faces"]

        who = util.check_who(encoded_image_list, encoded_image)

        util.flush_files('temp')

        return who
Example #6
def get_subs():
    return Response(json.dumps({'subreddits': load_list('subs.txt')}),
                    mimetype="application/json")
Example #7
def count_subs_txt():
    return len(load_list('subs.txt'))
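Examples #2, #3, #4, #6, and #7 use `load_list` to read names from a text file, and Example #3 passes a delimiter as a second argument. A minimal sketch consistent with both call sites (an assumption, since the real helper is not shown):

def load_list(path, delimiter=None):
    # Read entries from a text file: one per line by default, or split on
    # the given delimiter (as in load_list(util.BLACKLIST_FILE, ',')).
    with open(path) as f:
        if delimiter is None:
            return [line.strip() for line in f if line.strip()]
        return [item.strip() for item in f.read().split(delimiter) if item.strip()]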
Example #8
    nidx = np.arange(len(y))[y == 0]
    pidx = np.arange(len(y))[y == 1]
    # Stratify the positive samples by subtlety; split the negatives randomly
    pfolds = StratifiedKFold(sublety[pidx],
                             n_folds=10,
                             shuffle=True,
                             random_state=113)
    nfolds = KFold(len(nidx), n_folds=10, shuffle=True, random_state=113)
    pfolds = list(pfolds)
    nfolds = list(nfolds)
    folds = []

    for i in range(len(pfolds)):
        # Map the fold indices (local to each subset) back to indices into y,
        # then merge the positive and negative splits
        tr = np.concatenate((pidx[pfolds[i][0]], nidx[nfolds[i][0]]))
        te = np.concatenate((pidx[pfolds[i][1]], nidx[nfolds[i][1]]))
        folds.append((tr, te))

    return folds


if __name__ == "__main__":
    real_path = sys.argv[1]
    predicted_path = sys.argv[2]

    paths, real = util.load_list(real_path)
    _, predicted = util.load_list(predicted_path)

    mea = evaluate(real, predicted, paths)
    print "Sensitivity {:.2f}, \nfppi mean {:.2f}, fppi std {:.2f} \n".format(
        mea[0], mea[1], mea[2])
    #print "Sensitivity {:.2f}, \nfppi mean {:.2f}, fppi std {:.2f} \niou mean {:.2f} iou std {:.2f}, iou+ mean {:.2f}, iou+ std {:.2f}".format(mea[0], mea[1], mea[2], mea[3], mea[4], mea[5], mea[6])
Example #9
def train():
    # Getting settings from config.py
    max_len = cfg.MAX_TOKEN_LEN
    num_token = cfg.NUM_OF_TOKEN
    imw = cfg.IMW
    imh = cfg.IMH

    # Training params
    is_train = True
    batch_size_const = cfg.GPU_BATCH_SIZE
    num_ite_to_update = cfg.NUM_ITE_TO_UPDATE
    lr = cfg.LR
    momentum = cfg.MOMENTUM
    lr_decay = cfg.LR_DECAY
    max_grad = cfg.MAX_GRAD_CLIP
    num_e = cfg.NUM_EPOCH

    # Tracking/Saving
    last_e = -1
    global_step = 0
    running_loss = 0
    num_ite_to_log = cfg.NUM_ITE_TO_LOG
    num_ite_to_vis = cfg.NUM_ITE_TO_VIS
    num_epoch_to_save = cfg.NUM_EPOCH_TO_SAVE
    all_loss = []
    save_name = cfg.SAVE_NAME
    meta_name = cfg.META_NAME
    vis_path = cfg.VIS_PATH

    use_cuda = cfg.CUDA and torch.cuda.is_available()
    save_path = cfg.MODEL_FOLDER
    dataset_path = cfg.DATASET_PATH + 'CROHME2013_data/TrainINKML/'
    subset_list = cfg.SUBSET_LIST
    scale_factors = cfg.SCALE_FACTORS

    # Load the vocab dictionary for display purpose
    _, id_to_word = get_gt.build_vocab('mathsymbolclass.txt')

    # Initialize the network and load its weights
    net = AGRU()
    save_files = glob.glob(save_path + save_name + '*.dat')
    meta_files = glob.glob(save_path + meta_name + '*.dat')
    if (len(save_files) > 0):
        save_file = sorted(save_files)[-1]
        print('Loading network weights saved at %s...' % save_file)
        loadobj = torch.load(save_file)
        net.load_state_dict(loadobj['state_dict'])
        last_e, running_loss, all_loss, lr = util.load_list(
            sorted(meta_files)[-1])
        print('Loading done.')

    if (use_cuda):
        net.cuda()

    # For debugging
    if (not is_train):
        net.train(False)

    # Get a list of convolutional layers
    conv_layers = util.get_layers(net, lambda x: type(x) == type(net.conv1_3))

    # Get conv parameters
    conv_params = []
    for c in conv_layers:
        for p in c.parameters():
            if (p.requires_grad):
                conv_params.append(p)

    # Get a list of trainable layers that are not convolutional
    other_layers = util.get_layers(
        net,
        lambda x: type(x) != type(net.conv1_3) and hasattr(x, 'parameters'))
    other_layers = other_layers[1:]  # The first layer is attend_GRU.AGRU

    # Get GRU parameters
    gru_params = []
    for l in other_layers:
        for p in l.parameters():
            gru_params.append(p)

    # Set different learning rates for conv layers and GRU layers
    optimizer = optim.Adam([{
        'params': gru_params
    }, {
        'params': conv_params,
        'lr': lr
    }],
                           lr=lr)

    # Loss function
    criterion = nn.CrossEntropyLoss(ignore_index=1)

    # Get full paths to train inkml files, create a list of scale factors to be used for rendering train images
    inkml_list = []
    scale_list = []

    for i, subset in enumerate(subset_list):
        subset_inkml_list = glob.glob(dataset_path + subset + '*.inkml')
        inkml_list += subset_inkml_list
        scale_list += [scale_factors[i]] * len(subset_inkml_list)
    inkml_list = np.asarray(inkml_list)
    scale_list = np.asarray(scale_list)

    #inkml_list = inkml_list[0:120]
    #scale_list = scale_list[0:120]
    num_train = len(inkml_list)
    num_ite = int(np.ceil(1.0 * num_train / batch_size_const))

    # Main train loop
    optimizer.zero_grad()
    for e in range(last_e + 1, num_e):
        permu_ind = np.random.permutation(range(num_train))
        inkml_list = inkml_list[permu_ind.astype(int)]
        scale_list = scale_list[permu_ind.astype(int)]

        if (e % cfg.NUM_EPOCH_TO_DECAY == cfg.NUM_EPOCH_TO_DECAY - 1):
            lr = lr * lr_decay
            print('Current learning rate: %.8f' % lr)
            optimizer.param_groups[0]['lr'] = lr
            optimizer.param_groups[1]['lr'] = lr

        for i in range(num_ite):

            batch_idx = range(i * batch_size_const, (i + 1) * batch_size_const)
            if (batch_idx[-1] >= num_train):
                batch_idx = range(i * batch_size_const, num_train)
            batch_size = len(batch_idx)
            batch_x = util.batch_data(inkml_list[batch_idx],
                                      scale_list[batch_idx], is_train)
            batch_y_np = util.batch_target(inkml_list[batch_idx])
            batch_y = util.np_to_var(batch_y_np, use_cuda)

            pred_y, attention = net(batch_x, batch_y)

            # Convert the 3D tensor to 2D matrix of shape (batch_size*MAX_TOKEN_LEN, NUM_OF_TOKEN) to compute log loss
            pred_y = pred_y.view(-1, num_token)
            # Remove the <start> token from the target vector & prediction vector
            batch_y = batch_y.view(batch_size, max_len)
            batch_y = batch_y[:, 1:].contiguous()
            batch_y = batch_y.view(-1)
            pred_y = pred_y.view(batch_size, max_len, num_token)
            pred_y = pred_y[:, 1:].contiguous()
            pred_y = pred_y.view(batch_size * (max_len - 1), num_token)

            loss = criterion(pred_y, batch_y)
            loss.backward()
            running_loss += loss.data[0]  # use loss.item() on PyTorch >= 0.4

            if (global_step % num_ite_to_update == (num_ite_to_update - 1)):
                util.grad_clip(net, max_grad)
                optimizer.step()
                optimizer.zero_grad()
                running_loss /= num_ite_to_update
                all_loss.append(running_loss)
                running_loss = 0

            # Print progress to the console
            if (global_step % num_ite_to_log == (num_ite_to_log - 1)):
                print('Finished ite %d/%d, epoch %d/%d, loss: %.5f' %
                      (i, num_ite, e, num_e, all_loss[-1]))

                # Printing prediction and target
                pred_y_np = util.var_to_np(pred_y, use_cuda)
                pred_y_np = np.reshape(pred_y_np,
                                       (batch_size, max_len - 1, num_token))
                # Only display the first sample in the batch
                pred_y_np = pred_y_np[0, 0:40, :]
                pred_y_np = np.argmax(pred_y_np, axis=1)
                pred_list = [id_to_word[idx] for idx in list(pred_y_np)]
                print('Prediction: %s' % ' '.join(pred_list))

                batch_y_np = np.reshape(batch_y_np, (batch_size, max_len))
                batch_y_np = batch_y_np[0, 1:40]
                target_list = [id_to_word[idx] for idx in list(batch_y_np)]
                print('Target: %s\n' % ' '.join(target_list))

            if (global_step % num_ite_to_vis == (num_ite_to_vis - 1)):
                tmp_x = util.var_to_np(batch_x, use_cuda)[0, :, :, :]
                tmp_x = np.transpose(tmp_x, (1, 2, 0))[:, :, 0:3]
                attention_np = attention.data.cpu().numpy()[0, 2:, :, :]
                for k in range(10):
                    attention_k = attention_np[k, :, :] / np.max(
                        attention_np[k, :, :]) * 0.8
                    attention_k = (scipy.misc.imresize(
                        attention_k, 16.0, interp='bicubic')) / 255.0
                    tmp_x = scipy.misc.imresize(tmp_x, attention_k.shape)
                    attention_k = np.repeat(np.expand_dims(attention_k, 2), 3,
                                            2)
                    attention_k = attention_k * 255
                    attention_k += tmp_x
                    attention_k /= 2.0
                    attention_k[attention_k > 255] = 255
                    attention_k = (attention_k).astype(np.uint8)
                    scipy.misc.imsave(vis_path + ('%02d.jpg' % k), attention_k)

                plt.clf()
                plt.plot(all_loss)
                # Save the loss curve; plt.show() here would block the training
                # loop and can leave savefig writing an empty figure
                plt.savefig(vis_path + 'loss.png')

            global_step += 1

        if (e % num_epoch_to_save == (num_epoch_to_save - 1)):
            print('Saving at epoch %d/%d' % (e, num_e))
            torch.save(
                {
                    'state_dict': net.state_dict(),
                    'opt': optimizer.state_dict()
                }, save_path + save_name + ('_%03d' % e) + '.dat')
            metadata = [e, running_loss, all_loss, lr]
            util.save_list(metadata,
                           save_path + meta_name + ('_%03d' % e) + '.dat')

        last_e = e
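Example #9 round-trips arbitrary Python lists through `util.save_list(obj, path)` and `util.load_list(path)` (and Example #8 unpacks a pair from the same helper). A minimal pickle-based sketch compatible with that usage, assuming the real `util` module serializes whole objects to disk:

import pickle

def save_list(obj, path):
    # Assumed sketch: serialize an arbitrary Python object,
    # here the metadata list [epoch, running_loss, all_loss, lr]
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def load_list(path):
    with open(path, 'rb') as f:
        return pickle.load(f)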