def test(model_name, threshold=0.5, save=True, verbose=True, refine=False):
    classifications = np.array([0, 0, 0, 0])
    results_folder = os.path.join(model_name, 'results')
    if not os.path.exists(results_folder): os.mkdir(results_folder)
    _, test_set = get_dataset_split()
    if refine: test_set = sort_imgs(test_set)
    prediction = None
    model = keras.models.load_model(os.path.join(model_name,
                                                 model_name + '.h5'),
                                    custom_objects=get_custom_objects())
    for i in range(len(test_set)):
        if verbose: display_progress(i / len(test_set))
        img_path, gt_path = test_set[i].replace('\n', '').split(',')

        img = read_image(img_path, pad=(4, 4))
        img = normalise_img(img)
        ground_truth = read_gt(gt_path)
        ground_truth = np.squeeze(ground_truth)

        if refine:
            prediction = ground_truth if prediction is None else prediction
            pmap = create_map(prediction > 0.5, 1)
            prob = get_prob_map(pmap)

        prediction = model.predict(img)

        prediction = np.squeeze(prediction)
        prediction = prediction[4:-4, ...]

        prediction = (prediction > threshold).astype(np.uint8)
        if refine: prediction = prediction * prob

        classifications += getPixels(prediction, ground_truth, 0.5)

        if save:
            save_image(prediction,
                       os.path.join(results_folder, ntpath.basename(img_path)))
            save_image(
                ground_truth,
                os.path.join(
                    results_folder,
                    ntpath.basename(img_path).replace('.png', '_gt.png')))


            # if refine:
            #     prob = prob.astype(np.uint8)
            #     save_image(prob, os.path.join(
            #         results_folder,
            #         ntpath.basename(img_path).replace('.png', '_prob.png')))

    print(model_name, threshold)
    printMetrics(getMetrics(classifications))
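getPixels, getMetrics and printMetrics are project helpers that are not shown in this example. A minimal sketch of what they might look like, assuming classifications accumulates [TP, FP, TN, FN] pixel counts (an assumption based on the four-element array above, not confirmed by the snippet):

import numpy as np

def getPixels(prediction, ground_truth, threshold=0.5):
    # Hypothetical helper: per-image confusion counts as [TP, FP, TN, FN].
    pred = prediction > threshold
    gt = ground_truth > threshold
    tp = np.sum(pred & gt)
    fp = np.sum(pred & ~gt)
    tn = np.sum(~pred & ~gt)
    fn = np.sum(~pred & gt)
    return np.array([tp, fp, tn, fn])

def getMetrics(classifications):
    # Hypothetical helper: accuracy, precision, recall and F1 from the counts.
    tp, fp, tn, fn = classifications.astype(np.float64)
    total = tp + fp + tn + fn
    accuracy = (tp + tn) / total if total > 0 else 0.0
    precision = tp / (tp + fp) if tp + fp > 0 else 0.0
    recall = tp / (tp + fn) if tp + fn > 0 else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall > 0 else 0.0)
    return {'accuracy': accuracy, 'precision': precision,
            'recall': recall, 'f1': f1}

def printMetrics(metrics):
    for name, value in metrics.items():
        print('{0}: {1:.4f}'.format(name, value))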
Example #2
def main():
    saver = tf.train.Saver()
    if os.path.isdir('model'):
        shutil.rmtree('model')

    #Add labels to saved model as json file for classification
    os.makedirs('model')
    with open(utils.train_data_path, 'w') as file:
        train_data = {
            'labels': utils.labels
        }
        file.write(json.dumps(train_data))

    #Loading dataset and displaying information about it
    print('Count of images:')
    dataset_count = 0
    for labelID in range(len(utils.dataset)):
        label_count = len(utils.dataset[labelID])
        dataset_count += label_count
        print('{0}: {1}'.format(utils.labels[labelID], label_count))
    print('Total: {0}'.format(dataset_count))

    print('Loading test batch...')
    test_images, test_labels = utils.random_batch(utils.config['training']['batch_size_test'], True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        last_acc = 0
        for i in range(utils.config['training']['n_epoch']+1):
            batch_X, batch_Y = utils.random_batch()

            # the back-propagation training step
            sess.run(net.train_step,
                     feed_dict={net.X: batch_X, net.Y_: batch_Y,
                                net.pkeep: utils.config['training']['pkeep']})
            acc, loss = sess.run([net.accuracy, net.cross_entropy],
                                 feed_dict={net.X: test_images,
                                            net.Y_: test_labels,
                                            net.pkeep: 1.0})

            #Model accuracy has improved since last epoch
            if acc > last_acc:
                last_acc = acc

                #Remove older save
                for file_name in os.listdir(os.path.join(utils.root_dir, 'model')):
                    file_path = os.path.join(utils.root_dir, 'model', file_name)
                    if os.path.isfile(file_path) and file_name != utils.train_data_name:
                        os.unlink(file_path)

                #Save new model
                save_path = saver.save(sess, utils.model_dir)

            #Display information about progress
            utils.display_progress(i, acc, loss)
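utils.display_progress is part of the project's utils module and is not shown here. A minimal sketch of such a helper, assuming the (epoch, accuracy, loss) call signature used above; the names and output format are illustrative:

import sys

def display_progress(epoch, accuracy, loss):
    # Hypothetical helper: print a single, continuously updated status line.
    sys.stdout.write('\rEpoch {0:5d}  accuracy: {1:.4f}  loss: {2:.4f}'.format(
        epoch, accuracy, loss))
    sys.stdout.flush()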
Example #3
headers = utils.get_headers(token)

username = raw_input('Username: ')

# Page through this user's requests. The list query and the loop header are
# masked in this excerpt, so the starting URL below is an assumption.
current_page = 0
next_url = api_url + 'foia/'

while next_url:
    response = requests.get(next_url, headers=headers)
    data = response.json()
    next_url = data['next']

    # measures progress by page, not by result
    current_page += 1
    total_pages = (data['count'] / 20.0)
    utils.display_progress(current_page, total_pages)

    for result in data['results']:
        request_id = result['id']
        print "Working on request " + str(request_id)
        # get request first
        request_url = api_url + 'foia/%d/' % request_id
        print request_url
        request = requests.get(request_url, headers=headers)
        print request
        request_data = request.json()
        # get agency second
        agency_url = api_url + 'agency/%d/' % request_data['agency']
        agency = requests.get(agency_url, headers=headers)
        agency_data = agency.json()
        # get communications third
Example #4
        ]

        # forward pass
        logits = classifier(trn_imgs_batch_mixup_device)

        losses = criterion(logits, trn_lbls_oh_batch_mixup_device)

        total_trn_loss = .8 * losses[0] + .1 * losses[1] + .1 * losses[2]

        total_trn_loss.backward()
        optimizer.step()

        # record
        epoch_trn_loss.append(total_trn_loss.item())

        utils.display_progress(len(training_loader), j + 1,
                               {'training_loss': epoch_trn_loss[-1]})

    # validation
    classifier.eval()

    with torch.no_grad():
        for k, (vld_imgs_batch,
                vld_lbls_batch) in enumerate(validation_loader):

            # move to device
            vld_imgs_batch_device = vld_imgs_batch.cuda()
            vld_lbls_batch_device = [l.cuda() for l in vld_lbls_batch]
            vld_lbls_batch_numpy = [
                l.detach().cpu().numpy() for l in vld_lbls_batch
            ]
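criterion above returns one loss per output head, which are then combined with 0.8/0.1/0.1 weights. A minimal sketch of such a multi-head criterion, assuming three classification heads and (possibly mixup-blended) one-hot targets; the function name and the soft cross-entropy formulation are assumptions, not the author's code:

import torch.nn.functional as F

def multi_head_criterion(logits, targets_onehot):
    # Hypothetical criterion: one soft cross-entropy loss per output head,
    # so mixup-blended one-hot targets are handled directly.
    losses = []
    for head_logits, head_targets in zip(logits, targets_onehot):
        log_probs = F.log_softmax(head_logits, dim=1)
        losses.append(-(head_targets * log_probs).sum(dim=1).mean())
    return losses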
Example #5
import json
import requests
import utils

url = utils.API_URL
token = utils.get_api_key()
headers = utils.get_headers(token)

username = raw_input('Username: ')

# Page through this user's requests. The list query and the loop header are
# masked in this excerpt, so the starting URL below is an assumption.
current_page = 0
next_url = url + 'foia/'

while next_url:
    response = requests.get(next_url, headers=headers)
    data = response.json()
    next_url = data['next']

    # measures progress by page, not by result
    current_page += 1
    total_pages = (data['count'] / 20.0)
    utils.display_progress(current_page, total_pages)

    for request in data['results']:
        request_id = request['id']
        request_url = 'https://www.muckrock.com/api_v1/foia/%s/' % str(
            request_id)
        # Remove the embargo date and mark the request as permanently embargoed.
        data = json.dumps({
            'embargo': True,
            'date_embargo': None,
            'permanent_embargo': True
        })
        requests.patch(request_url, headers=headers, data=data)
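utils.get_api_key and utils.get_headers come from the shared utils module used by these API scripts and are not shown. A rough sketch of get_headers, assuming token-based authentication and JSON payloads; the exact header format is an assumption:

def get_headers(token):
    # Hypothetical helper: attach the API token and declare JSON payloads,
    # matching how headers is used in the calls above.
    return {
        'Authorization': 'Token %s' % token,
        'content-type': 'application/json'
    }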