Example #1
def backward_raytracing(scene, light, width, height, n0, n1, d=2):
    """
    Shoot paths from the lights and store the new information in each object
    color texture
    """
    print("Illuminating with light raytracing...")
    sx = 2
    sy = 2
    iterations = height * width
    step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')
    counter = 0
    # n1 = DEFAULT_N2
    # n0 = np.cross(light.nl, n1)
    p00 = light.position + d * light.nl - (sx / 2) * n0 - (sy / 2) * n1
    bar = Bar('Illuminating', max=100 / PERCENTAGE_STEP)
    # This is needed to use it in Git Bash
    bar.check_tty = False
    for j in range(height):
        for i in range(width):
            x = i
            y = height - 1 - j
            # Get x projected in view coord
            xp = (x / float(width)) * sx
            # Get y projected in view coord
            yp = (y / float(height)) * sy
            pp = p00 + xp * n0 + yp * n1
            npe = utils.normalize(pp - light.position)
            ray = Ray(light.position, npe)
            transport_light(ray, scene, light.color)
            counter += 1
            if counter % step_size == 0:
                bar.next()
    bar.finish()
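A pattern shared by most of these examples is ticking a percentage Bar once every step_size iterations. A minimal self-contained sketch, assuming PERCENTAGE_STEP is 1 (one tick per percent):

import numpy as np
from progress.bar import Bar

PERCENTAGE_STEP = 1                     # assumed value; one tick per percent
iterations = 512 * 512                  # e.g. height * width
step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')

bar = Bar('Illuminating', max=100 / PERCENTAGE_STEP)
bar.check_tty = False                   # needed for Git Bash, as noted above
for counter in range(1, iterations + 1):
    # ... per-pixel work would go here ...
    if counter % step_size == 0:
        bar.next()
bar.finish()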
Example #2
def train(model, train_loader, epoch, criterion, optimizer, writer):
    # set training mode
    model.train()
    train_loss = 0.0
    iter_num = 0

    # Iterate over data.
    bar = Bar('Processing | {}'.format('train'), max=len(train_loader))
    bar.check_tty = False
    for i_iter, batch in enumerate(train_loader):
        sys.stdout.flush()
        start_time = time.time()
        iter_num += 1
        # adjust learning rate
        iters_per_epoch = len(train_loader)
        lr = adjust_learning_rate(optimizer,
                                  epoch,
                                  i_iter,
                                  iters_per_epoch,
                                  method=args.lr_mode)

        # get inputs
        image, target, _ = batch
        images, target = image.cuda(), target.long().cuda()

        torch.set_grad_enabled(True)

        # zero the parameter gradients
        optimizer.zero_grad()

        # compute output loss
        preds = model(images)
        loss = criterion(preds, [target])  # batch mean
        train_loss += loss.item()

        # compute gradient and do SGD step
        loss.backward()
        optimizer.step()

        if i_iter % 10 == 0:
            writer.add_scalar('learning_rate', lr,
                              iter_num + epoch * len(train_loader))
            writer.add_scalar('train_loss', train_loss / iter_num,
                              iter_num + epoch * len(train_loader))

        batch_time = time.time() - start_time
        # plot progress
        bar.suffix = '{} / {} | Time: {batch_time:.4f} | Loss: {loss:.4f}'.format(
            iter_num,
            len(train_loader),
            batch_time=batch_time,
            loss=train_loss / iter_num)
        bar.next()

    epoch_loss = train_loss / iter_num
    writer.add_scalar('train_epoch_loss', epoch_loss, epoch)
    bar.finish()

    return epoch_loss
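A hedged sketch of how train() might be driven across epochs; the writer is the standard TensorBoard SummaryWriter, and model, train_loader, criterion, optimizer and args are assumed to exist:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='runs/experiment')   # assumed log location
for epoch in range(args.epochs):
    epoch_loss = train(model, train_loader, epoch, criterion, optimizer, writer)
    print(f'epoch {epoch}: train loss {epoch_loss:.4f}')
writer.close()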
Example #3
def train_step(train_loader, model, epoch, optimizer, criterion, args):

    # switch to train mode
    model.train()
    epoch_loss = 0.0
    loss_w = args.loss_w

    iters_per_epoch = len(train_loader)
    bar = Bar('Processing {} Epoch -> {} / {}'.format('train', epoch + 1,
                                                      args.epochs),
              max=iters_per_epoch)
    bar.check_tty = False

    for step, (imagesA, imagesB, imagesC, labels) in enumerate(train_loader):
        start_time = time.time()

        torch.set_grad_enabled(True)

        imagesA = imagesA.cuda()
        imagesB = imagesB.cuda()
        imagesC = imagesC.cuda()

        labels = labels.cuda()

        out_A, out_B, out_C, out_F, combine = model(imagesA, imagesB, imagesC)

        loss_x = criterion(out_A, labels)
        loss_y = criterion(out_B, labels)
        loss_z = criterion(out_C, labels)
        loss_c = criterion(out_F, labels)
        loss_f = criterion(combine, labels)

        lossValue = (loss_w[0] * loss_x + loss_w[1] * loss_y +
                     loss_w[2] * loss_z + loss_w[3] * loss_c +
                     loss_w[4] * loss_f)

        optimizer.zero_grad()
        lossValue.backward()
        optimizer.step()

        # measure elapsed time
        epoch_loss += lossValue.item()
        end_time = time.time()
        batch_time = end_time - start_time
        # plot progress ('Time' here is the estimated minutes remaining)
        bar_str = '{} / {} | Time: {batch_time:.2f} mins | Loss: {loss:.4f} '
        bar.suffix = bar_str.format(step + 1,
                                    iters_per_epoch,
                                    batch_time=batch_time * (iters_per_epoch - step) / 60,
                                    loss=lossValue.item())
        bar.next()

    epoch_loss = epoch_loss / iters_per_epoch

    bar.finish()
    return epoch_loss
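A toy illustration, not part of the original example, of how the five branch losses blend under args.loss_w; the weights and loss values here are made up:

import torch

loss_w = [0.2, 0.2, 0.2, 0.2, 0.2]               # assumed equal weighting
branch_losses = [torch.tensor(v) for v in (0.9, 0.8, 1.1, 0.7, 0.6)]
lossValue = sum(w * l for w, l in zip(loss_w, branch_losses))
print(lossValue)                                  # tensor(0.8200)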
Example #4
def main():
    # Initiate FAST object with default values
    fast = cv.FastFeatureDetector_create(threshold=THRESHOLD)
    print("Processing...")
    timer = utils.Timer()
    timer.start()
    # Create frames from video
    # reader = imageio.get_reader(f'imageio:videos/explosions.mp4')
    # for i, img in enumerate(reader):
    # For creating video
    if not os.path.exists(VIDEOS_DIR):
        os.mkdir(VIDEOS_DIR)
    writer = imageio.get_writer(OUT_VIDEO_FILENAME, fps=FPS)
    print(f"Getting keypoints from video {VIDEO_FILENAME}...")
    # Reading video
    cap = cv.VideoCapture(VIDEO_FILENAME)
    counter = 0
    total_frames = DURATION * FPS
    step_size = np.ceil(total_frames / 100).astype(int)
    bar = Bar("Processing...", max=100, suffix='%(percent)d%%')
    bar.check_tty = False
    particles = []
    h = None
    w = None
    while cap.isOpened():
        ret, frame = cap.read()
        # Stop as soon as the stream ends; frame is None when ret is False
        if not ret:
            break
        if not h:
            # frame.shape is (rows, cols, channels) = (height, width, 3)
            h, w, _ = frame.shape
        # Get list of keypoints per frame
        keypoints = process_keypoints(kp)
        # Apply effect to keypoints (create particles per frame)
        delta_time = 1 / FPS
        particles = particle_effects(keypoints, particles, delta_time, w, h)
        # Render the particles into an image for the output video
        img_arr = render(particles)
        # Append rendered image into video
        writer.append_data(img_arr)
        # Write rendered image into image file
        if counter < 90:
            img = Image.fromarray(img_arr)
            output_img_filename = f"output/{counter}.jpg"
            img.save(output_img_filename, quality=MAX_QUALITY)
        counter += 1
        if counter % step_size == 0:
            bar.next()
    cap.release()
    bar.finish()
    print("Writing video")
    writer.close()
    timer.stop()
    print(f"Total time spent {timer}")
Example #5
def render_aa_t(scene,
                camera,
                func,
                HEIGHT=100,
                WIDTH=100,
                V_SAMPLES=4,
                H_SAMPLES=4):
    """
    Render the image for the given scene and camera using a template function.

    Args:
        scene(Scene): The scene that contains objects, cameras and lights.
        camera(Camera): The camera that is rendering this image.

    Returns:
        numpy.array: The pixels with the raytraced colors.
    """
    output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)
    if not scene or scene.is_empty() or not camera or camera.inside(
            scene.objects):
        print("Cannot generate an image")
        return output
    total_samples = H_SAMPLES * V_SAMPLES
    # This is for showing progress %
    iterations = HEIGHT * WIDTH
    step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')
    counter = 0
    bar = Bar('Rendering', max=100 / PERCENTAGE_STEP)
    # This is needed to use it in Git Bash
    bar.check_tty = False
    for j in range(HEIGHT):
        for i in range(WIDTH):
            color = np.array([0, 0, 0], dtype=float)
            for n in range(V_SAMPLES):
                for m in range(H_SAMPLES):
                    r0, r1 = np.random.random_sample(2)
                    # Floats x, y inside the image plane grid
                    x = i + ((float(m) + r0) / H_SAMPLES)
                    y = HEIGHT - 1 - j + ((float(n) + r1) / V_SAMPLES)
                    # Get x projected in view coord
                    xp = (x / float(WIDTH)) * camera.scale_x
                    # Get y projected in view coord
                    yp = (y / float(HEIGHT)) * camera.scale_y
                    pp = camera.p00 + xp * camera.n0 + yp * camera.n1
                    npe = utils.normalize(pp - camera.position)
                    ray = Ray(pp, npe)

                    color += func(ray, scene) / float(total_samples)
            counter += 1
            if counter % step_size == 0:
                bar.next()
            output[j][i] = color.round().astype(np.uint8)
    bar.finish()
    return output
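The jittered supersampling above splits each pixel into an H_SAMPLES x V_SAMPLES grid and jitters within each cell, so every sample stays inside its own stratum. A quick numeric check of the x coordinate:

import numpy as np

i, H_SAMPLES = 10, 4
for m in range(H_SAMPLES):
    r0 = np.random.random_sample()
    x = i + (float(m) + r0) / H_SAMPLES
    # x is guaranteed to land in [i + m/4, i + (m+1)/4)
    assert i + m / H_SAMPLES <= x < i + (m + 1) / H_SAMPLES
    print(m, round(x, 3))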
Example #6
def validation(model, val_loader, epoch, writer):
    # set evaluate mode
    model.eval()

    total_correct, total_label = 0, 0
    hist = np.zeros((args.num_classes, args.num_classes))

    # Iterate over data.
    bar = Bar('Processing {}'.format('val'), max=len(val_loader))
    bar.check_tty = False
    for idx, batch in enumerate(val_loader):
        image, target, _ = batch
        image, target = image.cuda(), target.cuda()
        with torch.no_grad():
            h, w = target.size(1), target.size(2)
            outputs = model(image)
            outputs = gather(outputs, 0, dim=0)
            preds = F.interpolate(input=outputs[0], size=(h, w), mode='bilinear', align_corners=True)
            if idx % 50 == 0:
                img_vis = inv_preprocess(image, num_images=args.save_num)
                label_vis = decode_predictions(target.int(), num_images=args.save_num, num_classes=args.num_classes)
                pred_vis = decode_predictions(torch.argmax(preds, dim=1), num_images=args.save_num,
                                              num_classes=args.num_classes)

                # visual grids
                img_grid = torchvision.utils.make_grid(torch.from_numpy(img_vis.transpose(0, 3, 1, 2)))
                label_grid = torchvision.utils.make_grid(torch.from_numpy(label_vis.transpose(0, 3, 1, 2)))
                pred_grid = torchvision.utils.make_grid(torch.from_numpy(pred_vis.transpose(0, 3, 1, 2)))
                writer.add_image('val_images', img_grid, epoch * len(val_loader) + idx + 1)
                writer.add_image('val_labels', label_grid, epoch * len(val_loader) + idx + 1)
                writer.add_image('val_preds', pred_grid, epoch * len(val_loader) + idx + 1)

            # pixelAcc
            correct, labeled = batch_pix_accuracy(preds.data, target)
            # mIoU
            hist += fast_hist(preds, target, args.num_classes)

            total_correct += correct
            total_label += labeled
            pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
            IoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
            # plot progress
            bar.suffix = '{} / {} | pixAcc: {pixAcc:.4f}, mIoU: {IoU:.4f}'.format(idx + 1, len(val_loader),
                                                                                  pixAcc=pixAcc, IoU=IoU)
            bar.next()

    mIoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)

    writer.add_scalar('val_pixAcc', pixAcc, epoch)
    writer.add_scalar('val_mIoU', mIoU, epoch)
    bar.finish()

    return pixAcc, mIoU
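How the running pixel accuracy above accumulates across batches, with made-up counts; np.spacing(1) is just a tiny epsilon guarding against division by zero:

import numpy as np

total_correct, total_label = 0, 0
for correct, labeled in [(90, 100), (45, 50)]:     # toy batch counts
    total_correct += correct
    total_label += labeled
    pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
print(round(pixAcc, 4))                            # 0.9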
Example #7
def create_normals(normal_map):
    h, w, _ = normal_map.shape
    iterations = w * h
    step_size = np.ceil(iterations / 100).astype(int)
    normals = np.zeros((h, w, NORMAL_DIMENSIONS))
    counter = 0
    bar = Bar("Processing Normals...", max=100, suffix='%(percent)d%%')
    bar.check_tty = False
    for i in range(w):
        for j in range(h):
            normals[j][i] = utils.adjust(normal_map[j][i][:3])
            counter += 1
            if counter % step_size == 0:
                bar.next()
    bar.finish()
    np.save(NORMAL_VECTORS_FILENAME, normals)
    print(f"Normal vectors stored in {NORMAL_VECTORS_FILENAME}")
    return normals
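utils.adjust is not shown in this example; a common normal-map decode, offered only as a guess at what it does, maps 8-bit RGB in [0, 255] to a unit vector in [-1, 1]^3:

import numpy as np

def adjust(rgb):
    # Hypothetical stand-in for utils.adjust: decode and renormalize
    v = np.asarray(rgb, dtype=float) / 255.0 * 2.0 - 1.0
    return v / np.linalg.norm(v)

print(adjust([128, 128, 255]))   # approximately the "flat" normal (0, 0, 1)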
Example #8
def validation(model, val_loader, epoch, writer):
    # set evaluate mode
    model.eval()

    total_correct, total_label = 0, 0
    hist = np.zeros((args.num_classes, args.num_classes))

    # Iterate over data.
    bar = Bar('Processing {}'.format('val'), max=len(val_loader))
    bar.check_tty = False
    for idx, batch in enumerate(val_loader):
        image, target, _ = batch
        image, target = image.cuda(), target.cuda()
        with torch.no_grad():
            h, w = target.size(1), target.size(2)
            outputs = model(image)
            outputs = gather(outputs, 0, dim=0)
            preds = F.interpolate(input=outputs[0],
                                  size=(h, w),
                                  mode='bilinear',
                                  align_corners=True)

            # pixelAcc
            correct, labeled = batch_pix_accuracy(preds.data, target)
            # mIoU
            hist += fast_hist(preds, target, args.num_classes)

            total_correct += correct
            total_label += labeled
            pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
            IoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
            # plot progress
            bar.suffix = '{} / {} | pixAcc: {pixAcc:.4f}, mIoU: {IoU:.4f}'.format(
                idx + 1, len(val_loader), pixAcc=pixAcc, IoU=IoU)
            bar.next()

    mIoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)

    writer.add_scalar('val_pixAcc', pixAcc, epoch)
    writer.add_scalar('val_mIoU', mIoU, epoch)
    bar.finish()

    return pixAcc, mIoU
Example #9
def render(scene, camera, HEIGHT=100, WIDTH=100):
    """
    Render the image for the given scene and camera using raytracing.

    Args:
        scene(Scene): The scene that contains objects, cameras and lights.
        camera(Camera): The camera that is rendering this image.

    Returns:
        numpy.array: The pixels with the raytraced colors.
    """
    output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)
    if not scene or scene.is_empty() or not camera or camera.inside(
            scene.objects):
        print("Cannot generate an image")
        return output
    # This is for showing progress %
    iterations = HEIGHT * WIDTH
    step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')
    counter = 0
    bar = Bar('Raytracing', max=100 / PERCENTAGE_STEP)
    # This is needed to use it in Git Bash
    bar.check_tty = False
    for j in range(HEIGHT):
        for i in range(WIDTH):
            x = i
            y = HEIGHT - 1 - j
            # Get x projected in view coord
            xp = (x / float(WIDTH)) * camera.scale_x
            # Get y projected in view coord
            yp = (y / float(HEIGHT)) * camera.scale_y
            pp = camera.p00 + xp * camera.n0 + yp * camera.n1
            npe = utils.normalize(pp - camera.position)
            ray = Ray(pp, npe)
            color = raytrace(ray, scene)
            output[j][i] = color.round().astype(np.uint8)
            counter += 1
            if counter % step_size == 0:
                bar.next()
    bar.finish()
    return output
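A numeric walk-through of the view-plane mapping used above, with assumed camera values (p00 as the lower-left corner of the image plane, n0/n1 as its axes):

import numpy as np

WIDTH = HEIGHT = 100
scale_x = scale_y = 2.0                       # assumed plane extents
p00 = np.array([-1.0, -1.0, 1.0])             # assumed lower-left corner
n0 = np.array([1.0, 0.0, 0.0])
n1 = np.array([0.0, 1.0, 0.0])
i, j = 50, 0                                  # pixel column 50, top row
x, y = i, HEIGHT - 1 - j
pp = p00 + (x / float(WIDTH)) * scale_x * n0 + (y / float(HEIGHT)) * scale_y * n1
print(pp)                                     # [0.   0.98 1.  ]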
Example #10
def render_dof(scene, camera, HEIGHT=100, WIDTH=100, V_SAMPLES=6, H_SAMPLES=6):
    """
    Render the image for the given scene and camera using raytracing with
    depth of field.

    Args:
        scene(Scene): The scene that contains objects, cameras and lights.
        camera(Camera): The camera that is rendering this image.

    Returns:
        numpy.array: The pixels with the raytraced colors.
    """
    output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)
    if not scene or scene.is_empty() or not camera or camera.inside(
            scene.objects):
        print("Cannot generate an image")
        return output
    total_samples = H_SAMPLES * V_SAMPLES
    # This is for showing progress %
    iterations = HEIGHT * WIDTH * total_samples
    step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')
    counter = 0
    bar = Bar('Raytracing', max=100 / PERCENTAGE_STEP)
    # This is needed to use it in Git Bash
    bar.check_tty = False
    for j in range(HEIGHT):
        for i in range(WIDTH):
            color = np.array([0, 0, 0], dtype=float)
            lens_sample_offsets = []
            n0 = camera.n0
            n1 = camera.n1
            for n in range(V_SAMPLES):
                for m in range(H_SAMPLES):
                    r0, r1 = np.random.random_sample(2)
                    ap_sx = camera.lens_params.ap_sx
                    ap_sy = camera.lens_params.ap_sy
                    x_offset = ((r0 - 0.5) * m) / H_SAMPLES * ap_sx
                    y_offset = ((r1 - 0.5) * n) / V_SAMPLES * ap_sy
                    lens_sample_offsets.append((x_offset, y_offset))
            # np.random.random_integers was removed from NumPy; randint's
            # upper bound is exclusive, so this matches 0..total_samples-1
            random_start = np.random.randint(0, total_samples)
            for n in range(V_SAMPLES):
                for m in range(H_SAMPLES):
                    r0, r1 = np.random.random_sample(2)
                    x = i + ((float(m) + r0) / H_SAMPLES)
                    y = HEIGHT - 1 - j + ((float(n) + r1) / V_SAMPLES)
                    # Get x projected in view coord
                    xp = (x / float(WIDTH)) * camera.scale_x
                    # Get y projected in view coord
                    yp = (y / float(HEIGHT)) * camera.scale_y
                    pp = camera.p00 + xp * camera.n0 + yp * camera.n1
                    npe = utils.normalize(pp - camera.position)
                    # Negative indices wrap around, so random_start acts as
                    # a random rotation of the precomputed lens offsets
                    sample_idx = n + m * H_SAMPLES - random_start
                    x_offset, y_offset = lens_sample_offsets[sample_idx]
                    ps = pp + x_offset * n0 + y_offset * n1
                    fp = pp + npe * camera.lens_params.f
                    director = utils.normalize(fp - ps)
                    ray = Ray(ps, director)

                    color += raytrace(ray, scene) / float(total_samples)
                    counter += 1
                    if counter % step_size == 0:
                        bar.next()
            output[j][i] = color.round().astype(np.uint8)
    bar.finish()
    return output
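The depth-of-field pass above follows a thin-lens model: every jittered lens sample ps aims at the same focal point fp, so geometry at distance f stays sharp while everything else blurs. A minimal sketch with assumed values:

import numpy as np

pp = np.array([0.0, 0.0, 1.0])                  # point on the image plane
npe = np.array([0.0, 0.0, 1.0])                 # primary ray direction
f = 5.0                                         # assumed focal distance
fp = pp + npe * f                               # focal point shared by all samples
ps = pp + np.array([0.05, -0.02, 0.0])          # one jittered lens sample
director = (fp - ps) / np.linalg.norm(fp - ps)
print(director)                                 # nearly parallel to npe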
Example #11
def train_step(train_loader, model, epoch, optimizer, criterion, args):

    # switch to train mode
    model.train()
    epoch_loss = 0.0
    loss_w = args.loss_w

    iters_per_epoch = len(train_loader)
    bar = Bar('Processing {} Epoch -> {} / {}'.format('train', epoch + 1,
                                                      args.epochs),
              max=iters_per_epoch)
    bar.check_tty = False

    for step, (imagesA, imagesB, imagesC, labels) in enumerate(train_loader):
        start_time = time.time()

        torch.set_grad_enabled(True)

        imagesA = imagesA.cuda()

        labels = labels.cuda()
        labels = labels[:, 0, :]

        out_A, out_std_A = model(imagesA)

        loss_x = criterion[0](out_A, labels)

        weight_out_std_A = torch.sqrt(torch.exp(out_std_A))

        weight_factor = 0.5
        loss_x = torch.mean(loss_x * (1 + weight_factor * weight_out_std_A))

        # entropy loss
        #weight_entropy = 0.001

        #entropy_loss_x =  entropy_loss(out_A) * weight_entropy * ( 1 + torch.sum(weight_out_std_A))

        # unc loss
        weight_unc_factor = 0.001
        uncloss_x = criterion[1](out_A, out_std_A, labels) * weight_unc_factor

        lossValue = loss_x + uncloss_x
        #lossValue = (lossValue-0.2).abs() + 0.2

        optimizer.zero_grad()
        lossValue.backward()
        optimizer.step()

        # measure elapsed time
        epoch_loss += lossValue.item()
        end_time = time.time()
        batch_time = end_time - start_time
        # plot progress ('Time' here is the estimated minutes remaining)
        bar_str = '{} / {} | Time: {batch_time:.2f} mins | Loss: {loss:.4f} '
        bar.suffix = bar_str.format(step + 1,
                                    iters_per_epoch,
                                    batch_time=batch_time * (iters_per_epoch - step) / 60,
                                    loss=lossValue.item())
        bar.next()

    epoch_loss = epoch_loss / iters_per_epoch

    bar.finish()
    return epoch_loss
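A toy illustration of the sqrt(exp(...)) weighting above, treating out_std_A as a predicted log-variance; the numbers are made up:

import torch

out_std_A = torch.tensor([0.0, 1.0])             # fake log-variances
weight = torch.sqrt(torch.exp(out_std_A))        # -> [1.0000, 1.6487]
per_sample_loss = torch.tensor([0.5, 0.5])
loss_x = torch.mean(per_sample_loss * (1 + 0.5 * weight))
print(loss_x)                                    # tensor(0.8311)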
Example #12
#     if best_metric > validation_loss:
#         best_metric = validation_loss
#         best_iter = epoch
#         model_save_file = os.path.join(args.save_dir, args.save_model + '.tar')
#         if not os.path.exists(args.save_dir):
#             os.makedirs(args.save_dir)
#         torch.save({'state_dict': model.state_dict(), 'best_loss': best_metric}, model_save_file)
#         print('Model saved to %s' % model_save_file)


# Testing
outPRED_mcs = torch.FloatTensor().cuda()
model.eval()
iters_per_epoch = len(test_loader)
bar = Bar('Processing {}'.format('inference'), max=len(test_loader))
bar.check_tty = False
for epochID, (imagesA, imagesB, imagesC) in enumerate(test_loader):
    imagesA = imagesA.cuda()
    imagesB = imagesB.cuda()
    imagesC = imagesC.cuda()

    begin_time = time.time()
    _, _, _, _, result_mcs = model(imagesA, imagesB, imagesC)
    outPRED_mcs = torch.cat((outPRED_mcs, result_mcs.data), 0)
    batch_time = time.time() - begin_time
    # 'Time' below is the estimated minutes remaining, not the batch time
    bar.suffix = '{} / {} | Time: {batch_time:.4f}'.format(
        epochID + 1, len(test_loader),
        batch_time=batch_time * (iters_per_epoch - epochID) / 60)
    bar.next()
bar.finish()

# save result into excel:
Example #13
def validation(model, val_loader, epoch, writer):
    # set evaluate mode
    model.eval()

    total_correct, total_label = 0, 0
    total_correct_hb, total_label_hb = 0, 0
    total_correct_fb, total_label_fb = 0, 0
    hist = np.zeros((args.num_classes, args.num_classes))
    hist_hb = np.zeros((args.hbody_cls, args.hbody_cls))
    hist_fb = np.zeros((args.fbody_cls, args.fbody_cls))

    # Iterate over data.
    bar = Bar('Processing {}'.format('val'), max=len(val_loader))
    bar.check_tty = False
    for idx, batch in enumerate(val_loader):
        # image, target, hlabel, flabel, _ = batch
        # image, target, hlabel, flabel = image.cuda(), target.cuda(), hlabel.cuda(), flabel.cuda()

        image, target, hlabel, flabel, _, _ = batch
        image, target = image.cuda(), target.cuda()
        hlabel, flabel = hlabel.cuda(), flabel.cuda()
        with torch.no_grad():
            h, w = target.size(1), target.size(2)
            outputs = model(image)
            outputs = gather(outputs, 0, dim=0)
            preds = F.interpolate(input=outputs[0],
                                  size=(h, w),
                                  mode='bilinear',
                                  align_corners=True)
            preds_hb = F.interpolate(input=outputs[1],
                                     size=(h, w),
                                     mode='bilinear',
                                     align_corners=True)
            preds_fb = F.interpolate(input=outputs[2],
                                     size=(h, w),
                                     mode='bilinear',
                                     align_corners=True)
            # if idx % 50 == 0:
            #     img_vis = inv_preprocess(image, num_images=args.save_num)
            #     label_vis = decode_predictions(target.int(), num_images=args.save_num, num_classes=args.num_classes)
            #     pred_vis = decode_predictions(torch.argmax(preds, dim=1), num_images=args.save_num,
            #                                   num_classes=args.num_classes)
            #
            #     # visual grids
            #     img_grid = torchvision.utils.make_grid(torch.from_numpy(img_vis.transpose(0, 3, 1, 2)))
            #     label_grid = torchvision.utils.make_grid(torch.from_numpy(label_vis.transpose(0, 3, 1, 2)))
            #     pred_grid = torchvision.utils.make_grid(torch.from_numpy(pred_vis.transpose(0, 3, 1, 2)))
            #     writer.add_image('val_images', img_grid, epoch * len(val_loader) + idx + 1)
            #     writer.add_image('val_labels', label_grid, epoch * len(val_loader) + idx + 1)
            #     writer.add_image('val_preds', pred_grid, epoch * len(val_loader) + idx + 1)

            # pixelAcc
            correct, labeled = batch_pix_accuracy(preds.data, target)
            correct_hb, labeled_hb = batch_pix_accuracy(preds_hb.data, hlabel)
            correct_fb, labeled_fb = batch_pix_accuracy(preds_fb.data, flabel)
            # mIoU
            hist += fast_hist(preds, target, args.num_classes)
            hist_hb += fast_hist(preds_hb, hlabel, args.hbody_cls)
            hist_fb += fast_hist(preds_fb, flabel, args.fbody_cls)

            total_correct += correct
            total_correct_hb += correct_hb
            total_correct_fb += correct_fb
            total_label += labeled
            total_label_hb += labeled_hb
            total_label_fb += labeled_fb
            pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
            IoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
            pixAcc_hb = 1.0 * total_correct_hb / (np.spacing(1) +
                                                  total_label_hb)
            IoU_hb = round(np.nanmean(per_class_iu(hist_hb)) * 100, 2)
            pixAcc_fb = 1.0 * total_correct_fb / (np.spacing(1) +
                                                  total_label_fb)
            IoU_fb = round(np.nanmean(per_class_iu(hist_fb)) * 100, 2)
            # plot progress
            bar.suffix = '{} / {} | pixAcc: {pixAcc:.4f}, mIoU: {IoU:.4f} | ' \
                         'pixAcc_hb: {pixAcc_hb:.4f}, mIoU_hb: {IoU_hb:.4f} | ' \
                         'pixAcc_fb: {pixAcc_fb:.4f}, mIoU_fb: {IoU_fb:.4f}'.format(
                             idx + 1, len(val_loader), pixAcc=pixAcc, IoU=IoU,
                             pixAcc_hb=pixAcc_hb, IoU_hb=IoU_hb,
                             pixAcc_fb=pixAcc_fb, IoU_fb=IoU_fb)
            bar.next()

    print('\n per class iou part: {}'.format(per_class_iu(hist) * 100))
    print('per class iou hb: {}'.format(per_class_iu(hist_hb) * 100))
    print('per class iou fb: {}'.format(per_class_iu(hist_fb) * 100))

    mIoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
    # mIoU_hb = round(np.nanmean(per_class_iu(hist_hb)) * 100, 2)
    # mIoU_fb = round(np.nanmean(per_class_iu(hist_fb)) * 100, 2)

    # writer.add_scalar('val_pixAcc', pixAcc, epoch)
    # writer.add_scalar('val_mIoU', mIoU, epoch)
    # writer.add_scalar('val_pixAcc_hb', pixAcc_hb, epoch)
    # writer.add_scalar('val_mIoU_hb', mIoU_hb, epoch)
    # writer.add_scalar('val_pixAcc_fb', pixAcc_fb, epoch)
    # writer.add_scalar('val_mIoU_fb', mIoU_fb, epoch)
    bar.finish()

    return pixAcc, mIoU
Example #14
def train(model, train_loader, epoch, criterion, optimizer, writer):
    # set training mode
    model.train()
    train_loss = 0.0
    iter_num = 0
    kld_lambda_1 = 1.0
    kld_lambda_2 = 1.0
    # Iterate over data.
    bar = Bar('Processing | {}'.format('train'), max=len(train_loader))
    bar.check_tty = False
    for i_iter, batch in enumerate(train_loader):
        sys.stdout.flush()
        start_time = time.time()
        iter_num += 1
        # adjust learning rate
        iters_per_epoch = len(train_loader)
        lr = adjust_learning_rate(optimizer,
                                  epoch,
                                  i_iter,
                                  iters_per_epoch,
                                  method=args.lr_mode)

        # image, label, hlabel, flabel, _ = batch
        # images, labels, hlabel, flabel = image.cuda(), label.long().cuda(), hlabel.cuda(), flabel.cuda()

        image, label, hlabel, flabel, lr_label, _ = batch
        images, labels = image.cuda(), label.long().cuda()
        hlabel, flabel = hlabel.cuda(), flabel.cuda()
        lr_labels = lr_label.cuda()

        torch.set_grad_enabled(True)

        # zero the parameter gradients
        optimizer.zero_grad()

        # compute output loss
        preds = model(images)

        # # Apply exponential decay to the AAF loss.
        # current_step = epoch * iters_per_epoch + i_iter
        # max_step = args.epochs * iters_per_epoch
        # dec = torch.pow(torch.tensor(1e2, requires_grad=False), -current_step / max_step).cuda()

        # loss = criterion(preds, [labels, hlabel, flabel], dec)  # batch mean
        # loss = criterion(preds, [labels, hlabel, flabel])  # batch mean
        loss = criterion(preds,
                         [labels, hlabel, flabel, lr_labels])  # batch mean

        # loss = loss[0]+dec*loss[1]
        # # print(loss)

        # aaf_loss = torch.mean(eloss_1) * kld_lambda_1 * dec
        # aaf_loss += torch.mean(eloss_2) * kld_lambda_1 * dec
        # aaf_loss += torch.mean(eloss_3) * kld_lambda_1 * dec
        # aaf_loss += torch.mean(neloss_1) * kld_lambda_2 * dec
        # aaf_loss += torch.mean(neloss_2) * kld_lambda_2 * dec
        # aaf_loss += torch.mean(neloss_3) * kld_lambda_2 * dec
        # loss = loss

        train_loss += loss.item()

        # compute gradient and do SGD step
        loss.backward()
        optimizer.step()

        if i_iter % 10 == 0:
            writer.add_scalar('learning_rate', lr,
                              iter_num + epoch * len(train_loader))
            writer.add_scalar('train_loss', train_loss / iter_num,
                              iter_num + epoch * len(train_loader))

        batch_time = time.time() - start_time
        # plot progress
        bar.suffix = '{} / {} | Time: {batch_time:.4f} | Loss: {loss:.4f}'.format(
            iter_num,
            len(train_loader),
            batch_time=batch_time,
            loss=train_loss / iter_num)
        bar.next()

    epoch_loss = train_loss / iter_num
    writer.add_scalar('train_epoch_loss', epoch_loss, epoch)
    bar.finish()

    return epoch_loss
Example #15
def TRI_Query(state=None,
              county=None,
              area_code=None,
              year=None,
              chunk_size=100000):
    """Query the EPA Toxic Release Inventory Database
    
    This function constructs a query for the EPA Toxic Release Inventory API, with optional arguments for details such as the two-letter state, county name, area code, and year.  More info here: https://www.epa.gov/enviro/envirofacts-data-service-api
    
    """

    base_url = 'https://data.epa.gov/efservice/'
    #Declare the names of the tables that we want to pull
    table_name1 = 'TRI_FACILITY'
    table_name2 = 'TRI_REPORTING_FORM'
    table_name3 = 'TRI_TRANSFER_QTY'

    output_format = 'CSV'
    query = base_url
    query += table_name1 + '/'
    #Add in the state qualifier, if the desired_state variable is named
    if state:
        query += 'state_abbr/=/' + state + '/'
    #Add in the county qualifier, if the desired_county variable is named
    if county:
        query += 'county_name/' + county + '/'
    #Add in the area code qualifier, if the desired_area_code variable is named
    if area_code:
        query += 'zip_code/' + str(area_code) + '/'
    #Add in the next table name and year qualifier, if the desired_year variable is named
    query += table_name2 + '/'
    if year:
        if isinstance(year, list):
            # a [start, end] pair queries a range of reporting years
            query += ('reporting_year/' + str(year[0]) + '/' +
                      str(year[1]) + '/')
        else:
            query += 'reporting_year/' + str(year) + '/'
    #add the third table
    query += table_name3 + '/'
    count_query = query + 'count/'

    count_xml = requests.get(count_query).content

    nrows = int(
        BeautifulSoup(count_xml,
                      features="lxml").find('requestrecordcount').contents[0])

    #Add in the desired output format to the query
    csv_query = query + output_format

    # Download the records with a progress bar
    bar = Bar('Downloading Records:', max=nrows,
              suffix='%(index)d/%(max)d %(percent).1f%% - %(eta)ds')
    bar.check_tty = False
    s = requests.get(csv_query).content
    # on_bad_lines replaces the error_bad_lines flag removed in pandas 2.0
    dataframe = pd.read_csv(io.StringIO(s.decode('utf-8')),
                            engine='python',
                            encoding='utf-8',
                            on_bad_lines='skip')
    bar.next(n=dataframe.shape[0])
    nrows_prev = dataframe.shape[0]

    while dataframe.shape[0] < nrows:
        new_query = query + 'rows/'+str(dataframe.shape[0])+':'\
                        +str(dataframe.shape[0]+chunk_size)+'/'
        csv_query = new_query + output_format
        s = requests.get(csv_query).content
        # DataFrame.append was removed in pandas 2.0; concatenate instead
        chunk = pd.read_csv(io.StringIO(s.decode('utf-8')),
                            engine='python',
                            encoding='utf-8',
                            on_bad_lines='skip')
        dataframe = pd.concat([dataframe, chunk], ignore_index=True)
        bar.next(n=dataframe.shape[0] - nrows_prev)
        nrows_prev = dataframe.shape[0]

    bar.finish()
    # do the replacement:
    if 'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT' in dataframe.columns:
        dataframe.replace(
            {'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT': wm_dict},
            inplace=True)
    return dataframe
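A hedged usage sketch; the state and year values are arbitrary examples, and a real call downloads data from the EPA service:

df = TRI_Query(state='TX', year=2018)
print(df.shape)
df.to_csv('tri_tx_2018.csv', index=False)   # hypothetical output file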