Example #1
    def __call__(self, D, G, input, target):
        loss_D = 0
        loss_G = 0
        loss_G_FM = 0

        fake = G(input)

        real_features = D(torch.cat((input, target), dim=1))
        fake_features = D(torch.cat((input, fake.detach()), dim=1))

        for i in range(self.n_D):
            real_grid = get_grid(real_features[i][-1],
                                 is_real=True).to(self.device, self.dtype)
            # get_grid only uses the tensor's shape, so passing real_features
            # here would work just as well
            fake_grid = get_grid(fake_features[i][-1],
                                 is_real=False).to(self.device, self.dtype)

            loss_D += (self.criterion(real_features[i][-1], real_grid) +
                       self.criterion(fake_features[i][-1], fake_grid)) * 0.5

        fake_features = D(torch.cat((input, fake), dim=1))

        for i in range(self.n_D):
            real_grid = get_grid(fake_features[i][-1],
                                 is_real=True).to(self.device, self.dtype)
            loss_G += self.criterion(fake_features[i][-1], real_grid)

            if self.opt.HD:
                for j in range(len(fake_features[0])):
                    loss_G_FM += self.FMcriterion(fake_features[i][j],
                                                  real_features[i][j].detach())

        if self.opt.HD:
            # add the accumulated feature-matching term once, after the loop;
            # adding it per iteration (as in some versions of this code) would
            # re-count earlier discriminators' FM losses, since loss_G_FM keeps
            # accumulating across i
            loss_G += loss_G_FM * (1.0 / self.opt.n_D) * self.opt.lambda_FM

        return loss_D, loss_G, target, fake
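The feature-matching loss above (and its variant in Example #6) relies on a get_grid helper that isn't shown. A minimal sketch consistent with how it is called, assuming it simply builds a real/fake target tensor shaped like the patch discriminator's output:

import torch

def get_grid(tensor, is_real=True):
    # Hypothetical reconstruction: a target "grid" of ones (real) or zeros
    # (fake) with the same shape as the discriminator's final feature map,
    # for use with an LSGAN/BCE-style criterion.
    if is_real:
        return torch.ones_like(tensor)
    return torch.zeros_like(tensor)

Under this reading, only the tensor's shape matters, which is why the inline comment notes that real and fake features are interchangeable as the argument.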
Example #2
def part_1():
    grid = utils.get_grid(__file__, grid_cls=Grid, value_transformer=PointType, delimiter='', cast=str)
    while True:
        grid, changes = apply_adjacency_rules(grid)
        if not changes:
            print(len(grid.get_occupied_seats()))
            return
Example #3
def part_2_bfs():
    grid = utils.get_grid(__file__, delimiter='')

    # We find all low points like in part 1, but perform a classic flood fill algorithm
    # via BFS starting from low points, and filling it upwards.
    #
    # Each fill from a low point is guaranteed to cover exactly one basin,
    # since every node that is not of maximal height belongs to exactly one basin.
    basins = []
    for low_point in get_low_points(grid):
        queue = collections.deque([low_point])
        basin = set([low_point])
        visited = set([low_point])

        while queue:
            point = queue.popleft()

            for neighbor in grid.neighbors(point):
                if neighbor not in visited:
                    visited.add(neighbor)

                    if grid[neighbor] != 9:
                        basin.add(neighbor)
                        queue.append(neighbor)

        basins.append(basin)

    top_3 = sorted(basins, key=len, reverse=True)[:3]
    print(math.prod(len(basin) for basin in top_3))
Example #4
def test(epoch):
    imgs = next(iter(test_loader))  # iterator.next() is Python 2 only
    imgs = imgs.cuda()
    pred = model(imgs)
    I = get_grid(imgs.detach().cpu(), pred.detach().cpu())
    I = Image.fromarray(I.numpy())
    I.save('./results/{}.jpg'.format(epoch))
Example #5
def part_2_dag():
    grid = utils.get_grid(__file__, delimiter='')

    # We find all low points like in part 1, but create a directed downflow graph,
    # where u -> v is a directed edge if there is downwards flow from u to v, and u
    # is not a node of maximal height.
    #
    # For a given low point, the surrounding basin forms a DAG in which the
    # low point is the unique sink. The corresponding basin is therefore the
    # low point itself plus every node that can reach it, i.e. its ancestors
    # in the downflow graph.
    downflow_graph = nx.DiGraph()

    for point in grid:
        for neighbor in grid.neighbors(point):
            if grid[point] != 9 and grid[point] > grid[neighbor]:
                downflow_graph.add_edge(point, neighbor)

    basins = []
    for low_point in get_low_points(grid):
        basin = set([low_point]) | nx.ancestors(downflow_graph, low_point)
        basins.append(basin)

    top_3 = sorted(basins, key=len, reverse=True)[:3]
    print(math.prod(len(basin) for basin in top_3))
Example #6
    def __call__(self, D, G, input, target):
        loss_D = 0
        loss_G = 0
        loss_G_FM = 0

        fake = G(input)

        real_features = D(torch.cat((input, target), dim=1))
        fake_features = D(torch.cat((input, fake.detach()), dim=1))

        for i in range(self.opt.n_D):
            real_grid = get_grid(real_features[i][-1],
                                 is_real=True).to(self.device)
            # get_grid only uses the tensor's shape, so passing real_features
            # here would work just as well
            fake_grid = get_grid(fake_features[i][-1],
                                 is_real=False).to(self.device)

            loss_D += (self.criterion(real_features[i][-1], real_grid) +
                       self.criterion(fake_features[i][-1], fake_grid)) * 0.5

        fake_features = D(torch.cat((input, fake), dim=1))

        for i in range(self.opt.n_D):
            for j in range(len(fake_features[0])):
                loss_G_FM += self.FMcriterion(fake_features[i][j],
                                              real_features[i][j].detach())

            real_grid = get_grid(fake_features[i][-1],
                                 is_real=True).to(self.device)

            loss_G += self.criterion(fake_features[i][-1], real_grid)

        loss_G += loss_G_FM * (1.0 / self.opt.n_D) * self.opt.lambda_FM

        if self.opt.VGG_loss:
            loss_G_VGG_FM = 0
            weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
            real_features_VGG, fake_features_VGG = self.VGGNet(
                target), self.VGGNet(fake)

            for i in range(len(real_features_VGG)):
                loss_G_VGG_FM += weights[i] * self.FMcriterion(
                    fake_features_VGG[i], real_features_VGG[i])
            loss_G += loss_G_VGG_FM * self.opt.lambda_FM

        return loss_D, loss_G, target, fake
Example #7
def part_1():
    grid = utils.get_grid(__file__, grid_cls=utils.DiagonalGrid, delimiter='')
    flash_count = 0

    for _ in range(100):
        flashed = run_step(grid)
        flash_count += len(flashed)

    print(flash_count)
Example #8
def part_2():
    slopes = [
        (1, 1),
        (1, 3),
        (1, 5),
        (1, 7),
        (2, 1),
    ]
    grid = utils.get_grid(__file__, grid_cls=Grid, delimiter='', cast=str)
    print(math.prod(get_trees_encountered(grid, slope) for slope in slopes))
Example #9
def part_2():
    grid = utils.get_grid(__file__, grid_cls=utils.DiagonalGrid, delimiter='')
    step = 0

    while True:
        step += 1
        flashed = run_step(grid)

        if len(flashed) == len(grid):
            print(step)
            break
Example #10
def part_1():
    grid = utils.get_grid(__file__, delimiter='', cast=str)
    step = 0

    while True:
        moves = move_east(grid)
        moves += move_south(grid)
        step += 1

        if not moves:
            print(step)
            break
Example #11
    def forward(self, x, flow):
        args = self.args
        # WarpingLayer uses F.grid_sample, which expects a grid normalized to
        # [-1, 1]. We still output unnormalized flow for the convenience of
        # comparing EPEs with FlowNet2 and the original code, so the flow has
        # to be normalized here first.
        flow_for_grid = torch.zeros_like(flow)
        flow_for_grid[:, 0, :, :] = flow[:, 0, :, :] / ((flow.size(3) - 1.0) / 2.0)
        flow_for_grid[:, 1, :, :] = flow[:, 1, :, :] / ((flow.size(2) - 1.0) / 2.0)

        grid = (get_grid(x).to(args.device) + flow_for_grid).permute(0, 2, 3, 1)
        # the (size - 1) / 2 scaling above matches align_corners=True
        x_warp = F.grid_sample(x, grid, align_corners=True)
        return x_warp
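This warping layer assumes a get_grid(x) that returns the identity sampling grid in the normalized [-1, 1] coordinates F.grid_sample expects, shaped (B, 2, H, W) so it can be added to the normalized flow before the permute. A sketch under that assumption:

import torch

def get_grid(x):
    # Hypothetical reconstruction: identity grid with channel 0 holding
    # x-coordinates and channel 1 holding y-coordinates, both in [-1, 1].
    b, _, h, w = x.size()
    ys = torch.linspace(-1.0, 1.0, h)
    xs = torch.linspace(-1.0, 1.0, w)
    grid_y, grid_x = torch.meshgrid(ys, xs, indexing='ij')
    grid = torch.stack((grid_x, grid_y), dim=0)  # (2, H, W)
    return grid.unsqueeze(0).expand(b, -1, -1, -1)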
Example #12
def part_2():
    grid = utils.get_grid(__file__, delimiter='')

    # We remove all nodes of maximal height from the original graph. This will partition
    # the grid into connected components, where the connected components are precisely
    # the basins.
    for point, value in grid.items():
        if value == 9:
            grid.graph.remove_node(point)

    basins = nx.connected_components(grid.graph)
    top_3 = sorted(basins, key=len, reverse=True)[:3]
    print(math.prod(len(basin) for basin in top_3))
Example #13
def nearest_warp(x, flow):
    grid_b, grid_y, grid_x = get_grid(x)
    flow = tf.cast(flow, tf.int32)

    _, h, w, _ = tf.unstack(tf.shape(x))
    warped_gy = tf.add(grid_y, flow[:,:,:,1]) # flow_y
    warped_gy = tf.clip_by_value(warped_gy, 0, h-1)
    warped_gx = tf.add(grid_x, flow[:,:,:,0]) # flow_x
    warped_gx = tf.clip_by_value(warped_gx, 0, w-1)
            
    warped_indices = tf.stack([grid_b, warped_gy, warped_gx], axis = 3)
            
    warped_x = tf.gather_nd(x, warped_indices)
    return warped_x
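Both TensorFlow warps (this example and the next) unpack get_grid(x) into batch, row, and column index grids. A plausible sketch, assuming integer (B, H, W) index tensors built with tf.meshgrid:

import tensorflow as tf

def get_grid(x):
    # Hypothetical reconstruction: per-pixel batch / y / x indices, each of
    # shape (B, H, W), in integer pixel coordinates.
    b, h, w, _ = tf.unstack(tf.shape(x))
    grid_b, grid_y, grid_x = tf.meshgrid(
        tf.range(b), tf.range(h), tf.range(w), indexing='ij')
    return grid_b, grid_y, grid_x

Integer grids match nearest_warp's int32 arithmetic directly; bilinear_warp casts them to float32 before adding the fractional flow.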
Example #14
def bilinear_warp(x, flow):
    _, h, w, _ = tf.unstack(tf.shape(x))
    grid_b, grid_y, grid_x = get_grid(x)
    grid_b = tf.cast(grid_b, tf.float32)
    grid_y = tf.cast(grid_y, tf.float32)
    grid_x = tf.cast(grid_x, tf.float32)

    fx, fy = tf.unstack(flow, axis = -1)
    fx_0 = tf.floor(fx)
    fx_1 = fx_0+1
    fy_0 = tf.floor(fy)
    fy_1 = fy_0+1

    # warping indices
    h_lim = tf.cast(h-1, tf.float32)
    w_lim = tf.cast(w-1, tf.float32)
    gy_0 = tf.clip_by_value(grid_y + fy_0, 0., h_lim)
    gy_1 = tf.clip_by_value(grid_y + fy_1, 0., h_lim)
    gx_0 = tf.clip_by_value(grid_x + fx_0, 0., w_lim)
    gx_1 = tf.clip_by_value(grid_x + fx_1, 0., w_lim)
    
    g_00 = tf.cast(tf.stack([grid_b, gy_0, gx_0], axis = 3), tf.int32)
    g_01 = tf.cast(tf.stack([grid_b, gy_0, gx_1], axis = 3), tf.int32)
    g_10 = tf.cast(tf.stack([grid_b, gy_1, gx_0], axis = 3), tf.int32)
    g_11 = tf.cast(tf.stack([grid_b, gy_1, gx_1], axis = 3), tf.int32)

    # gather contents
    x_00 = tf.gather_nd(x, g_00)
    x_01 = tf.gather_nd(x, g_01)
    x_10 = tf.gather_nd(x, g_10)
    x_11 = tf.gather_nd(x, g_11)

    # coefficients
    c_00 = tf.expand_dims((fy_1 - fy)*(fx_1 - fx), axis = 3)
    c_01 = tf.expand_dims((fy_1 - fy)*(fx - fx_0), axis = 3)
    c_10 = tf.expand_dims((fy - fy_0)*(fx_1 - fx), axis = 3)
    c_11 = tf.expand_dims((fy - fy_0)*(fx - fx_0), axis = 3)

    return c_00*x_00 + c_01*x_01 + c_10*x_10 + c_11*x_11
Example #15
def part_2():
    grid = utils.get_grid(__file__,
                          input_transformer=expand_map,
                          grid_cls=utils.DirectedGrid,
                          delimiter='')
    print(get_lowest_risk_path(grid))
Example #16
def part_1():
    grid = utils.get_grid(__file__, delimiter='')
    print(sum(grid[point] + 1 for point in get_low_points(grid)))
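Several of the day-9 solutions (Examples #3, #5, and this one) call a get_low_points helper that isn't shown. A minimal sketch consistent with its uses, assuming the grid maps points to integer heights and exposes neighbors():

def get_low_points(grid):
    # Hypothetical reconstruction: a low point is strictly lower than
    # every one of its neighbors.
    return [point for point in grid
            if all(grid[point] < grid[neighbor]
                   for neighbor in grid.neighbors(point))]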
Example #17
# networks
af_plus = get_network('AF_plus')
feature_loss = get_network('feature_loss')
discrim = get_network('discriminator')
gan_loss = get_network('gan_loss') 

# loss function
l1_loss = torch.nn.L1Loss()

# adaptive scale space during training
sigma = torch.tensor([2.0], requires_grad=True)  # learnable scale; must stay a leaf tensor for Adam (Variable is deprecated, and the old .float() call made it non-leaf)
gauss_conv = F.conv2d
gauss_update_method = Gaussian_Conv_Update()

# grid for bilinear sampling
grid = get_grid(cfg.data.image_size, cfg.data.border_size)

# optimizer for the network
param1 = list(af_plus.parameters())
param2 = [sigma]

optim = torch.optim.Adam([{'params': param2, 'lr': 0.001, 'weight_decay': 0.0},
                          {'params': param1}],
                         lr=cfg.train.learning_rate, weight_decay=cfg.train.weight_decay)

# optimizer for the discriminator
optim_d = torch.optim.Adam(discrim.parameters(), lr=cfg.train.learning_rate, weight_decay=cfg.train.weight_decay)

ep = cfg.train.num_epochs # number of epochs

# initialize logging variables
train_loss = [] 
Example #18
def part_1():
    grid = utils.get_grid(__file__, grid_cls=utils.DirectedGrid, delimiter='')
    print(get_lowest_risk_path(grid))
Example #19
def part_1():
    grid = utils.get_grid(__file__, grid_cls=Grid, delimiter='', cast=str)
    print(get_trees_encountered(grid, (1, 3)))
Example #20
    def __init__(self):
        """Construct."""
        self.grid = get_grid()