def test_ars_optimizer(self):
     dim = 10
     n_generations = 30
     X = torch.Tensor([[i] for i in range(dim)])
     y = torch.ones(dim)
     n_pert = 100
     feature_dim = 2
     np.random.seed(seed=123456)
     ars_opt = ARSOptimizer(feature_dim, n_pert, rand_ars_params=True)
     for i in range(n_generations):
         perturbed_params = ars_opt.sample_perturbed_params()
         rewards = []
         for pos_param, neg_param in perturbed_params:
             pos_weight = torch.sigmoid(
                 torch.matmul(torch.column_stack((X, y)), pos_param))
             # ARSOptimizer maximizes the reward, so negate the
             # objective when the goal is to minimize it.
             r_pos = -self.metric(pos_weight.numpy())
             rewards.append(r_pos)
             neg_weight = torch.sigmoid(
                 torch.matmul(torch.column_stack((X, y)), neg_param))
             r_neg = -self.metric(neg_weight.numpy())
             rewards.append(r_neg)
         ars_opt.update_ars_params(torch.Tensor(rewards))
         new_weight = torch.sigmoid(
             torch.matmul(
                 torch.column_stack((X, y)),
                 torch.from_numpy(ars_opt.ars_params).float(),
             ))
         perf = self.metric(new_weight.numpy())
         print(f"gen {i}: perf {perf}")
     self.assertLessEqual(perf, 1e-15)
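For reference, the `torch.column_stack((X, y))` calls above build the (dim, 2) feature matrix that matches `feature_dim = 2`; a minimal standalone check of that step (no ARSOptimizer needed):

import torch

X = torch.Tensor([[i] for i in range(10)])  # shape (10, 1)
y = torch.ones(10)                          # shape (10,)
features = torch.column_stack((X, y))       # shape (10, 2), rows [i, 1.0]
print(features.shape)  # torch.Size([10, 2])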
Example #2
    def forward(self, data):
        if self.training:
            return self.main(data)

        out = self.main[0](data)
        out1 = self.main[1](out)

        out2 = self.main[2](out1)
        out3 = self.main[3](out2)
        out4 = self.main[4](out3)

        out5 = self.main[5](out4)
        out6 = self.main[6](out5)
        out7 = self.main[7](out6)

        mp1 = nn.MaxPool2d(kernel_size=2, padding=0)
        mp2 = nn.MaxPool2d(kernel_size=2, padding=0)
        mp3 = nn.MaxPool2d(kernel_size=2, padding=0)

        if self.data_generation_mode == 1:
            flt1 = mp1(out1).flatten(start_dim=1)
            flt2 = mp2(out4).flatten(start_dim=1)
            flt3 = mp3(out7).flatten(start_dim=1)
        else:
            flt1 = mp1(out).flatten(start_dim=1)
            flt2 = mp2(out2).flatten(start_dim=1)
            flt3 = mp3(out5).flatten(start_dim=1)

        flt21 = torch.column_stack((flt1, flt2))
        final = torch.column_stack((flt21, flt3))

        return final
Example #3
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(2, 4, 2)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     return (
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(y, i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
     )
Example #4
def custom_izhikevich_step(
    input_current: torch.Tensor,
    s: CustomIzhikevichState,
    p: IzhikevichParameters,
    dt: float = 0.001,
) -> Tuple[torch.Tensor, CustomIzhikevichState]:
    v_ = s.v + p.tau_inv * dt * (p.sq * s.v**2 + p.mn * s.v + p.bias - s.u +
                                 input_current)
    u_ = s.u + p.tau_inv * dt * p.a * (p.b * s.v - s.u)

    z_ = heaviside(v_ - p.v_th)
    v_ = (1 - z_) * v_ + z_ * p.c
    u_ = (1 - z_) * u_ + z_ * (u_ + p.d)

    # TODO: should this happen before or after the membrane reset?
    #  > per the OSTL paper, it should happen after the cell computation.
    s_ = CustomIzhikevichState(v_, u_, s.e)
    if s_.e.numel() > 0:
        last_e = s_.e[:, -1:]  # last eligibility trace
    else:
        last_e = None

    e_ = compute_eligibility_vector(s_, p, dt, last_e)
    if s_.e.numel() > 0:
        e_ = torch.column_stack((s_.e, e_))
    else:
        e_ = torch.unsqueeze(e_, 0)

    return z_, CustomIzhikevichState(v_, u_, e_)
Example #5
def get_gap_segments(segments: torch.Tensor) -> torch.Tensor:
    """
    Get the gap segments. 
    For example,
    torch.Tensor([[start1, end1], [start2, end2], [start3, end3]]) -> torch.Tensor([[end1, start2], [end2, start3]])
    """
    segments = segments[segments[:, 0].sort()[1]]
    return torch.column_stack((segments[:-1, 1], segments[1:, 0]))
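A minimal usage sketch with made-up segment boundaries, to illustrate the output:

segments = torch.Tensor([[2.0, 3.5], [0.0, 1.0], [5.0, 6.0]])
get_gap_segments(segments)
# -> tensor([[1.0, 2.0], [3.5, 5.0]])  (segments are sorted by start time first)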
Example #6
 def forward(self, screens, states):
     c = self.fe(screens)  # extract features from pixel input
     c = self.fc(torch.column_stack([c, states]))
     mu = self.mu(c)
     var = self.var(c)
     v = self.val(c)
     p = self.p(c)
     return mu, var, v, p
Example #7
 def tensor_indexing_ops(self):
     x = torch.randn(2, 4)
     y = torch.randn(4, 4)
     t = torch.tensor([[0, 0], [1, 0]])
     mask = x.ge(0.5)
     i = [0, 1]
     # len() takes a single argument, so collect the ops into one tuple
     return len((
         torch.cat((x, x, x), 0),
         torch.concat((x, x, x), 0),
         torch.conj(x),
         torch.chunk(x, 2),
         torch.dsplit(torch.randn(2, 2, 4), i),
         torch.column_stack((x, x)),
         torch.dstack((x, x)),
         torch.gather(x, 0, t),
         torch.hsplit(x, i),
         torch.hstack((x, x)),
         torch.index_select(x, 0, torch.tensor([0, 1])),
         x.index(t),
         torch.masked_select(x, mask),
         torch.movedim(x, 1, 0),
         torch.moveaxis(x, 1, 0),
         torch.narrow(x, 0, 0, 2),
         torch.nonzero(x),
         torch.permute(x, (0, 1)),
         torch.reshape(x, (-1, )),
         torch.row_stack((x, x)),
         torch.select(x, 0, 0),
         torch.scatter(x, 0, t, x),
         x.scatter(0, t, x.clone()),
         torch.diagonal_scatter(y, torch.ones(4)),
         torch.select_scatter(y, torch.ones(4), 0, 0),
         torch.slice_scatter(x, x),
         torch.scatter_add(x, 0, t, x),
         x.scatter_(0, t, y),
         x.scatter_add_(0, t, y),
         # torch.scatter_reduce(x, 0, t, reduce="sum"),
         torch.split(x, 1),
         torch.squeeze(x, 0),
         torch.stack([x, x]),
         torch.swapaxes(x, 0, 1),
         torch.swapdims(x, 0, 1),
         torch.t(x),
         torch.take(x, t),
         torch.take_along_dim(x, torch.argmax(x)),
         torch.tensor_split(x, 1),
         torch.tensor_split(x, [0, 1]),
         torch.tile(x, (2, 2)),
         torch.transpose(x, 0, 1),
         torch.unbind(x),
         torch.unsqueeze(x, -1),
         torch.vsplit(x, i),
         torch.vstack((x, x)),
         torch.where(x),
         torch.where(t > 0, t, 0),
         torch.where(t > 0, t, t),
     ))
Example #8
    def forward(self, screens, states):
        """ - screen is the pixel observation
            - obs is the concatenated current and target state

            Returns: combined features of the entire observation
        """
        c = self.conv(screens).view(screens.size()[0], -1)
        c = torch.column_stack([self.fc1(c), states])
        # return self.fc2(c)
        return c
Example #9
    def compute_metrics(truth: FullCatalog, pred: FullCatalog):

        # prepare magnitude bins
        mag_cuts2 = torch.arange(18, 24.5, 0.25)
        mag_cuts1 = torch.full_like(mag_cuts2, fill_value=-np.inf)
        mag_cuts = torch.column_stack((mag_cuts1, mag_cuts2))

        mag_bins2 = torch.arange(18, 25, 1.0)
        mag_bins1 = mag_bins2 - 1
        mag_bins = torch.column_stack((mag_bins1, mag_bins2))

        # compute metrics
        cuts_data = compute_mag_bin_metrics(mag_cuts, truth, pred)
        bins_data = compute_mag_bin_metrics(mag_bins, truth, pred)

        # data for scatter plot of misclassifications (over all magnitudes).
        tplocs = truth.plocs.reshape(-1, 2)
        eplocs = pred.plocs.reshape(-1, 2)
        tindx, eindx, dkeep, _ = reporting.match_by_locs(tplocs,
                                                         eplocs,
                                                         slack=1.0)

        # compute egprob separately for PHOTO
        egbool = pred["galaxy_bools"].reshape(-1)[eindx][dkeep]
        egprob = pred.get("galaxy_probs", None)
        egprob = egbool if egprob is None else egprob.reshape(-1)[eindx][dkeep]
        full_metrics = {
            "tgbool": truth["galaxy_bools"].reshape(-1)[tindx][dkeep],
            "egbool": egbool,
            "egprob": egprob,
            "tmag": truth["mags"].reshape(-1)[tindx][dkeep],
            "emag": pred["mags"].reshape(-1)[eindx][dkeep],
        }

        return {
            "mag_cuts": mag_cuts2,
            "mag_bins": mag_bins2,
            "cuts_data": cuts_data,
            "bins_data": bins_data,
            "full_metrics": full_metrics,
        }
Example #10
def to_corner_parametrization(bounding_box: Tensor) -> Tensor:
    """
    Converts from center/side-lengths parametrization to corners coordinates
    :param bounding_box: tensor [N, 4] of bounding boxes parameters in the form
    [x-coordinate of center, y-coordinate of center, width, height]
    :return: tensor [N, 4] of bounding box parameters in the form
    [x-coordinate of top left corner, y-coordinate of top left corner,
     x-coordinate of bottom right corner, y-coordinate of bottom right corner]
    """
    center_x, center_y, width, height = bounding_box.T
    half_w, half_h = width / 2, height / 2
    top_left_x, top_left_y = center_x - half_w, center_y - half_h
    bottom_right_x, bottom_right_y = center_x + half_w, center_y + half_h
    return torch.column_stack(
        (top_left_x, top_left_y, bottom_right_x, bottom_right_y))
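A quick sanity check with a hypothetical box centred at (1, 2) with width 4 and height 2:

boxes = torch.tensor([[1.0, 2.0, 4.0, 2.0]])
to_corner_parametrization(boxes)
# -> tensor([[-1., 1., 3., 3.]])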
Example #11
    def training_step(self, batch, batch_idx):
        x, y = batch
        y_pred = self(x)
        pixelwise_loss = mse_loss(y_pred, y)

        if half_edge_loss_factor > 0.0:
            y_coordinates = x[:, 0].long()
            x_coordinates = x[:, 1].long()
            x_coords_shifted_left = torch.clamp(x_coordinates - 1, min=0)
            y_coords_shifted_up = torch.clamp(y_coordinates - 1, min=0)

            image_indexes = torch.argmax(x[:, 2:], dim=1)

            x_left_coords = torch.column_stack(
                (y_coordinates, x_coords_shifted_left)).float()
            x_left = torch.clone(x)
            x_left[:, 0:2] = x_left_coords
            x_up_coords = torch.column_stack(
                (y_coords_shifted_up, x_coordinates)).float()
            x_up = torch.clone(x)
            x_up[:, 0:2] = x_up_coords

            y_pred_left = self(x_left)
            y_pred_up = self(x_up)
            dx_pred = y_pred - y_pred_left
            dy_pred = y_pred - y_pred_up

            dx_gt = dx_images[image_indexes, y_coordinates, x_coordinates]
            dy_gt = dy_images[image_indexes, y_coordinates, x_coordinates]
            dx_loss = mse_loss(dx_pred.flatten(), dx_gt)
            dy_loss = mse_loss(dy_pred.flatten(), dy_gt)

            return (0.5 * pixelwise_loss + half_edge_loss_factor * dx_loss +
                    half_edge_loss_factor * dy_loss)
        else:
            return pixelwise_loss
Example #12
def create_training_database():
    min = -10
    max = 10

    # create distribution of 1000 random pairs in range [-10, 10)
    pairs = (max - min) * torch.rand(1000, 2) + min

    # each row of `pairs` is (x1, x2); split into the two coordinates
    l1 = torch.tensor([pair[0] for pair in pairs])
    l2 = torch.tensor([pair[1] for pair in pairs])

    function_results = torch.sin(l1 + (l2 / numpy.pi))

    # tensor of shape (1000, 3): columns (l1, l2, result)
    pairs_with_results = torch.column_stack((pairs, function_results))

    torch.save(pairs_with_results, "mydataset.dat")
Example #13
def generate_vad_segment_table_per_tensor(sequence: torch.Tensor, per_args: Dict[str, float]) -> torch.Tensor:
    """
    See description in generate_overlap_vad_seq.
    Use this for single instance pipeline. 
    """

    shift_length_in_sec = per_args['shift_length_in_sec']
    speech_segments = binarization(sequence, per_args)
    speech_segments = filtering(speech_segments, per_args)

    if speech_segments.shape == torch.Size([0]):
        return speech_segments

    speech_segments, _ = torch.sort(speech_segments, 0)

    dur = speech_segments[:, 1:2] - speech_segments[:, 0:1] + shift_length_in_sec
    speech_segments = torch.column_stack((speech_segments, dur))

    return speech_segments
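The final column_stack simply appends a duration column to each (start, end) pair; a minimal illustration with hypothetical segment boundaries (binarization and filtering are applied upstream and not shown here):

speech_segments = torch.tensor([[0.0, 1.2], [2.0, 3.0]])
shift_length_in_sec = 0.01
dur = speech_segments[:, 1:2] - speech_segments[:, 0:1] + shift_length_in_sec
torch.column_stack((speech_segments, dur))
# -> tensor([[0.00, 1.20, 1.21], [2.00, 3.00, 1.01]])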
Example #14
    def fit(self, A, Y, num_sample, threshold):
        num_iterations = math.inf
        iterations_done = 0
        num_samples = 3
        max_inlier_count = 0
        best_model = None
        probability_outlier = torch.scalar_tensor(0.5, dtype=torch.double)
        desired_prob = torch.scalar_tensor(0.95, dtype=torch.double)
        total_data = torch.column_stack((A, Y))
        data_size = len(total_data)

        while num_iterations > iterations_done:

            # shuffle rows with randperm; random.shuffle on a tensor swaps
            # aliased row views and can corrupt the data
            total_data = total_data[torch.randperm(data_size)]
            sample_data = total_data[:num_samples, :]
            estimated_model = self.curve_fitting_model.fit(
                sample_data[:, :-1], sample_data[:, -1:])
            y_cap = torch.mm(A, estimated_model)
            error = torch.abs(Y - y_cap.T)
            inlier_count = torch.count_nonzero(error < threshold)

            if inlier_count > max_inlier_count:
                max_inlier_count = inlier_count
                best_model = estimated_model

            probability_outlier = 1 - inlier_count / data_size
            #print('# inliers:', inlier_count)
            #print('# prob_outlier:', probability_outlier)
            num_iterations = torch.log(1 - desired_prob) / torch.log(
                1 - (1 - probability_outlier)**num_sample)
            iterations_done = iterations_done + 1

            #print('# s:', iterations_done)
            #print('# n:', num_iterations)
            #print('# max_inlier_count: ', max_inlier_count)

        return best_model
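As a sanity check of the stopping rule: with desired_prob = 0.95, a sample size of 3 and an estimated outlier ratio of 0.5, num_iterations = log(1 - 0.95) / log(1 - 0.5**3) ≈ 22.4, so the loop stops after 23 random samples if the outlier estimate stays at 0.5.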
Example #15
import math

import torch

# distribution of 1000 random points in the domain [-10, 10]^2
maximum_value = 10
minimum_value = -10
input_tensor = (maximum_value - minimum_value) * torch.rand(1000,
                                                            2) + minimum_value

function_values = []

# value of f for each point
for i in range(1000):
    x1 = input_tensor[i][0]
    x2 = input_tensor[i][1]
    value = torch.sin(x1 + x2 / math.pi)
    function_values.append(value)

function_values = torch.tensor(function_values)
# create the pairs
pairs = torch.column_stack((input_tensor, function_values))
# print(pairs)

# save database
torch.save(pairs, "mydataset.dat")
Example #16
 def forward(self, screens, states):
     c = self.conv(screens).view(screens.size()[0], -1)
     c = torch.column_stack([self.fc1(c), states])
     # return self.fc2(c)
     return c
Example #17
import torch

n = 1000
mini = -10
maxi = 10

random_points = (mini - maxi) * torch.rand(n, 2) + maxi

PI = torch.tensor(3.14159265359)

points = []
results = []
for point in random_points:
    x1 = point[0]
    x2 = point[1]
    # f_(x1, x2) = sin(x1 + x2/pi)
    r = torch.sin(torch.addcdiv(x1, x2, PI))
    points.append(point)
    results.append(r)

points = torch.stack(points)
results = torch.stack(results)

database = torch.column_stack((points, results))
# print(database)
torch.save(database, "mydataset.dat")
Example #18
import torch
import json
import math

right, left, size = 10, -10, 1000
randomTensor = (right - left) * torch.rand((size, 2)) + left
firstCoordinate = []
secondCoordinate = []

for value in randomTensor:
    firstCoordinate.append(value[0])
    secondCoordinate.append(value[1])

firstCoordinate = torch.tensor(firstCoordinate)
secondCoordinate = torch.tensor(secondCoordinate)

functionValues = torch.sin(firstCoordinate + (secondCoordinate / math.pi))
dataset = torch.column_stack((randomTensor, functionValues))
print(dataset)
torch.save(dataset, "mydataset.dat")
Example #19
 def __createTensor(self):
     inputTensor = self.__generateInput()
     outputTensor = self.__computeFunctionValues(inputTensor)
     pairedTensor = torch.column_stack((inputTensor, outputTensor))
     return pairedTensor
Example #20
def createDistribution(n=1000):
    points = torch.rand(n, 2) * 20 - 10
    results = computeResults(points)

    return torch.column_stack((points, results))
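computeResults is not defined in this snippet; judging from the other examples on this page, it presumably evaluates f(x1, x2) = sin(x1 + x2 / pi) per row. A minimal sketch under that assumption:

import math
import torch

def computeResults(points):
    # hypothetical implementation: f(x1, x2) = sin(x1 + x2 / pi), row-wise
    return torch.sin(points[:, 0] + points[:, 1] / math.pi)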
Example #21
import torch
import numpy as np

max_ = 10
min_ = -10
size = 1000

x = (max_ - min_) * torch.rand((size, 2)) + min_
x_0 = torch.tensor([x_[0] for x_ in x])
x_1 = torch.tensor([x_[1] for x_ in x])
y = torch.sin(x_0 + x_1 / np.pi)
pairs = torch.column_stack((x, y))

print(x)
print(y)
print(pairs)

torch.save(pairs, 'mydataset.dat')