Example #1
    def init_cent(self, data_array, k):
        temp_data = data_array.clone()
        num_point = temp_data.shape[0]
        num_dim = temp_data.shape[1]

        centroid = torch.zeros(k, num_dim)
        cent_idx = Tensor(np.random.choice(range(num_point), 1)).long()
        centroid[0, :] = temp_data[cent_idx, :]
        temp_data = temp_data[torch.arange(num_point) != cent_idx]
        num_point -= 1
        for k_i in range(0, k - 1, 1):
            distance = torch.sqrt(
                torch.pow(
                    centroid[0:k_i + 1, :].unsqueeze(0).repeat(
                        num_point, 1, 1) -
                    temp_data.unsqueeze(1).repeat(1, k_i + 1, 1),
                    2).sum(dim=-1))
            distance = distance.sum(dim=-1)
            val, idx = torch.max(distance, dim=0)
            centroid[k_i + 1, :] = temp_data[idx, :]
            temp_data = temp_data[torch.arange(num_point) != idx]
            num_point -= 1

        return centroid
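For comparison, here is a minimal standalone sketch of the same farthest-point seeding using torch.cdist, assuming data of shape (N, D). This is an illustration of the idea, not the class method above:

import torch

def farthest_point_init(data: torch.Tensor, k: int) -> torch.Tensor:
    # Pick the first centroid at random, then repeatedly take the remaining
    # point whose summed distance to the chosen centroids is largest.
    n = data.shape[0]
    remaining = torch.ones(n, dtype=torch.bool)
    first = torch.randint(n, (1,)).item()
    remaining[first] = False
    centroids = data[first].unsqueeze(0)
    for _ in range(k - 1):
        dist = torch.cdist(data[remaining], centroids).sum(dim=1)
        pick = torch.nonzero(remaining).squeeze(1)[dist.argmax()]
        remaining[pick] = False
        centroids = torch.cat([centroids, data[pick].unsqueeze(0)])
    return centroids

# usage: centers = farthest_point_init(torch.randn(200, 2), k=4)  # -> shape (4, 2)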
Example #2
    def forward(self, x, cf, cc):
        H = self.s_1(x)
        H = self.s_1b(H)
        H = self.s_1a(H)
        H = self.s_2(H)
        H = self.s_2b(H)
        H = self.s_2a(H)

        H = H.view(x.size(0), -1)
        if isinstance(cf, Variable):
            fin_state = torch.cat([cf, cc])
        else:
            fin_state = Variable(Tensor([cf, cc]))
        fin_state = torch.unsqueeze(fin_state, 0)
        H = torch.cat([H, fin_state], -1)

        H = self.d_1(H)
        H = self.d_1b(H)
        H = self.d_1a(H)
        H = self.d_2(H)
        H = self.d_2b(H)
        H = self.d_2a(H)
        H = self.d_f(H)
        return self.d_fs(H)
Example #3
  def warmup(self):
    """
    Warmup the DND with values from an episode with a random policy
    """
    state = self.env.reset()
    total_reward = 0
    done = False
    while not done:
      action = random.randint(0, self.env.action_space.n - 1)
      next_state, reward, done, _ = self.env.step(action)
      total_reward += reward
      self.transition_queue.append(Transition(state, action, reward))
      state = next_state

    for t in range(len(self.transition_queue)):
      transition = self.transition_queue[t]
      state = Variable(Tensor(transition.state)).unsqueeze(0)
      action = transition.action
      state_embedding = self.embedding_network(move_to_gpu(state))
      dnd = self.dnd_list[action]

      Q_N = move_to_gpu(self.Q_lookahead(t, True))
      if dnd.keys_to_be_inserted is None and dnd.keys is None:
        dnd.insert(state_embedding, Q_N.detach().unsqueeze(0))
      else:
        embedding_index = dnd.get_index(state_embedding)
        if embedding_index is None:
          dnd.insert(state_embedding.detach(), Q_N.detach().unsqueeze(0))
        else:
          Q = self.Q_update(dnd.values[embedding_index], Q_N)
          dnd.update(Q.detach(), embedding_index)
      self.replay_memory.push(transition.state, action, Q_N)
    [dnd.commit_insert() for dnd in self.dnd_list]
    # Clear out transition queue
    self.transition_queue = []
    return total_reward
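The Transition record used above is not shown in this snippet; judging from the attribute accesses, it is presumably a simple namedtuple along these lines (an assumption, not necessarily the repository's exact definition):

from collections import namedtuple

# Hypothetical definition matching transition.state / .action / .reward above.
Transition = namedtuple('Transition', ('state', 'action', 'reward'))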
Example #4
def get_class_weights(labels: Series) -> Tensor:
    """
    Calculate class weightings based on each class' proportion
    in the label.
    
    :param labels: The labels in the training dataset.
    :type labels: Series
    :return: A tensor of weights.
    :rtype: Tensor
    """
    # Calculate class weightings
    class_counts = dict(Counter(labels))
    m = max(class_counts.values())
    for c in class_counts:
        class_counts[c] = m / class_counts[c]
    # Convert weightings to tensor
    weights = []
    for k in sorted(class_counts.keys()):
        weights.append(class_counts[k])
    weights = Tensor(weights)
    # Move weights to GPU if available
    if cuda.is_available():
        weights = weights.cuda()
    return weights
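A small usage sketch, assuming the function above is importable along with its dependencies (collections.Counter, pandas, torch); the weights are inversely proportional to class frequency and can be passed straight to a weighted loss:

import pandas as pd
import torch

labels = pd.Series([0, 0, 0, 1, 1, 2])            # toy training labels
weights = get_class_weights(labels)                # tensor([1.0, 1.5, 3.0])
criterion = torch.nn.CrossEntropyLoss(weight=weights)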
Example #5
    def program_weights(self, from_reference: bool = True) -> None:
        """Apply weights noise to the current tile weights and saves these for
        repeated drift experiments.

        This method also establishes the drift coefficients for each
        conductance slice.

        Args:
            from_reference: Whether to use weights from reference
        """
        if not from_reference or self.reference_combined_weights is None:
            self.reference_combined_weights = Tensor(
                self.tile.get_weights()).to(self.device)

        self.programmed_weights, self.nu_drift_list = self.noise_model.apply_programming_noise(
            self.reference_combined_weights)

        if self.drift_compensation is not None:
            self.tile.set_weights(
                self.programmed_weights.detach().cpu().numpy())
            forward_output = self._forward_drift_readout_tensor()

            self.drift_baseline = self.drift_compensation.init_baseline(
                forward_output)
Example #6
    def __getitem__(self, index):
        file_path = train[index]
        img = np.array(Image.open(file_path).convert('RGB').resize([128, 128]))
        img = Tensor(img).view(3, 128, 128)

        file_name = ntpath.basename(ntpath.splitext(train[index])[0])
        y = 0
        age = -1
        sex = -1
        if melanoma_dict.get(file_name) is not None:
            y = melanoma_dict.get(file_name)['target']
            g = melanoma_dict.get(file_name)['gender']
            if g == 'female':
                g = 0
            elif g == 'male':
                g = 1
            else:
                g = -1
            sex = g
            age = melanoma_dict.get(file_name)['age']
            if type(age) == str:
                age = -80

        return [img, y, sex, age]
Example #7
    def forward(self, x):
        """forward pass of GP model

        """
        year = x.narrow(1,0,1)
        week = x.narrow(1,1,1)
        spatial = x.narrow(1,2,2)
        remote = x.narrow(1,5,1)
        #social = x.narrow(1,8,1)
        # prevent the period from being reset
        mean = self.mean_module(x).view(-1)

        # compute covariances
        self.covar_season.period_length = Tensor([1])  # fix the yearly seasonality period length to 1
        covar_season = self.covar_season(year)
        covar_week = self.covar_week(week)
        covar_spatial = self.covar_spatial(spatial)
        covar_remote = self.covar_remote(remote)
        #covar_social = self.covar_social(social)
        
        covariance = covar_season + covar_remote + covar_spatial\
                      + covar_week

        return gpytorch.distributions.MultivariateNormal(mean, covariance)
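If this forward belongs to a GPyTorch ExactGP model (the constructor is not shown here), a typical inference loop might look like the following sketch; the class name and data tensors are assumptions:

import torch
import gpytorch

# likelihood = gpytorch.likelihoods.GaussianLikelihood()
# model = SpatioTemporalGP(train_x, train_y, likelihood)   # hypothetical wrapper around forward() above
# model.eval(); likelihood.eval()
# with torch.no_grad(), gpytorch.settings.fast_pred_var():
#     posterior = likelihood(model(test_x))                # MultivariateNormal
#     mean = posterior.mean
#     lower, upper = posterior.confidence_region()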
Example #8
    def __call__(self, probs: Tensor, target: Tensor, _: Tensor) -> Tensor:
        assert simplex(probs) and simplex(target)

        pc = probs[:, self.idc, ...].type(torch.float32)
        tc = target[:, self.idc, ...].type(torch.float32)

        w: Tensor = 1 / (
            (einsum("bcwh->bc", tc).type(torch.float32) + 1e-10)**2)
        intersection: Tensor = w * einsum("bcwh,bcwh->bc", pc, tc)
        union: Tensor = w * (einsum("bcwh->bc", pc) + einsum("bcwh->bc", tc))

        divided: Tensor = 1 - 2 * (einsum("bc->b", intersection) +
                                   1e-10) / (einsum("bc->b", union) + 1e-10)

        loss_gde = divided.mean()

        log_p: Tensor = (probs[:, self.idc, ...] + 1e-10).log()
        mask_weighted = torch.einsum(
            "bcwh,c->bcwh", [tc, Tensor(self.weights).to(tc.device)])
        loss_ce = -torch.einsum("bcwh,bcwh->", [mask_weighted, log_p])
        loss_ce /= tc.sum() + 1e-10
        loss = loss_ce + self.lamb * loss_gde

        return loss
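A hedged usage sketch; the surrounding class (its constructor is not shown above) is assumed to store self.idc, self.weights and self.lamb, and both inputs must be channel-wise simplexes of shape (B, C, W, H):

import torch
import torch.nn.functional as F

B, C, W, H = 2, 4, 32, 32
probs = F.softmax(torch.randn(B, C, W, H), dim=1)                               # network output
target = F.one_hot(torch.randint(C, (B, W, H)), C).permute(0, 3, 1, 2).float()  # one-hot mask

# loss_fn = GeneralizedDiceCE(idc=list(range(C)), weights=[1.0] * C, lamb=1.0)  # assumed constructor
# loss = loss_fn(probs, target, torch.zeros(()))    # third argument is unused by __call__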
Example #9
    def move(self, vision):
        vectors: list = []
        for cell in self.muscles:
            vectors.append([i.item() for i in cell.action(vision)])

        averaged_direction: Tensor = Tensor(vectors).mean(0).detach()
        self.vectors = vectors

        for ind, cell in enumerate(self.muscles):
            #for neibor in cell.links:
            #    cell.x += vector[0].
            if cell.energy <= 0:
                del self.muscles[ind]
                continue

            dx: float = self.vectors[ind][0]
            dy: float = self.vectors[ind][1]

            for neibor in cell.links:
                link_vec: tuple = (cell.x - neibor.x, cell.y - neibor.y)
                norm_l: float = norm(link_vec)

                if norm_l == 0:
                    continue

                proj_len: float = scalar_mul(link_vec, (dx, dy))
                projection: tuple = (link_vec[0] * proj_len / norm_l,
                                     link_vec[1] * proj_len / norm_l)

                neibor.x += projection[0]
                neibor.y += projection[1]

            cell.x += self.speed * dx
            cell.y += self.speed * dy
            # backward pass
            cell.brain.backward(averaged_direction)
Example #10
    def test_inertia_2_moves(self, nb, move_at_1, move_at_2, action=2):
        batch_images = Tensor(nb, self.game.get_screen_channels(), self.game.get_screen_height(), self.game.get_screen_width())
        batch_actions = torch.LongTensor(nb)

        for t in range(300):
            self.game.make_action(self.actions[3][1])

        for t in range(nb):
            if t == move_at_1 or t == move_at_2:
                a = action
            else:
                a = 3
            reward = self.game.make_action(self.actions[a][1])

            state = self.game.get_state()
            if state is None:
                self.game.new_episode()
                state = self.game.get_state()

            frame = torch.from_numpy(state.screen_buffer).float()
            batch_images[t] = frame
            batch_actions[t] = a

        return batch_images, batch_actions
Example #11
    def pathak_generator(self, probs: Tensor, target: Tensor, bounds) -> Tensor:
        _, w, h = probs.shape

        # Replace the probabilities with certainty for the few weak labels that we have
        weak_labels = target[...]
        weak_labels[self.ignore, ...] = 0
        assert not simplex(weak_labels) and simplex(target)
        lower, upper = bounds[-1]

        labeled_pixels = weak_labels.any(axis=0)
        assert w * h == (labeled_pixels.sum() + (~labeled_pixels).sum())  # make sure all pixels are covered
        scribbled_probs = weak_labels + einsum("cwh,wh->cwh", probs, ~labeled_pixels)
        assert simplex(scribbled_probs)

        u: Tensor
        max_iter: int = 100
        lr: float = 0.00005
        b: Tensor = Tensor([-lower, upper])
        beta: Tensor = torch.zeros(2, dtype=torch.float32)
        f: Tensor = torch.zeros(2, *probs.shape)
        f[0, ...] = -1
        f[1, ...] = 1

        for i in range(max_iter):
            exped = - einsum("i,icwh->cwh", beta, f).exp()
            u_star = einsum('cwh,cwh->cwh', probs, exped)
            u_star /= u_star.sum(axis=0)
            assert simplex(u_star)

            d_beta = einsum("cwh,icwh->i", u_star, f) - b
            n_beta = torch.max(torch.zeros_like(beta), beta + lr * d_beta)

            u = u_star
            beta = n_beta

        return probs2one_hot(u)
Example #12
    def get_slice(self, info):
        '''
        Return a tensor of shape (c, h, w).
        c: number of windows to use (1, 3 or 6)
        '''
        # img_file = info['image']
        # img_name = img_file.split('.')[0]
        img_name = info['image']

        if self.mode in ['train', 'val']:
            label = [int(v) for v in info[self.class_name].values]
            label = Tensor(label)

        if self.mode == 'train':
            img = self._load_img_file(img_name)
            img = self.aug(img)
            return img, label
        elif self.mode == 'val':
            img = self._load_img_file(img_name)
            img = self.aug(img)
            return img, label
        else:
            img = self._load_img_file(img_name)
            return img, img_name
Example #13
    def get_Q_vals(self, x, model_type):
        '''
        Returns Q values for all actions in the agent's action space from
        either the target or the prediction model.

        Args:
            x (np.ndarray): array with the action first, then the observation
        '''
        if type(x) != Tensor:
            x = Tensor(x)

        x = x.to(self.device)

        q_vals = None
        if model_type == 'prediction':
            q_vals = self.prediction_model(x)
        elif model_type == 'target':
            q_vals = self.target_model(x)
        else:
            print(model_type)
            raise ValueError(
                'Model type must be either "prediction" or "target"')

        return q_vals
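A hedged usage sketch (the agent instance, its device and its models are assumed); the input packs the action in front of the observation, as the docstring describes:

import numpy as np

# obs = env.reset()                                  # assumed Gym-style environment
# x = np.concatenate(([action], obs)).astype(np.float32)
# q_pred = agent.get_Q_vals(x, 'prediction')
# q_target = agent.get_Q_vals(x, 'target')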
Example #14
    def test_net(self, net):

        true_labels = []
        predicted_labels = []
        for datum in self.val_data:

            batch_eval = np.zeros([
                1, datum['features'].shape[0], datum['features'].shape[1],
                datum['features'].shape[2]
            ])
            batch_eval[0, :, :, :] = datum['features']

            batch_label = np.zeros([1, len(self.CLASS_LABELS)])
            batch_label[0, :] = datum['label']
            prediction = net.net(Variable(Tensor(batch_eval))).data.numpy()

            class_pred = np.argmax(prediction)
            class_truth = datum['label']

            true_labels.append(class_truth)
            predicted_labels.append(class_pred)

        self.getConfusionMatrixPlot(true_labels, predicted_labels,
                                    self.CLASS_LABELS)
Example #15
    def weighted_cross_entropy(
            prediction: Tensor,
            target: Tensor,
            inputs: Tensor,
            reduction: str = 'mean') -> Union[float, Tensor]:

        n_classes = prediction.shape[1]

        # Retrieve nearest points and their classes
        _, _, nn_classes = knn.get(inputs.numpy(), exclude_query=True)
        nn_classes = Tensor(nn_classes)
        nn_class_entropy = entropy(nn_classes)

        # Convert target vector into a probability distribution
        target = convert_logits_to_class_distribution(target, n_classes)

        # Apply softmax on model output
        prediction = logsoftmax(prediction)

        loss = (-torch.sum(target * prediction, dim=1).reshape(-1, 1) *
                torch.exp(-nn_class_entropy))

        reduction_method = get_reduction_method(reduction)
        return reduction_method(loss)
Example #16
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: int,
        act: Optional[Callable] = Sigmoid(),
        root_weight: bool = True,
        bias: bool = True,
        **kwargs,
    ):

        kwargs.setdefault('aggr', 'add')
        super(ResGatedGraphConv, self).__init__(**kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.act = act
        self.root_weight = root_weight

        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)

        self.lin_key = Linear(in_channels[1], out_channels)
        self.lin_query = Linear(in_channels[0], out_channels)
        self.lin_value = Linear(in_channels[0], out_channels)

        if root_weight:
            self.lin_skip = Linear(in_channels[1], out_channels, bias=False)
        else:
            self.register_parameter('lin_skip', None)

        if bias:
            self.bias = Parameter(Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()
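If this is the layer of the same name from PyTorch Geometric (torch_geometric.nn.ResGatedGraphConv), a minimal usage sketch would be:

import torch
from torch_geometric.nn import ResGatedGraphConv

x = torch.randn(5, 16)                                    # 5 nodes, 16 features each
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])   # source/target node indices
conv = ResGatedGraphConv(in_channels=16, out_channels=32)
out = conv(x, edge_index)                                 # shape (5, 32)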
Example #17
def infer(net, x_validate, y_validate):
    x_validate = Tensor(x_validate)
    net.eval()  # turn off dropout layer
    with torch.no_grad():
        outputs = net(x_validate)
    outputs = outputs[:, 0]
    sz = int(x_validate.shape[0])
    y_predict = -np.ones(sz)
    loss = 0
    for i in range(sz):
        output = outputs[i]

        labels = y_validate[i]
        lossRaw = 1. - labels * output
        lossRaw[lossRaw < 0.] = 0.
        loss += lossRaw

        y = output
        if y > 0.:
            y_predict[i] = 1.
    loss /= sz
    temp = y_predict == y_validate
    accuracy = (temp.astype(int)).mean()
    return loss, accuracy
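A hedged usage sketch; note that the hinge-style loss above assumes labels in {-1, +1} and a network producing a single score per sample (the model and data below are hypothetical):

import numpy as np
import torch

# net = torch.nn.Linear(8, 1)                               # hypothetical scorer
# x_val = np.random.randn(100, 8).astype(np.float32)
# y_val = np.random.choice([-1.0, 1.0], size=100)
# val_loss, val_acc = infer(net, x_val, y_val)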
Example #18
 def __init__(self,
              env_name,
              num_episodes=10000,
              alpha=0.9,
              beta=0.1,
              gamma=0.9):
     """
     Actor Critic Algorithm using softmax policy
     :param alpha: learning rate for actor
     :param beta: learning rate for critic
     :param gamma: discount rate
     """
     super(ActorCritic, self).__init__(env_name,
                                       num_episodes,
                                       alpha,
                                       gamma,
                                       policy="softmax_policy",
                                       report_freq=500,
                                       beta=beta)
     self._feature = Tensor(self.action_size, self.obs_size)
     self.actions = range(self.action_size)
     self.min_alpha = 0.1
     self.min_beta = 0.01
     self._state_value_weight = None
Example #19
    def step(self, obs, explore=False):
        """
        Take a step forward in environment for a minibatch of observations
        Inputs:
            obs (PyTorch Variable): Observations for this agent
            explore (boolean): Whether or not to add exploration noise
        Outputs:
            action (PyTorch Variable): Actions for this agent
        """

        action = self.policy(obs)

        if self.discrete_action:
            if explore:
                action = gumbel_softmax(action, hard=True)
            else:
                action = onehot_from_logits(action)
        else:  # continuous action
            if explore:
                action += Variable(Tensor(self.exploration.noise()),
                                   requires_grad=False)
                #print(action,self.exploration.noise(),"noise")
            action = action.clamp(-1, 1)
        return action
Example #20
def log_partition_function(rbm, batch_size, all_confs):
    '''Computes (via exact brute-force) the logarithm of the partition function
    of the Ising model defined by the weights of a Restricted Boltzmann Machine

    Arguments:

        :param rbm: Restricted Boltzmann Machine model
        :type rbm: :class:`ebm.models`
        :param batch_size: amount of samples used in every computation step
        :type batch_size: int
        :param all_confs: All possible configurations of the visible neurons
                          of the model
        :type all_confs: torch.Tensor

        :returns logZ: torch.Tensor with the logarithm of the partition function
    '''
    all_confs = DataLoader(all_confs.to(rbm.device), batch_size=batch_size)
    logsumexps = Tensor([]).to(rbm.device)
    #  for batch in tqdm(all_confs, desc='Computing partition function'):
    for batch in all_confs:
        logsumexps = cat([logsumexps, logsumexp(rbm.free_energy(batch).neg())])
    logZ = logsumexp(logsumexps)
    gc.collect()
    return logZ
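A hedged sketch of building all_confs for a small RBM by enumerating every binary configuration of the visible units (feasible only for a modest number of visible units):

import itertools
import torch

n_visible = 10                                             # 2 ** n_visible rows, keep it small
all_confs = torch.tensor(list(itertools.product([0.0, 1.0], repeat=n_visible)))
# log_Z = log_partition_function(rbm, batch_size=256, all_confs=all_confs)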
Example #21
    def predict_on_image(self, image: Union[np.ndarray, Tensor]) -> Tensor:
        """Predict on a single input."""
        self.eval()

        if isinstance(image, np.ndarray) and image.dtype == np.uint8:
            # Convert an image with range [0, 255] to a PyTorch tensor with range [0, 1].
            image = self.tensor_transform(image)

        # Rescale image between 0 and 1.
        if image.dtype is torch.uint8 or image.dtype is torch.int64:
            # If the image is an unscaled tensor.
            image = image.type("torch.FloatTensor") / 255

        if not torch.is_tensor(image):
            image = Tensor(image)

        # Put the image tensor on the device the model weights are on.
        image = image.to(self.device)

        logits = self.forward(image)

        segmentation_mask = torch.argmax(logits, dim=1)

        return segmentation_mask
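A hedged usage sketch; the method accepts either a uint8 NumPy image or an unscaled tensor and rescales it to [0, 1] before running the network (model and image below are hypothetical):

import numpy as np

# img = np.asarray(Image.open('scan.png').convert('RGB'))   # hypothetical uint8 H x W x 3 image
# mask = model.predict_on_image(img)                        # per-pixel argmax class indices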
Example #22
    def test_scalar_edge_loss(self):
        nodes = [Node(), Node(), Node()]
        edges = [
            Edge(nodes[0], nodes[1], Attribute(Tensor([-50.]))),
            Edge(nodes[1], nodes[0], Attribute(Tensor([-40.]))),
            Edge(nodes[0], nodes[2], Attribute(Tensor([1.])))
        ]
        g1 = Graph(nodes, edges)

        g2 = deepcopy(g1)
        g2.ordered_edges[0].attr.val = Tensor([-45.])
        g2.ordered_edges[1].attr.val = Tensor([-40.])
        g2.ordered_edges[2].attr.val = Tensor([1.1])

        loss = GraphLoss(e_fn=MSELoss())
        loss_val = loss(g1, g2).detach().numpy()
        target_loss_val = ((-50. + 45.)**2 + (-40. + 40.)**2 +
                           (1 - 1.1)**2) / 3

        self.assertTrue(np.isclose(loss_val, target_loss_val))
Example #23
    def plot_TestPoints(self,model,file,n=1e5,SurfaceRecievers=False):
        # ----- Plotting Recovered Velocity Misfit ----
        plt.clf()
        number_random_checks = 1000
        
        # Projecting sample points into LatLong
        Xp = torch.rand(int(n),6)
        Xp[:,:3] = Xp[:,:3]*(Tensor(self.xmax)-Tensor(self.xmin))[None,:] + Tensor(self.xmin)
        Xp[:,3:] = Xp[:,3:]*(Tensor(self.xmax)-Tensor(self.xmin))[None,:] + Tensor(self.xmin)

        Vp = model.Velocity(Xp)
        X = np.arange(self.xmin[-1],self.xmax[-1],self.sep)
        plt.scatter(Xp[:,-1].cpu().numpy(),Vp.cpu().detach().numpy(),0.1,'k',label='RecoveredVelocity',alpha=0.1)
        plt.plot(X,self.velmod_fnc(X),'g',label='Interpolated Velocity')
        plt.scatter(self.velmod['Depth'],self.velmod['V'],15,'r',label='Input velocity')
        
        plt.ylabel('Velocity km/s')
        plt.xlabel('Depth')
        plt.legend()
        plt.xlim([self.xmin[-1],self.xmax[-1]])
        plt.savefig(file)
Example #24
                           inh=17.5,
                           nu=[0, 1e-2],
                           norm=78.4)

# Specify dataset wrapper environment.
environment = DatasetEnvironment(dataset=CIFAR10(path='../../data/CIFAR10'),
                                 train=True)

# Build pipeline from components.
pipeline = Pipeline(network=network,
                    environment=environment,
                    encoding=poisson,
                    time=50,
                    plot_interval=1)

# Train the network.
labels = environment.labels
for i in range(60000):
    # Choose an output neuron to clamp to spiking behavior.
    c = choice(10, size=1, replace=False)
    c = 10 * labels[i].long() + Tensor(c).long()
    clamp = torch.zeros(pipeline.time, network.n_neurons, dtype=torch.uint8)
    clamp[:, c] = 1
    clamp_v = torch.zeros(pipeline.time, network.n_neurons, dtype=torch.float)
    clamp_v[:, c] = network.layers['Ae'].thresh + network.layers['Ae'].theta[
        c] + 10

    # Run a step of the pipeline with clamped neuron.
    pipeline.step(clamp={'Ae': clamp}, clamp_v={'Ae': clamp_v})
    network.reset_()
Example #25
    def init_stable_ss(self, loader, eps=1E-4, init_var=1.2, solver="SCS"):

        print("RUNNING N4SID for intialization of A, Bu, C, Du")

        data = [(u, y) for (idx, u, y) in loader]
        U = data[0][0][0].numpy()
        Y = data[0][1][0].numpy()
        sys_id = sippy.system_identification(Y,
                                             U,
                                             'N4SID',
                                             SS_fixed_order=self.nx)

        Ass = sys_id.A
        Bss = sys_id.B
        Css = sys_id.C
        Dss = sys_id.D

        # Sample points, calculate next state
        samples = 5000
        xtild = 3 * np.random.randn(self.nx, samples)
        utild = 3 * np.random.randn(self.nu, samples)
        xtild_next = Ass @ xtild + Bss @ utild

        print("Initializing using LREE")

        solver_tol = 1E-4
        print("Initializing stable LMI ...")

        # Lip SDP multiplier
        if self.method == "Layer":
            multis = cp.Variable((1, 1), 'lambdas', nonneg=True)
            T = multis * np.eye(self.nw)

        elif self.method == "Neuron":
            multis = cp.Variable((self.nw), 'lambdas', nonneg=True)
            T = cp.diag(multis)

        elif self.method == "Network":
            print(
                'YOU ARE USING THE NETWORK IQC MULTIPLIER. THIS DOES NOT WORK. PLEASE CHANGE TO NEURON OR LAYER'
            )
            # Variables can be mapped to tril matrix => (n+1) x n // 2 variables
            multis = cp.Variable((self.nx + 1) * self.nx // 2,
                                 'lambdas',
                                 nonneg=True)

            # Used for mapping vector to tril matrix
            indices = list(range((self.nx + 1) * self.nx // 2))
            Tril_Indices = np.zeros((self.nx, self.nx), dtype=int)
            Tril_Indices[np.tril_indices(self.nx)] = indices

            # return the (ii,jj)'th multiplier
            get_multi = lambda ii, jj: multis[Tril_Indices[ii, jj]]

            # Get the structured matrix in T
            Id = np.eye(self.nx)
            e = lambda ii: Id[:, ii:ii + 1]
            Tij = lambda ii, jj: e(ii) @ e(ii).T if ii == jj else (e(ii) - e(
                jj)) @ (e(ii) - e(jj)).T

            # Construct the full conic combination of IQCs
            T = sum(
                Tij(ii, jj) * get_multi(ii, jj) for ii in range(self.nx)
                for jj in range(ii + 1))
        else:
            print("Invalid method selected. Try Neuron, Layer or Network")

        # Construct LMIs
        P = cp.Variable((self.nx, self.nx), 'P', symmetric=True)
        E = cp.Variable((self.nx, self.nx), 'E')
        F = cp.Variable((self.nx, self.nx), 'F')
        B1 = cp.Variable((self.nx, self.nw), 'Bw')
        B2 = cp.Variable((self.nx, self.nu), 'Bu')

        # Randomly initialize C2
        C2 = np.random.normal(0, init_var / np.sqrt(self.nw),
                              (self.nw, self.nx))
        D22 = np.random.normal(0, init_var / np.sqrt(self.nw),
                               (self.nw, self.nu))

        Ctild = T @ C2
        Dtild = T @ D22

        # Stability LMI
        S = cp.bmat([[np.zeros((self.nx, self.nx)), Ctild.T], [Ctild, -2 * T]])
        z1 = np.zeros((self.nx, self.nw))
        z2 = np.zeros((self.nw, self.nw))
        Mat11 = cp.bmat([[E + E.T - P, z1], [z1.T, z2]]) - S
        Mat21 = cp.bmat([[F, B1]])
        Mat22 = P
        Mat = cp.bmat([[Mat11, Mat21.T], [Mat21, Mat22]])

        # epsilon ensures strict feasibility
        constraints = [
            Mat >> (eps + solver_tol) * np.eye(Mat.shape[0]), P >>
            (eps + solver_tol) * np.eye(self.nx), E + E.T >>
            (eps + solver_tol) * np.eye(self.nx), multis >= 1E-6
        ]

        # ensure wide distribution of eigenvalues for Bw
        bv = self.bv.detach().numpy()[:, None]

        if type(self.nl) is torch.nn.ReLU:
            wt = np.maximum(C2 @ xtild + D22 @ utild + bv, 0)
        else:
            wt = np.tanh(C2 @ xtild + D22 @ utild + bv)

        zt = np.concatenate([xtild_next, xtild, wt, utild], 0)

        EFBB = cp.bmat([[E, -F, -B1, -B2]])

        # empirical covariance matrix PHI
        Phi = zt @ zt.T
        R = cp.Variable(
            (2 * self.nx + self.nw + self.nu, 2 * self.nx + self.nw + self.nu))
        Q = cp.bmat([[R, EFBB.T], [EFBB, E + E.T - np.eye(self.nx)]])

        objective = cp.Minimize(cp.trace(R @ Phi))
        constraints.append(Q >> 0)

        prob = cp.Problem(objective, constraints)

        if solver == "mosek":
            prob.solve(solver=cp.MOSEK)
        else:
            prob.solve(solver=cp.SCS)
        print("Initilization Status: ", prob.status)

        # Solve for output mapping from (W, X, U) -> Y
        # using linear least squares
        X = np.zeros((self.nx, U.shape[1]))
        X[:, 0:1] = sys_id.x0

        Einv = np.linalg.inv(E.value)
        Ahat = Einv @ F.value
        Bwhat = Einv @ B1.value
        Buhat = Einv @ B2.value
        for t in range(1, U.shape[1]):
            w = np.maximum(C2 @ X[:, t - 1:t] + D22 @ U[:, t - 1:t] + bv, 0)
            X[:, t:t +
              1] = Ahat @ X[:, t - 1:t] + Bwhat @ w + Buhat @ U[:, t - 1:t]

        if type(self.nl) is torch.nn.ReLU:
            W = np.maximum(C2 @ X + D22 @ U + bv, 0)
        else:
            W = np.tanh(C2 @ X + D22 @ U + bv)

        Z = np.concatenate([X, W, U], 0)
        output_mats = Y @ np.linalg.pinv(Z)

        C1 = output_mats[:, :self.nx]
        D11 = output_mats[:, self.nx:self.nx + self.nw]
        D12 = output_mats[:, self.nx + self.nw:]

        # Assign results to model
        self.IQC_multipliers = Parameter(Tensor(multis.value))
        self.E = Parameter(Tensor(E.value))
        self.P = Parameter(Tensor(P.value))
        self.F.weight = Parameter(Tensor(F.value))
        self.B1.weight = Parameter(Tensor(B1.value))
        self.B2.weight = Parameter(Tensor(B2.value))

        # Output mappings
        self.C1.weight = Parameter(Tensor(C1))
        self.D12.weight = Parameter(Tensor(D12))
        self.D11.weight = Parameter(Tensor(D11))
        self.by = Parameter(Tensor([0.0]))

        # Store Ctild, C2 is extracted from T^{-1} \tilde{C}
        self.C2tild = Parameter(Tensor(Ctild.value))
        self.Dtild = Parameter(Tensor(Dtild.value))

        print("Init Complete")
Example #26
    def initialize_stable_LMI(self,
                              eps=1E-4,
                              init_var=1.5,
                              obj='B',
                              solver="SCS"):

        solver_tol = 1E-4
        print("Initializing stable LMI ...")
        # Lip SDP multiplier
        if self.method == "Layer":
            multis = cp.Variable((1, 1), 'lambdas', nonneg=True)
            T = multis * np.eye(self.nw)

        elif self.method == "Neuron":
            multis = cp.Variable((self.nw), 'lambdas', nonneg=True)
            T = cp.diag(multis)

        elif self.method == "Network":
            print(
                'YOU ARE USING THE NETWORK IQC MULTIPLIER. THIS DOES NOT WORK. PLEASE CHANGE TO NEURON OR LAYER'
            )
            # Variables can be mapped to tril matrix => (n+1) x n // 2 variables
            multis = cp.Variable((self.nx + 1) * self.nx // 2,
                                 'lambdas',
                                 nonneg=True)

            # Used for mapping vector to tril matrix
            indices = list(range((self.nx + 1) * self.nx // 2))
            Tril_Indices = np.zeros((self.nx, self.nx), dtype=int)
            Tril_Indices[np.tril_indices(self.nx)] = indices

            # return the (ii,jj)'th multiplier
            get_multi = lambda ii, jj: multis[Tril_Indices[ii, jj]]

            # Get the structured matrix in T
            Id = np.eye(self.nx)
            e = lambda ii: Id[:, ii:ii + 1]
            Tij = lambda ii, jj: e(ii) @ e(ii).T if ii == jj else (e(ii) - e(
                jj)) @ (e(ii) - e(jj)).T

            # Construct the full conic combination of IQCs
            T = sum(
                Tij(ii, jj) * get_multi(ii, jj) for ii in range(self.nx)
                for jj in range(ii + 1))
        else:
            print("Invalid method selected. Try Neuron, Layer or Network")

        # Construct LMIs
        P = cp.Variable((self.nx, self.nx), 'P', symmetric=True)
        E = cp.Variable((self.nx, self.nx), 'E')
        F = cp.Variable((self.nx, self.nx), 'F')
        Bw = cp.Variable((self.nx, self.nw), 'Bw')

        Cv = np.random.normal(0, init_var / np.sqrt(self.nx),
                              (self.nw, self.nx))
        Gamma_v = sp.linalg.block_diag(Cv, np.eye(self.nw))
        M = cp.bmat(
            [[-2 * self.alpha * self.beta * T, (self.alpha + self.beta) * T],
             [(self.alpha + self.beta) * T, -2 * T]])

        # Construct final LMI.
        z1 = np.zeros((self.nx, self.nw))
        z2 = np.zeros((self.nw, self.nw))

        Mat11 = cp.bmat([[E + E.T - P, z1], [z1.T, z2]
                         ]) - Gamma_v.T @ M @ Gamma_v

        Mat21 = cp.bmat([[F, Bw]])
        Mat22 = P

        Mat = cp.bmat([[Mat11, Mat21.T], [Mat21, Mat22]])

        # epsilon ensures strict feasibility
        constraints = [
            Mat >> (eps + solver_tol) * np.eye(Mat.shape[0]), P >>
            (eps + solver_tol) * np.eye(self.nx), E + E.T >>
            (eps + solver_tol) * np.eye(self.nx), multis >= 1E-6
        ]

        A = np.random.normal(0, init_var / np.sqrt(self.nx),
                             (self.nx, self.nw))
        # Ass = np.eye(self.nx)

        # ensure wide distribution of eigenvalues for Bw
        objective = cp.Minimize(cp.norm(E @ A - Bw))

        prob = cp.Problem(objective, constraints)

        if solver == "mosek":
            prob.solve(solver=cp.MOSEK)
        else:
            prob.solve(solver=cp.SCS)

        print("Initilization Status: ", prob.status)

        # self.L_squared = torch.nn.Parameter(torch.Tensor(L_sq.value))
        self.IQC_multipliers = Parameter(Tensor(multis.value))
        self.E = Parameter(Tensor(E.value))
        self.P = Parameter(Tensor(P.value))
        self.F.weight = Parameter(Tensor(F.value))
        self.Bw.weight = Parameter(Tensor(Bw.value))
        self.Bu.weight = Parameter(Tensor(0.1 * self.Bu.weight.data))
        self.C.weight = Parameter(Tensor(0.1 * self.C.weight.data))
        self.Du.weight = Parameter(Tensor(0.0 * self.Du.weight.data))

        self.Ctild = Parameter(Tensor(Cv))

        print("Init Complete")
Example #27
    def init_lipschitz_ss(self,
                          loader,
                          gamma=10.0,
                          eps=1E-4,
                          init_var=1.2,
                          solver="SCS"):

        print("RUNNING N4SID for intialization of A, Bu, C, Du")

        data = [(u, y) for (idx, u, y) in loader]
        U = data[0][0][0].numpy()
        Y = data[0][1][0].numpy()
        sys_id = sippy.system_identification(Y,
                                             U,
                                             'N4SID',
                                             SS_fixed_order=self.nx)

        Ass = sys_id.A
        Bss = sys_id.B
        Css = sys_id.C
        Dss = sys_id.D

        # Calculate the trajectory.
        Xtraj = np.zeros((self.nx, Y.shape[1]))
        for tt in range(1, Y.shape[1]):
            Xtraj[:, tt:tt +
                  1] = Ass @ Xtraj[:, tt - 1:tt] + Bss @ U[:, tt - 1:tt]

        # Sample points, calculate next state
        samples = 5000
        xtild = 3 * np.random.randn(self.nx, samples)
        utild = 3 * np.random.randn(self.nu, samples)
        xtild_next = Ass @ xtild + Bss @ utild

        print("Initializing using LREE")

        solver_tol = 1E-3
        print("Initializing stable LMI ...")

        # Lip SDP multiplier
        if self.method == "Layer":
            multis = cp.Variable((1, 1), 'lambdas', nonneg=True)
            T = multis * np.eye(self.nw)

        elif self.method == "Neuron":
            multis = cp.Variable((self.nw), 'lambdas', nonneg=True)
            T = cp.diag(multis)

        elif self.method == "Network":
            print(
                'YOU ARE USING THE NETWORK IQC MULTIPLIER. THIS DOES NOT WORK. PLEASE CHANGE TO NEURON OR LAYER'
            )
            # Variables can be mapped to tril matrix => (n+1) x n // 2 variables
            multis = cp.Variable((self.nx + 1) * self.nx // 2,
                                 'lambdas',
                                 nonneg=True)

            # Used for mapping vector to tril matrix
            indices = list(range((self.nx + 1) * self.nx // 2))
            Tril_Indices = np.zeros((self.nx, self.nx), dtype=int)
            Tril_Indices[np.tril_indices(self.nx)] = indices

            # return the (ii,jj)'th multiplier
            get_multi = lambda ii, jj: multis[Tril_Indices[ii, jj]]

            # Get the structured matrix in T
            Id = np.eye(self.nx)
            e = lambda ii: Id[:, ii:ii + 1]
            Tij = lambda ii, jj: e(ii) @ e(ii).T if ii == jj else (e(ii) - e(
                jj)) @ (e(ii) - e(jj)).T

            # Construct the full conic combination of IQCs
            T = sum(
                Tij(ii, jj) * get_multi(ii, jj) for ii in range(self.nx)
                for jj in range(ii + 1))
        else:
            print("Invalid method selected. Try Neuron, Layer or Network")

        # Construct LMIs
        P = cp.Variable((self.nx, self.nx), 'P', symmetric=True)
        E = cp.Variable((self.nx, self.nx), 'E')
        F = cp.Variable((self.nx, self.nx), 'F')
        B1 = cp.Variable((self.nx, self.nw), 'Bw')
        B2 = cp.Variable((self.nx, self.nu), 'Bu')

        # Output matrices
        C1 = cp.Variable((self.ny, self.nx), 'C1')
        D11 = cp.Variable((self.ny, self.nw), 'D11')
        D12 = cp.Variable((self.ny, self.nu), 'D12')

        # Randomly initialize C2
        C2 = np.random.normal(0, init_var / np.sqrt(self.nw),
                              (self.nw, self.nx))
        D22 = np.random.normal(0, init_var / np.sqrt(self.nw),
                               (self.nw, self.nu))

        Ctild = T @ C2
        Dtild = T @ D22

        # lmi for dl2 gain bound.
        zxu = np.zeros((self.nx, self.nu))
        L_sq = gamma**2

        # Mat11 = utils.bmat([E + E.T - P, z1, L_sq * np.eye(self.nu)]) - Gamma_v.T @ M @ Gamma_v
        Mat11 = cp.bmat([[E + E.T - P, -self.beta * Ctild.T, zxu],
                         [-self.beta * Ctild, 2 * T, -self.beta * Dtild],
                         [zxu.T, -self.beta * Dtild.T,
                          L_sq * np.eye(self.nu)]])

        Mat21 = cp.bmat([[F, B1, B2], [C1, D11, D12]])
        Mat22 = cp.bmat([[P, np.zeros((self.nx, self.ny))],
                         [np.zeros((self.ny, self.nx)),
                          np.eye(self.ny)]])

        Mat = cp.bmat([[Mat11, Mat21.T], [Mat21, Mat22]])

        # epsilon ensures strict feasibility
        constraints = [
            Mat >> solver_tol * np.eye(Mat.shape[0]), P >>
            (eps + solver_tol) * np.eye(self.nx), E + E.T >>
            (eps + solver_tol) * np.eye(self.nx), multis >= 1E-6
        ]

        # Find the closest l2 gain bounded model
        bv = self.bv.detach().numpy()[:, None]

        if type(self.nl) is torch.nn.ReLU:
            wt = np.maximum(C2 @ xtild + D22 @ utild + bv, 0)
            wtraj = np.maximum(C2 @ Xtraj + D22 @ U + bv, 0)
        else:
            wt = np.tanh(C2 @ xtild + D22 @ utild + bv)
            wtraj = np.tanh(C2 @ Xtraj + D22 @ U + bv)

        zt = np.concatenate([xtild_next, xtild, wt, utild], 0)

        EFBB = cp.bmat([[E, -F, -B1, -B2]])

        # empirical covariance matrix PHI
        Phi = zt @ zt.T
        R = cp.Variable(
            (2 * self.nx + self.nw + self.nu, 2 * self.nx + self.nw + self.nu))
        Q = cp.bmat([[R, EFBB.T], [EFBB, E + E.T - np.eye(self.nx)]])

        # Add additional term for output errors

        eta = Y - C1 @ Xtraj - D11 @ wtraj - D12 @ U

        objective = cp.Minimize(cp.trace(R @ Phi) + cp.norm(eta, p="fro")**2)
        constraints.append(Q >> 0)

        prob = cp.Problem(objective, constraints)
        if solver == "mosek":
            prob.solve(solver=cp.MOSEK)
        else:
            prob.solve(solver=cp.SCS)

        print("Initilization Status: ", prob.status)

        # Assign results to model
        self.IQC_multipliers = Parameter(Tensor(multis.value))
        self.E = Parameter(Tensor(E.value))
        self.P = Parameter(Tensor(P.value))
        self.F.weight = Parameter(Tensor(F.value))
        self.B1.weight = Parameter(Tensor(B1.value))
        self.B2.weight = Parameter(Tensor(B2.value))

        # Output mappings
        self.C1.weight = Parameter(Tensor(C1.value))
        self.D12.weight = Parameter(Tensor(D12.value))
        self.D11.weight = Parameter(Tensor(D11.value))
        self.by = Parameter(Tensor([0.0]))

        # Store Ctild and Dtild, C2 and D22 are extracted from
        #  T^{-1} \tilde{C} and T^{-1} \tilde{Dtild}
        self.C2tild = Parameter(Tensor(Ctild.value))
        self.Dtild = Parameter(Tensor(Dtild.value))

        print("Init Complete")
Example #28
    def initialize_lipschitz_LMI(self,
                                 gamma=10.0,
                                 eps=1E-4,
                                 init_var=1.5,
                                 solver="SCS"):
        solver_tol = 1E-4
        print("Initializing Lipschitz LMI ...")
        # Lip SDP multiplier
        if self.method == "Layer":
            multis = cp.Variable((1), 'lambdas', nonneg=True)
            T = multis * np.eye(self.nx)

        elif self.method == "Neuron":
            multis = cp.Variable((self.nx), 'lambdas', nonneg=True)
            T = cp.diag(multis)

        elif self.method == "Network":
            # Variables can be mapped to tril matrix => (n+1) x n // 2 variables
            multis = cp.Variable((self.nx + 1) * self.nx // 2,
                                 'lambdas',
                                 nonneg=True)

            # return the (ii,jj)'th multiplier
            get_multi = lambda ii, jj: multis[(ii * (ii + 1)) // 2 + jj]

            # Get the structured matrix in T
            Id = np.eye(self.nx)
            e = lambda ii: Id[:, ii:ii + 1]
            Tij = lambda ii, jj: e(ii) @ e(ii).T if ii == jj else (e(ii) - e(
                jj)) @ (e(ii) - e(jj)).T

            # Construct the full conic comibation of IQC's
            T = sum(
                Tij(ii, jj) * get_multi(ii, jj) for ii in range(0, self.nx)
                for jj in range(0, ii + 1))
        else:
            print("Invalid method selected. Try Neuron, Layer or Network")

        # square of L2 gain
        # L_sq = cp.Variable((1, 1), "rho")
        L_sq = gamma**2

        # Construct LMIs
        P = cp.Variable((self.nx, self.nx), 'P', symmetric=True)
        E = cp.Variable((self.nx, self.nx), 'E')
        F = cp.Variable((self.nx, self.nx), 'F')
        Bu = cp.Variable((self.nx, self.nu), 'Bu')
        C = cp.Variable((self.ny, self.nx), 'C')
        Dw = cp.Variable((self.ny, self.nx), 'Dw')
        Du = cp.Variable((self.ny, self.nu), 'Du')

        Bw = cp.Variable((self.nx, self.nw), 'Bw')

        Cv = np.random.normal(0, init_var / np.sqrt(self.nx),
                              (self.nw, self.nx))

        Gamma_1 = sp.linalg.block_diag(Cv, np.eye(self.nw))
        Gamma_v = np.concatenate(
            [Gamma_1, np.zeros((2 * self.nw, self.nu))], axis=1)

        M = cp.bmat(
            [[-2 * self.alpha * self.beta * T, (self.alpha + self.beta) * T],
             [(self.alpha + self.beta) * T.T, -2 * T]])

        # Construct final LMI.
        zxw = np.zeros((self.nx, self.nw))
        zxu = np.zeros((self.nx, self.nu))
        zwu = np.zeros((self.nw, self.nu))
        zww = np.zeros((self.nw, self.nw))

        # Mat11 = utils.bmat([E + E.T - P, z1, L_sq * np.eye(self.nu)]) - Gamma_v.T @ M @ Gamma_v
        Mat11 = cp.bmat([[E + E.T - P, zxw, zxu], [zxw.T, zww, zwu],
                         [zxu.T, zwu.T, L_sq * np.eye(self.nu)]
                         ]) - Gamma_v.T @ M @ Gamma_v

        Mat21 = cp.bmat([[F, Bw, Bu], [C, Dw, Du]])
        Mat22 = cp.bmat([[P, np.zeros((self.nx, self.ny))],
                         [np.zeros((self.ny, self.nx)),
                          np.eye(self.ny)]])

        Mat = cp.bmat([[Mat11, Mat21.T], [Mat21, Mat22]])

        # epsilon ensures strict feasibility
        constraints = [
            Mat >> solver_tol * np.eye(Mat.shape[0]), P >>
            (eps + solver_tol) * np.eye(self.nx), E + E.T >>
            (eps + solver_tol) * np.eye(self.nx), multis >= 1E-6
        ]

        # Just find a feasible point
        A = np.random.normal(0, init_var / np.sqrt(self.nx),
                             (self.nx, self.nx))

        # Just find a feasible point
        objective = cp.Minimize(cp.norm(E @ A - Bw))

        prob = cp.Problem(objective, constraints)

        if solver == "mosek":
            prob.solve(solver=cp.MOSEK)
        elif solver == "SCS":
            prob.solve(solver=cp.SCS)
        else:
            print("Select valid sovler")

        print("Initilization Status: ", prob.status)

        # self.L_squared = torch.nn.Parameter(torch.Tensor(L_sq.value))
        self.IQC_multipliers = Parameter(Tensor(multis.value))
        self.E = Parameter(Tensor(E.value))
        self.P = Parameter(Tensor(P.value))
        self.F.weight = Parameter(Tensor(F.value))
        self.Bw.weight = Parameter(Tensor(Bw.value))
        self.Bu.weight = Parameter(Tensor(Bu.value))
        self.C.weight = Parameter(Tensor(C.value))
        self.Dw.weight = Parameter(Tensor(Dw.value))
        self.Du.weight = Parameter(Tensor(Du.value))
        self.Cv.weight = Parameter(Tensor(Cv))
Example #29
    def __getitem__(self, idx):

        # read image
        img_name_a = os.path.join(self.training_image_path,
                                  self.img_a_names[idx])

        img_name_b = os.path.join(self.training_image_path,
                                  self.img_b_names[idx])

        image_a = cv2.imread(img_name_a, cv2.IMREAD_COLOR)
        image_b = cv2.imread(img_name_b, cv2.IMREAD_COLOR)
        vertices = ast.literal_eval(self.img_a_vertices[idx])

        # read theta
        theta = self.theta_array[idx, :]

        if self.geometric_model == 'affine':

            # reshape theta to 2x3 matrix [A|t] where
            # first row corresponds to X and second to Y
            theta = theta[[3, 2, 5, 1, 0, 4]].reshape(2, 3)

        elif self.geometric_model == 'tps':

            theta = np.expand_dims(np.expand_dims(theta, 1), 2)

        # hold in the image_a only the crop but maintaining resolution
        # we achieve this by blanking each pixel outside the vertices
        image_a = blank_outside_verts(image_a, vertices)

        # make arrays float tensor for subsequent processing
        image_a = Tensor(image_a.astype(np.float32))
        image_b = Tensor(image_b.astype(np.float32))
        theta = Tensor(theta.astype(np.float32))

        # permute order of image to CHW
        image_a = image_a.transpose(1, 2).transpose(0, 1)
        image_b = image_b.transpose(1, 2).transpose(0, 1)

        # Resize image using bilinear sampling with identity affine tnf
        if image_a.size()[0] != self.out_h or image_a.size()[1] != self.out_w:

            image_a = self.affineTnf(
                Variable(image_a.unsqueeze(0),
                         requires_grad=False)).data.squeeze(0)

        # Resize image using bilinear sampling with identity affine tnf
        if image_b.size()[0] != self.out_h or image_b.size()[1] != self.out_w:

            image_b = self.affineTnf(
                Variable(image_b.unsqueeze(0),
                         requires_grad=False)).data.squeeze(0)

        # if self.mode == 'test':
        sample = {
            'image_a': image_a,
            'vertices_a': Tensor(vertices),
            'image_b': image_b,
            'theta': theta
        }

        if self.transform:
            sample = self.transform(sample)

        return sample
Example #30
        image = discriminator(image.cuda())
        plotter.plotImage(image[0])
        plotter.plotImage(image[1])

    noise = noiseGenerator.generate(batchSize, channels,
                                    (noiseHeight, noiseWidth))

    generatedImage = generator(noise)
    plotter.plotImage(generatedImage)
    plotter.plotImage(discriminator(generatedImage)[0])

#%%
if (training_mode):
    discriminatorLossesAveragesArray = []
    generatorLossesAveragesArray = []
    tensorReal = Tensor(full((batchSize, 1), fill_value=1, dtype=f32)).cuda()
    tensorFake = Tensor(full((batchSize, 1), fill_value=0, dtype=f32)).cuda()

#%%
noise = noiseGenerator.generate(batchSize, channels, (noiseHeight, noiseWidth))
print(generator(noise).size())
plt.imshow(t.ToPILImage()(generator(noise)[0].cpu().detach()))

#%%
print(len(training_loader))

#%%
tempDiscriminatorRatio = discriminatorIterationsRatio
tempGeneratorRatio = generatorIterationsRatio

#%%