def __sanity_checks(X, y=None):
    if len(np.shape(X)) != 2:
        raise ValueError(
            f'Dataset X must be an array of shape (n_datapoints, 2), was given {np.shape(X)}.'
        )
    n_datapoints, feature_dim = np.shape(X)
    if feature_dim != 2:
        raise ValueError(
            f'Dataset X must have a feature dimension of 2, was given {feature_dim}.'
        )
    if y is not None:
        if len(y) != n_datapoints:
            raise ValueError(
                f'Targets y must have the same length as X. Expected length {n_datapoints}, was given {len(y)}.'
            )
        data_classes = np.unique(y)
        if len(data_classes) != 2:
            raise ValueError(
                'Currently only binary classification problems are supported!'
            )
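# Hedged usage sketch for __sanity_checks, assuming it is a module-level helper
# (its signature takes no self) and that `np` is the module's NumPy import.
# The toy data below is illustrative only.
def _demo_sanity_checks():
    X = np.random.rand(100, 2)             # 100 two-dimensional datapoints
    y = np.random.randint(0, 2, size=100)  # two classes -> passes all checks
    __sanity_checks(X, y)
    try:
        __sanity_checks(np.random.rand(100, 3))  # feature dimension != 2
    except ValueError as err:
        print(err)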
def __init__(self, layout, reward):
    self.reward_init = reward
    self.reward = reward
    self.layout = layout
    self.max_i = np.shape(layout)[0] - 1
    self.max_j = np.shape(layout)[1] - 1
    self.state = [self.max_i, self.max_j]
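# Hedged usage sketch (illustrative only): for a 3x4 layout the initial state
# is the maximal cell [max_i, max_j] = [2, 3]. `GridEnvironment` is a
# hypothetical name standing in for the class this __init__ belongs to.
#
#   layout = np.zeros((3, 4))
#   env = GridEnvironment(layout, reward=-1.0)
#   assert env.state == [2, 3]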
def model_function(X):
    if np.shape(X) == (self.feature_dim,):
        return np.array([circuit(X, weights)])
    elif len(np.shape(X)) == 2 and np.shape(X)[1] == self.feature_dim:
        return np.array([circuit(x, weights) for x in X])
    else:
        raise ValueError(
            f'X must be either a single feature vector of length {self.feature_dim} or a '
            f'collection of vectors of shape (*, {self.feature_dim})!'
        )
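# Hedged usage sketch: model_function accepts either a single feature vector
# or a 2-D batch of them. feature_dim = 2 is an illustrative assumption.
#
#   y_single = model_function(np.array([0.1, 0.5]))   # output of shape (1,)
#   y_batch = model_function(np.random.rand(10, 2))   # output of shape (10,)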
def apply(self, features, weights):
    if np.shape(features) != self.feature_shape:
        raise ValueError(
            f'Features must have shape {self.feature_shape}, was given {np.shape(features)}.'
        )
    if np.shape(weights) != self.weight_shape:
        raise ValueError(
            f'Weights must have shape {self.weight_shape}, was given {np.shape(weights)}.'
        )
    features = self._feature_padding(features)
    fpl = self.n_features_per_qubit * self.n_qubits  # features per (sub-)layer
    for i in range(self.n_layers):
        for j in range(self.n_sub_layers):
            self.layer(features[j * fpl:(j + 1) * fpl], weights[i, j, :])
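# Hedged shape sketch (illustrative numbers, not fixed by the class): with
# n_qubits = 4, n_features_per_qubit = 2, n_sub_layers = 3 and n_layers = 2,
# each sub-layer consumes fpl = 8 features, so the padded feature vector is
# split as features[0:8], features[8:16], features[16:24], and the same split
# is re-encoded with fresh weights weights[i, j, :] in every layer i.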
def __init__(self, embedding, X, y):
    """
    Args:
        embedding (BaseEmbedding): Instance of BaseEmbedding
        X (np.array): Training dataset of shape (n_datapoints, feature_dim)
        y (np.array): Training labels of shape (n_datapoints,)
    """
    # check that the embedding object is of the correct type
    if not isinstance(embedding, BaseEmbedding):
        raise ValueError(
            'Embedding must be an instance that inherits from the BaseEmbedding class.'
        )
    self.embedding = embedding

    # check that the dataset X has the correct shape
    if len(np.shape(X)) != 2:
        raise ValueError(
            f'Dataset X must be an array of shape (n_datapoints, feature_dim), was given {np.shape(X)}.'
        )
    self.n_datapoints, self.feature_dim = np.shape(X)

    # check that the dataset X and the training labels y have the same first dimension
    if self.n_datapoints != np.shape(y)[0]:
        raise ValueError(
            f'Dataset X and training labels y must have the same first dimension. '
            f'Got {self.n_datapoints} datapoints and {np.shape(y)[0]} labels.'
        )

    # check that the dataset feature dimension matches the embedding feature dimension
    if self.feature_dim != embedding.feature_dim:
        raise ValueError(
            f'Dataset dimension does not match embedding feature dimension! '
            f'Expected d={embedding.feature_dim}, was given d={self.feature_dim}.'
        )

    self.X = np.array(X, dtype=float, requires_grad=False)
    self.y = np.array(y, requires_grad=False)
    self.data_classes = np.unique(self.y)
    self.n_data_classes = len(self.data_classes)
    # empirical class priors, estimated from the label frequencies
    self.class_priors = np.array([
        len(self.X[self.y == data_class]) / self.n_datapoints
        for data_class in self.data_classes
    ])
    if self.n_data_classes > 2:
        raise NotImplementedError(
            'EmbeddingTrainer currently only supports 2-class classification datasets!'
        )
    self.X_1 = self.X[self.y == self.data_classes[0]]
    self.X_2 = self.X[self.y == self.data_classes[1]]
    self.opt = None
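# Hedged usage sketch (illustrative only): `MyEmbedding` stands in for any
# concrete BaseEmbedding subclass with feature_dim = 2; the toy data is made up.
#
#   embedding = MyEmbedding(n_qubits=2)
#   X = np.random.rand(50, 2)
#   y = np.array([0] * 25 + [1] * 25)
#   trainer = EmbeddingTrainer(embedding, X, y)   # class priors: [0.5, 0.5]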
def feature_map(self, X, weights, return_type='vector'):
    if return_type == 'vector':
        circ = self._embedding_circuit_vector
    elif return_type == 'matrix':
        circ = self._embedding_circuit_matrix
    else:
        raise ValueError("'return_type' must be either 'vector' or 'matrix'")
    if np.shape(X) == (self.feature_dim,):
        return np.array([circ(X, weights)])
    elif len(np.shape(X)) == 2 and np.shape(X)[1] == self.feature_dim:
        return np.array([circ(x, weights) for x in X])
    else:
        raise ValueError(
            f'Argument must be either a single feature vector of length {self.feature_dim} or a '
            f'collection of vectors of shape (*, {self.feature_dim})!'
        )
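# Hedged usage sketch: judging by the method names (an assumption, not
# confirmed here), 'vector' returns one embedded state per datapoint and
# 'matrix' one density-matrix-like output per datapoint; exact shapes depend
# on the concrete embedding circuit.
#
#   phis = embedding.feature_map(X, weights, return_type='vector')
#   rhos = embedding.feature_map(X, weights, return_type='matrix')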
def __getitem__(self, idx):
    if self.ts_type == "spikes":
        TS = self.ts_spikes
    elif self.ts_type == "calcium":
        TS = self.ts_calcium
    else:
        raise ValueError("'ts_type' must be either 'spikes' or 'calcium'")
    # sample a random window of length ts_length, plus the target step after it
    t_initial = np.random.randint(0, np.shape(TS)[1] - self.ts_length - 1)
    ts = np.transpose(TS[:, t_initial:t_initial + self.ts_length])
    target = TS[:, t_initial + self.ts_length + 1]
    if self.noise:
        # optionally corrupt inputs and targets with additive Gaussian noise
        ts = ts + np.random.normal(0, self.noise, ts.shape)
        target = target + np.random.normal(0, self.noise, target.shape)
    return [torch.from_numpy(ts), target]
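# Hedged usage sketch: the dataset plugs into a standard PyTorch DataLoader.
# `CalciumDataset` is a hypothetical name for the class this method belongs
# to, and the constructor arguments are illustrative.
#
#   from torch.utils.data import DataLoader
#   dataset = CalciumDataset(..., ts_type="calcium", ts_length=50, noise=0.1)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True)
#   ts_batch, target_batch = next(iter(loader))   # ts_batch: (32, 50, n_neurons)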
def _train(self, n_epochs, batch_size, starting_weights, compute_cost):
    if starting_weights is None:
        weights = self.embedding.random_starting_weights()
    else:
        if np.shape(starting_weights) != self.embedding.weight_shape:
            raise ValueError(
                f'Starting weights must have shape {self.embedding.weight_shape}, '
                f'was given {np.shape(starting_weights)}.'
            )
        weights = starting_weights
    weights_history = np.zeros((n_epochs + 1,) + self.embedding.weight_shape)
    weights_history[0] = weights
    if compute_cost:
        cost_history = np.zeros(n_epochs + 1)
        cost_history[0] = self.cost(weights, self.X_1, self.X_2)
    # expose the embedding to the whole dataset for 'n_epochs' epochs
    for i in range(n_epochs):
        # iterate over the data in mini-batches
        for input_batch, target_batch in self.__iterate_minibatches(batch_size):
            # split the batch into the two classes before the optimizer step
            mask = target_batch == self.data_classes[0]
            inputs_1 = input_batch[mask]
            inputs_2 = input_batch[np.logical_not(mask)]
            weights = self.opt.step(self.cost, weights, inputs_1=inputs_1, inputs_2=inputs_2)
        weights_history[i + 1] = weights
        if compute_cost:
            cost_history[i + 1] = self.cost(weights, self.X_1, self.X_2)
            print(f'Cost after epoch {i + 1}: {cost_history[i + 1]}')
        else:
            print(f'Completed epoch {i + 1}')
    if compute_cost:
        return weights, (weights_history, cost_history)
    else:
        return weights, weights_history
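# Hedged usage sketch: a typical training call, assuming PennyLane is imported
# as `qml` and that an optimizer has been assigned to trainer.opt beforehand;
# the argument values are illustrative, not recommendations.
#
#   trainer.opt = qml.GradientDescentOptimizer(stepsize=0.1)
#   weights, (w_hist, c_hist) = trainer._train(
#       n_epochs=20, batch_size=16, starting_weights=None, compute_cost=True)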