Code example #1
def single_param_cross_validator_func(subject, config, dataset, label, values, apply_func):
    windows = config['windows']

    all_scores = []

    full_data = dataset.get_data([subject])[subject]

    for this_value in values:
        print('using {} = {}'.format(label, this_value))

        apply_func(config, this_value)

        print('extracting epoch for subject ', subject)
        this_subject_data = extract_epochs(full_data, config)
        print('extraction complete for ', subject)

        scores = []
        for window_start in windows:
            print('start at ', window_start, end=', ')
            data = get_window(this_subject_data, config=config, start=window_start)
            score = classify(data, config=config)
            scores.append(score)
            print(score)
        all_scores.append(scores)

    return all_scores
Code example #2
File: bot_grid.py    Project: mouse1130/wow_fishing_bot
    def find_wow(self):

        process_running = utils.check_process(
            self.game_process_name.edit.text())
        window = utils.get_window(self.window_name.edit.text())

        # check that the WoW process is running and its window was found
        if process_running and window:
            self.bot.set_wow_frame(window)
            self.log_viewer.emitter.emit("Wow window at " + str(self.bot.frame))
            self.fish_button.setEnabled(True)
            self.find_wow_button.setStyleSheet("background: green;")
        else:
            self.log_viewer.emitter.emit("Wow not found running")
            self.fish_button.setEnabled(False)
            self.find_wow_button.setStyleSheet("")
Code example #3
    def forward(self, x, *dummy):
        # x: (batch, height, width, channels)
        batch, h, w, c = x.shape
        self.tensor_in = x
        self.h_range = utils.get_out_length(h, self.pool_size, self.stride)
        self.w_range = utils.get_out_length(w, self.pool_size, self.stride)
        tensor_out = np.zeros(
            (batch, self.h_range, self.w_range, c)).astype(np.float32)
        # pool each window over the spatial axes
        for window, i, j, _, _, _, _ in utils.get_window(
                x, self.h_range, self.w_range, self.pool_size, self.stride):
            if self.type == 'max':
                tensor_out[:, i, j, :] = np.amax(window, axis=(1, 2))
            elif self.type == 'average':
                tensor_out[:, i, j, :] = np.mean(window, axis=(1, 2))
            else:
                raise ValueError('unexpected pooling type')
        self.value = tensor_out
Code example #4
def by_window_func(subject, config, dataset):
    print("loading data for subject", subject)

    this_subject_data = dataset.get_data([subject])[subject]
    this_subject_data = extract_epochs(this_subject_data, config)

    scores = []

    windows = config['windows']

    for window_start in windows:
        data = get_window(this_subject_data, config=config, start=window_start)
        score = classify(data, config=config)
        print(score)
        scores.append(score)

    return scores
Code example #5
    def forward(self, x, args):
        # x: (batch, height, width, in_channels)
        batch, h, w, c = x.shape
        self.h_range = utils.get_out_length(h, self.weight_size, self.stride)
        self.w_range = utils.get_out_length(w, self.weight_size, self.stride)
        self.tensor_in = x
        tensor_out = np.zeros((batch, self.h_range, self.w_range,
                               self.out_depth)).astype(np.float32)
        # convolve: contract each window with the kernel over (h, w, in_channels)
        for window, i, j, _, _, _, _ in utils.get_window(
                x, self.h_range, self.w_range, self.weight_size, self.stride):
            tensor_out[:, i, j, :] = np.tensordot(window,
                                                  self.weight,
                                                  axes=([1, 2, 3], [0, 1, 2]))
        self.l2_loss = utils.get_l2_loss(args['weight_decay'], self.weight)
        if self.bias is not None:
            self.value = tensor_out + self.bias
            self.l2_loss += utils.get_l2_loss(args['weight_decay'], self.bias)
        else:
            # only fall back to the un-biased output when there is no bias term
            self.value = tensor_out
Code example #6
    def backward(self, gradient, *dummy):
        batch, _, _, c = self.tensor_in.shape
        dJ_dlayer = np.zeros_like(self.tensor_in).astype(np.float32)

        # average pooling spreads the gradient evenly over the window
        if self.type == 'average':
            pooling_kernel = np.ones(
                [batch, self.pool_size, self.pool_size, c]) / self.pool_size**2
        for window, i, j, h1, h2, w1, w2 in utils.get_window(
                self.tensor_in, self.h_range, self.w_range, self.pool_size,
                self.stride):
            gradient_window = gradient[0][:, i:i + 1, j:j + 1, :]
            # max pooling routes the gradient only to the maximal entries
            if self.type == 'max':
                pooling_kernel = window.reshape([batch, -1, c])
                pooling_kernel = np.array(
                    pooling_kernel.max(axis=1, keepdims=True) ==
                    pooling_kernel).astype(np.float32).reshape(window.shape)
            dJ_dlayer[:, h1:h2, w1:w2, :] += gradient_window * pooling_kernel
        self.grads = [dJ_dlayer]
Code example #7
    def backward(self, gradient, args):
        f_size = self.weight_size
        dJ_dw = np.zeros(self.weight.shape).astype(np.float32)
        dJ_dlayer = np.zeros(self.tensor_in.shape).astype(np.float32)
        for window, i, j, h1, h2, w1, w2 in utils.get_window(
                self.tensor_in, self.h_range, self.w_range, f_size,
                self.stride):
            gradient_window = np.expand_dims(
                gradient[0][:, i:i + 1, j:j + 1, :], 3)
            # accumulate dJ/dW and propagate dJ/dX for this window position
            dJ_dw += np.sum(np.expand_dims(window, 4) @ gradient_window, 0)
            dJ_dlayer[:, h1:h2, w1:w2, :] += np.sum(
                gradient_window * np.expand_dims(self.weight, 0), -1)

        # update the kernel with the accumulated gradient via utils.apply_gradient
        self.weight, self._dJ_dw_accu = utils.apply_gradient(
            self.weight, self._dJ_dw_accu, dJ_dw, args)

        if self.bias is not None:
            dJ_db = np.sum(gradient[0], (0, 1, 2))
            self.bias, self._dJ_db_accu = utils.apply_gradient(
                self.bias, self._dJ_db_accu, dJ_db, args)
            self.grads = [dJ_dlayer, dJ_dw, dJ_db]
        else:
            self.grads = [dJ_dlayer, dJ_dw]
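
The four layer snippets above all consume utils.get_window as a generator that yields, for every output position, the input window together with its output indices (i, j) and the input slice bounds (h1, h2, w1, w2). The project's actual helper is not shown on this page; the following is only a minimal sketch of a generator with that interface, assuming NHWC tensors and a square window, and the real utils.get_window may be implemented differently.

def get_window(x, h_range, w_range, size, stride):
    # hypothetical sketch of the generator interface consumed above;
    # the real utils.get_window may differ
    for i in range(h_range):
        h1, h2 = i * stride, i * stride + size
        for j in range(w_range):
            w1, w2 = j * stride, j * stride + size
            # window: (batch, size, size, channels) slice of the NHWC input
            yield x[:, h1:h2, w1:w2, :], i, j, h1, h2, w1, w2

Assuming get_out_length(h, size, stride) returns (h - size) // stride + 1, each yielded window lines up with the input slice that the backward passes write into via dJ_dlayer[:, h1:h2, w1:w2, :].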
Code example #8
import matplotlib.pyplot as plt

# df (the input time-series DataFrame), utils and LSTMAutoEncoder are assumed
# to be defined elsewhere in the surrounding project
var_name = "production"
# Visualise data
plt.title("Dataset:")
plt.plot(df[var_name])
plt.show()

# Use green area as training data (non-anomalous)
start, end = 50, 100
plt.title("Non-anomalous data (green segment) used as training data")
plt.plot(df[var_name])
plt.plot(df[var_name][start:end], c='g')
plt.show()

# Create a trajectory matrix, i.e. rolling window representation
timeseries_train = df[var_name][start:end]
traj_mat_train = utils.get_window(timeseries_train, backward=4)

timeseries_test = df[var_name]
traj_mat_test = utils.get_window(timeseries_test, backward=4)

lae = LSTMAutoEncoder()
lae.fit(traj_mat_train)
scores = lae.predict(traj_mat_test)

# flag roughly the top 1% of reconstruction errors as anomalies
ratio = 0.99
sorted_scores = sorted(scores)
threshold = sorted_scores[min(round(len(scores) * ratio), len(scores) - 1)]

plt.plot(scores)
plt.plot([threshold] * len(scores), c='r')
plt.title("Reconstruction errors")
plt.show()