Code example #1
 def save(self, fname, x):
     path = os.path.join(self.path, fname + self.ext)
     if os.path.exists(path) and 'grad' in fname:
         # Accumulate gradients: add the incoming values to the copy on disk.
         x = cp.asnumpy(x)
         old = np.load(path, mmap_mode='c')
         np.save(path, old + x)
     else:
         # Without this branch, cp.save would clobber the accumulated file.
         cp.save(path, x)
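Side note on why the accumulation branch works: cupy.save writes a standard .npy file, which numpy.load can read back on the host. A minimal round-trip sketch, assuming only cupy and numpy (grad_demo.npy is an illustrative file name):

import cupy as cp
import numpy as np

g = cp.arange(6, dtype=cp.float32).reshape(2, 3)
cp.save('grad_demo.npy', g)         # written as a plain .npy file
host = np.load('grad_demo.npy')     # read back as a numpy.ndarray
np.save('grad_demo.npy', host + 1)  # accumulate on the host and rewrite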
Code example #2
    def save_mean_and_variance(self):
        subset_size = self.num_observations_per_file
        new_total_size = self.total_images + subset_size
        co1 = self.total_images / new_total_size
        co2 = subset_size / new_total_size

        images = cp.asarray(self.images)

        subset_mean = cp.mean(images, axis=(0, 1))
        subset_var = cp.var(images, axis=(0, 1))

        new_dataset_mean = subset_mean if self.dataset_mean is None else co1 * self.dataset_mean + co2 * subset_mean
        # Pooled variance via the identity Var(X) = E[X^2] - E[X]^2.
        new_dataset_var = subset_var if self.dataset_var is None else co1 * (
            self.dataset_var + self.dataset_mean**2) + co2 * (
                subset_var + subset_mean**2) - new_dataset_mean**2

        # Clamp small negative values caused by floating-point error.
        new_dataset_var[new_dataset_var < 0] = 0

        self.dataset_var = new_dataset_var
        self.dataset_mean = new_dataset_mean
        self.dataset_std = cp.sqrt(self.dataset_var)

        cp.save(os.path.join(self.path, "mean.npy"), self.dataset_mean)
        cp.save(os.path.join(self.path, "std.npy"), self.dataset_std)
Code example #3
def main():
    count = 0
    # Tensor.cuda() is not in-place; the result must be assigned back.
    temp = torch.zeros((384, 256, 3), dtype=torch.float64).cuda()

    for id in idlist:
        img_np = torch.tensor(np.asarray(sampler.id2pix(str(id))),
                              dtype=torch.float64).cuda()
        # Vectorized form of the per-pixel triple loop:
        #     temp[x][y][c] += img_np[x][y][c] / 255
        try:
            temp += img_np / 255
        except RuntimeError:
            print("ERROR")
        count += 1
        if count % 10000 == 0:
            print(count)
    # Divide by the image count to get the mean, then move back to the host.
    temp = (temp / count).cpu().numpy()

    cp.save('pix_mean', temp)
    plt.imshow(temp)
    plt.show()
Code example #4
def mlp(path):
    X, y, Xt, yt, cols = get_data(fillna=0, norm=True, dropids=True)
    X = cp.asarray(X)
    y = cp.asarray(y)
    Xt = cp.asarray(Xt)
    yt = cp.asarray(yt)
    params = {
        'learning_rate_init': 0.005,
        'verbose': True,
        'hidden_layer_sizes': (64, 64, 64),
        'max_iter': 100,
        'batch_size': 4096,
        'shuffle': True,
        'alpha': 0.000,
        'model_path': f'{path}/cache/mlp.pth',
    }

    clf = MLPClassifier(**params)
    clf.fit(X, y, Xt, yt)

    yp = clf.predict_proba(Xt)[:,1]
    score = roc_auc_score(yt, yp)
    print('AUC: %.4f' % score)

    cp.save('mlp.npy', yp)
    #return yp
    print(cols)
    return score
Code example #5
 def save(self):
     path_gamma = self.prefix + self.name + '_gamma.npy'
     path_beta = self.prefix + self.name + '_beta.npy'
     with open(path_gamma, 'wb') as f:
         cp.save(f, self.gamma)
     with open(path_beta, 'wb') as f:
         cp.save(f, self.beta)
Code example #6
 def save(self):
     path_w = self.prefix + self.name + '_w.npy'
     path_b = self.prefix + self.name + '_b.npy'
     with open(path_w, 'wb') as f:
         cp.save(f, self.weights)
     with open(path_b, 'wb') as f:
         cp.save(f, self.b)
Code example #7
File: makeSA.py Project: thnguyen996/fault-injection
 def np_to_cp(self):
     for name, param in self.state_dict.items():
         if "weight" in name:
             param_np = param.cpu().numpy()
             param_cp = cp.asarray(param_np)
             cp.save("./save_cp/" + str(name) + ".npy", param_cp)
     print("Converted weights to cupy")
Code example #8
File: xgb.py Project: daxiongshu/riiid-rapids
def xgb(path):

    X, y, Xt, yt, cols = get_data()
    params = {
        'n_estimators': 100,
        'eta': 0.1,
        'early_stopping_rounds': 10,
        'max_depth': 7,
        'colsample_bytree': 1.0,
        'subsample': 0.5,
        'verbosity': 1,
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'validation_fraction': 0,
    }

    clf = XGBClassifier(**params)
    clf.fit(X, y, Xt, yt)
    clf.clf.save_model(f'{path}/cache/xgb.json')

    yp = clf.predict_proba(Xt)[:, 1]  # positive-class column, matching mlp()
    #return yp
    print(cols)
    cp.save('xgb_va.npy', cp.asarray(yp))
    yx = clf.predict_proba(X)[:, 1]
    cp.save('xgb_tr.npy', cp.asarray(yx))
    return roc_auc_score(yt, yp)
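Both mlp() and xgb() cache their validation predictions as .npy files, so they can be blended downstream. An illustrative sketch, assuming both scripts ran in the same working directory:

import cupy as cp

blend = 0.5 * cp.load('mlp.npy') + 0.5 * cp.load('xgb_va.npy')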
Code example #9
    def build_cbow_model(self):
        #Iterate over epochs
        for k in range(self.epochs):
            print("We are at epoch : ", k + 1)
            #For each training example
            for i in range(len(self.X_train)):

                #Forward propagation of the CBOW network-----

                #Take average
                x = np.zeros((self.vocab_size, 1))
                for word in self.X_train[i]:
                    x += self.one_hot(self.words_to_int[word])
                x /= len(self.X_train[i])

                h = np.dot(self.w_hidden.T, x)
                # Equivalent per-word form of the hidden projection:
                #   h = np.zeros((self.dim, 1))
                #   for word in self.X_train[i]:
                #       h += np.dot(self.w_hidden.T, self.one_hot(self.words_to_int[word]))
                #   h /= len(self.X_train[i])
                print("-----------")
                print("Forward propagation done CBOW...: ", i, "Epoch: ",
                      k + 1)
                u = np.dot(self.w_output.T, h)
                pred = self.softmax(u)

                #Backward propagation------
                #err_sum = np.zeros((self.vocab_size,1))

                err = pred - self.one_hot(self.words_to_int[self.Y_train[i]])
                print("Calculated error..", i, k + 1)

                #Calculate dL/dW

                dw_hidden = np.outer(x, np.dot(self.w_output, err))

                #Calculate dL/dW'
                dw_output = np.outer(h, err)

                #Gradient descent
                self.w_hidden += -self.lr * dw_hidden
                self.w_output += -self.lr * dw_output
                print("Gradient descent done..", i, k + 1)

            #Update model after each epoch
            print("Saving model...")
            for key, value in self.words_to_int.items():
                self.model[key] = self.w_hidden[value].reshape(
                    1, self.w_hidden.shape[1])

            #Store model after every epoch

            print("Model to npy file...")
            np.save('./utils/cbow_new_' + str(k), self.model)
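This example and the next call self.one_hot and self.softmax without showing them. A minimal sketch of what those helpers presumably look like inside the same class:

def one_hot(self, idx):
    # Column vector with a single 1 at the word's index.
    v = np.zeros((self.vocab_size, 1))
    v[idx] = 1.0
    return v

def softmax(self, u):
    e = np.exp(u - np.max(u))  # subtract the max for numerical stability
    return e / e.sum()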
Code example #10
    def build_skipgram_model(self):
        #Iterate over epochs
        print("No. of training samples are: ", len(self.X_train))
        for k in range(self.epochs):
            print("We are at epoch : ", k)
            print()
            print("No. of training samples: ", len(self.X_train))
            #For each training example
            for i in range(len(self.X_train)):

                #Forward propagation of the SkipGram network-----
                #Here X_train[i] is a Vx1 vector.
                #print "self.X_train[i] is ", self.X_train[i]
                #print "self.words_to_int[i] is ", self.words_to_int[self.X_train[i]]

                h = np.dot(self.w_hidden.T,
                           self.one_hot(self.words_to_int[self.X_train[i]]))
                output = np.dot(self.w_output.T, h)
                pred = self.softmax(output)
                print("---------------")
                print("Forward propagation done SKIPGRAM...",
                      str(i) + "/" + str(len(self.X_train)), " Epoch: ",
                      str(k + 1) + "/" + str(self.epochs))

                #Backward propagation------
                err_sum = np.zeros((self.vocab_size, 1))

                for word in self.Y_train[i]:
                    err_sum += (pred - self.one_hot(self.words_to_int[word]))

                #err_sum/= self.vocab_size
                print("Calculated error..", i, k + 1)

                #Calculate dL/dW
                dw_hidden = np.outer(
                    self.one_hot(self.words_to_int[self.X_train[i]]),
                    np.dot(self.w_output, err_sum))

                #Calculate dL/dW'
                dw_output = np.outer(h, err_sum)

                #Gradient descent
                self.w_hidden += -self.lr * dw_hidden
                self.w_output += -self.lr * dw_output

                print("Gradient descent done..", i, k + 1)

            #Update model after each epoch
            print("Saving model...")
            for key, value in self.int_to_words.items():
                self.model[value] = self.w_hidden[key].reshape(
                    1, self.w_hidden.shape[1])

            #Store model after every epoch
            print("Model to npy file...")
            np.save('./utils/skipgram_' + str(k), self.model)
Code example #11
def save_map(state_dict, save_map_path, device):
    save_weights = collections.OrderedDict({})
    save_binary = collections.OrderedDict({})
    # Save mapped float weights
    for name, param in tqdm(state_dict.items(), desc="Saving weight maps: "):
        if "weight" in name:
            weights = param.view(-1)
            map_cases = wmp.mapallweights2(weights)
            map_cases_np = map_cases.cpu().numpy()
            map_cases_cp = cp.asarray(map_cases_np)
            cp.save(save_map_path + str(name) + ".npy", map_cases_cp)
Code example #12
File: test_npz.py Project: wongalvis14/cupy
    def test_save_pickle(self):
        data = object()

        sio = io.BytesIO()
        with self.assertRaises(ValueError):
            cupy.save(sio, data, allow_pickle=False)
        sio.close()

        sio = io.BytesIO()
        cupy.save(sio, data, allow_pickle=True)
        sio.close()
Code example #13
File: test_npz.py Project: wongalvis14/cupy
    def test_save_load(self, dtype):
        a = testing.shaped_arange((2, 3, 4), dtype=dtype)
        sio = io.BytesIO()
        cupy.save(sio, a)
        s = sio.getvalue()
        sio.close()

        sio = io.BytesIO(s)
        b = cupy.load(sio)
        sio.close()

        testing.assert_array_equal(a, b)
Code example #14
File: test_npz.py Project: 2php/chainer
    def test_save_load(self, dtype):
        a = testing.shaped_arange((2, 3, 4), dtype=dtype)
        sio = six.BytesIO()
        cupy.save(sio, a)
        s = sio.getvalue()
        sio.close()

        sio = six.BytesIO(s)
        b = cupy.load(sio)
        sio.close()

        testing.assert_array_equal(a, b)
Code example #15
    def save(self, savedir):
        """ persist map in `savedir` """
        if not os.path.isdir(savedir):
            os.makedirs(savedir)
        # Persist arrays
        dsave = {}
        dsave['unique_state_ids'] = self.unique_state_ids
        dsave['unique_contagiousities'] = self.unique_contagiousities
        dsave['unique_sensitivities'] = self.unique_sensitivities
        dsave['unique_severities'] = self.unique_severities
        dsave['cell_ids'] = self.cell_ids
        dsave['unsafeties'] = self.unsafeties
        dsave['square_sampling_probas'] = self.square_sampling_probas
        dsave['eligible_cells'] = self.eligible_cells
        dsave['coords_squares'] = self.coords_squares
        dsave['square_ids_cells'] = self.square_ids_cells
        dsave['cell_sampling_probas'] = self.cell_sampling_probas
        dsave['cell_index_shift'] = self.cell_index_shift
        dsave['agent_ids'] = self.agent_ids
        dsave['p_moves'] = self.p_moves
        dsave['least_state_ids'] = self.least_state_ids
        dsave['home_cell_ids'] = self.home_cell_ids
        dsave['current_state_ids'] = self.current_state_ids
        dsave['current_state_durations'] = self.current_state_durations
        dsave['agent_squares'] = self.agent_squares
        dsave['transitions'] = self.transitions
        dsave['transitions_ids'] = self.transitions_ids
        dsave['durations'] = self.durations
        dsave['r_factors'] = self.r_factors
        dsave['infecting_agents'] = self.infecting_agents
        dsave['infected_agents'] = self.infected_agents
        dsave['infected_periods'] = self.infected_periods

        for fname, arr in dsave.items():
            filepath = os.path.join(savedir, f'{fname}.npy')
            cp.save(filepath, arr)

        # Persist scalars and other parameters
        sdict = {}
        sdict['current_period'] = self.current_period
        sdict['verbose'] = self.verbose
        sdict['dscale'] = self.dscale
        sdict['n_infected_period'] = self.n_infected_period
        sdict['n_diseased_period'] = self.n_diseased_period

        sdict_path = os.path.join(savedir, 'params.pkl')
        with open(sdict_path, 'wb') as f:
            pickle.dump(sdict, f, protocol=pickle.HIGHEST_PROTOCOL)

        if self.verbose > 0:
            print(f'Map persisted under folder: {savedir}')
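A hypothetical counterpart that restores what save() persisted, assuming the same savedir layout (the key list here is abbreviated; a real version would iterate over all saved names):

def load_map(savedir):
    arrays = {}
    for fname in ('cell_ids', 'p_moves', 'agent_ids'):
        arrays[fname] = cp.load(os.path.join(savedir, f'{fname}.npy'))
    with open(os.path.join(savedir, 'params.pkl'), 'rb') as f:
        params = pickle.load(f)
    return arrays, params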
Code example #16
 def multiply(self, factor, new_files):
     # Scale each on-disk block of the matrix by `factor` on a round-robin
     # choice of GPU, re-save it, and collect each block's row sums in `norm`.
     loc = 0
     norm = np.zeros(self.num_rows)
     for i in range(len(self.files)):
         with cp.cuda.Device(self.gpu_list[i % self.num_gpus]):
             coef = cp.asarray(factor)
             matrix_part = cp.load(self.files[i])
             n = matrix_part.shape[0]
             matrix_part = matrix_part * coef
             cp.save(new_files[i], matrix_part)
             norm_part = cp.sum(matrix_part, axis=1)
             norm[loc:loc + n] = norm_part.get()
             loc = loc + n
     return norm
Code example #17
File: train.py Project: ShenDezhou/powerlawapsp
def main(config_file):
    # 0. Load config and mkdir
    with open(config_file) as fin:
        config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))

    data = Data(config.train_file_path)
    mat = data.load_file()

    t = time.process_time()
    apsp = MODEL_MAP[config.model_type](mat, config)
    mr = apsp.apsp(g_diameter=config.diameter)
    te = time.process_time()
    print('time:', (te - t))
    print(mr.shape)
    if config.device == 'cpu':
        # save() takes the file name first; these writers emit .npy files.
        numpy.save(config.experiment_name + '.npy', mr)
    else:
        cupy.save(config.experiment_name + '.npy', mr)
Code example #18
File: AlignedVectors.py Project: mcyph/pos_tagger
    def __get_D_word_embeddings(self, path):
        if exists(f'{path}.{EMBEDDINGS_LIMIT}.npy'):
            with open(f'{path}.{EMBEDDINGS_LIMIT}.pkl', mode='rb') as f:
                DFreqs, DFreqsToWord = pickle.load(f)
            with open(f'{path}.{EMBEDDINGS_LIMIT}.npy', mode='rb') as f:
                a = np.load(f)
            return DFreqs, DFreqsToWord, a

        DWords = {}  # Note this isn't stored in `self`
        DFreqs = {}
        DFreqsToWord = {}

        with open(path, 'r', encoding='utf-8') as f:
            for x, line in enumerate(f):
                values = line.rstrip().rsplit(' ')
                word = values[0]
                coefs = np.asarray(values[1:], dtype='float32')
                DWords[word] = coefs
                DFreqs[word] = x  # More common words might be higher up(?)
                DFreqsToWord[x] = word

                if x % 1000 == 0:
                    print(x)

                if x > EMBEDDINGS_LIMIT:
                    # A lot of the time it might be better to
                    # clip results - millions of results might actually reduce
                    # the quality as frequencies get lower!
                    break
                #print(word)

    a = np.empty((len(DWords), len(DWords[word])), dtype='float32')
        for word, vec in DWords.items():
            # NOTE: the index is also the frequency of the word
            #   (with 0 being the most common), as the files
            #   are sorted in order of most to least common
            a[DFreqs[word], :] = vec

        with open(f'{path}.{EMBEDDINGS_LIMIT}.pkl', mode='wb') as f:
            pickle.dump((DFreqs, DFreqsToWord), f)
        with open(f'{path}.{EMBEDDINGS_LIMIT}.npy', mode='wb') as f:
            np.save(f, a)
        return DFreqs, DFreqsToWord, a
Code example #19
def get_resized_data(subset: str,
                     PATH: str,
                     resize: tuple,
                     fn_type='.txt',
                     sep=' ',
                     header=None):
    try:
        print('Loading the resized', subset, 'images ...', end=' ')
        X = np.load(PATH + 'X_' + subset[:2] + '_' + str(resize[0]) + '.npy',
                    allow_pickle=True)
        y = np.load(PATH + 'y_' + subset[:2] + '.npy', allow_pickle=True)
    except (FileNotFoundError, OSError):
        print('Failed! QAQ')
        print('Loading the original', subset, 'images ...', end=' ')
        img_list, fn_list, y = load_images(subset, PATH, fn_type, sep, header)
        X = img_resize(img_list, resize)
        np.save(PATH + 'X_' + subset[:2] + '_' + str(resize[0]) + '.npy', X)
        np.save(PATH + 'y_' + subset[:2] + '.npy', y)
    print('Done!')
    return X, y
Code example #20
def read_to_dat():
    # Load raw images from ubytes
    def read_train_data():
        x, y = loadlocal_mnist(
                images_path = train_images_raw,
                labels_path = train_labels_raw)
        return x, y
    def read_test_data():
        x, y = loadlocal_mnist(
                images_path = test_images_raw,
                labels_path = test_labels_raw)
        return x, y
    def convert_targets_train(train_array):
        target_array = np.zeros((SAMPLES, NEURONS), dtype="float32")
        for t in range(SAMPLES):
            target_array[t][int(train_array[t])] = 1
        return target_array
    def convert_targets_test(test_array):
        target_array = np.zeros((SAMPLES_T, NEURONS), dtype='float32')
        for t in range(SAMPLES_T):
            target_array[t][int(test_array[t])] = 1
        return target_array

    train_images, train_labels = read_train_data()
    train_labels = convert_targets_train(train_labels)
    # Normalize
    train_images = train_images / 255
    # Save to .npy (third argument is allow_pickle); the memmap variant
    # kept in the comments below was an earlier approach.
    np.save(train_images_dat, train_images, True)
    # fp0 = np.memmap(train_images_dat, dtype='float64',
    #                 mode='w+', shape=(SAMPLES,INPUTS))
    # fp0 = np.memmap(train_images_dat, mode='w+', shape=(SAMPLES,INPUTS))
    # Copy into file pointer
    # fp0[:] = train_images[:]
    # del fp0
    np.save(train_labels_dat, train_labels, True)
    # fp1 = np.memmap(train_labels_dat, dtype='float64',
    #                 mode='w+', shape=(SAMPLES,NEURONS))
    # fp1[:] = train_labels[:]
    # del fp1

    # Same as above but with test samples
    test_images, test_labels = read_test_data()
    test_labels = convert_targets_test(test_labels)
    test_images = test_images / 255
    np.save(test_images_dat, test_images, True)
    # fp2 = np.memmap(test_images_dat, dtype='float64',
    #                 mode='w+', shape=(SAMPLES_T,INPUTS))
    # fp2[:] = test_images[:]
    # del fp2
    np.save(test_labels_dat, test_labels, True)
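The two convert_targets_* helpers above differ only in the row count; a single parameterized version could replace both. A sketch, reusing NEURONS from the original:

def convert_targets(labels, n_samples, n_neurons=NEURONS):
    # One-hot encode integer labels into an (n_samples, n_neurons) array.
    targets = np.zeros((n_samples, n_neurons), dtype='float32')
    targets[np.arange(n_samples), labels.astype(int)] = 1
    return targets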
Code example #21
File: reconstruct.py Project: fiarabbit/fMRI_AE
def main(id):
    with chainer.using_config("train", False):
        with chainer.using_config("enable_backprop", False):
            model_path = "/efs/fMRI_AE/SimpleFCAE_E32D32/model/model_iter_108858"

            gpu = 0
            get_device_from_id(gpu).use()
            """NibDataset
            def __init__(self, directory: str, crop: list):
            """
            crop = [[9, 81], [11, 99], [0, 80]]
            test_dataset = NibDataset("/data/test", crop=crop)
            """
            def __init__(self, dataset, batch_size, repeat=True, shuffle=True):
            """
            mask = load_mask_nib("/data/mask/average_optthr.nii", crop)

            model = Model(mask, 2, "mask", "mask")
            load_npz(model_path, model)
            model.to_gpu()

            for i in range(len(test_dataset)):
                if i % 8 != id:
                    continue
                inp = to_gpu(test_dataset.get_example(i))
                inp = xp.expand_dims(inp, 0)
                subject = test_dataset.get_subject(i)
                frame = test_dataset.get_frame(i)
                sys.stdout.write("\rsubject{:03d} frame{:03d}".format(
                    subject, frame))
                sys.stdout.flush()
                out = model.reconstruct(inp).array
                out = xp.squeeze(out)
                xp.save(
                    "/efs/fMRI_AE/SimpleFCAE_E32D32/reconstruct/reconstruction_subject{:03d}_frame{:03d}.npy"
                    .format(subject, frame), out)
Code example #22
def save_arrays(array_dir, **arrays):
    for name, array in arrays.items():
        cp.save("%s/%s" % (array_dir, name), array)
Code example #23
 def saveCupy(self, saveFileName):
     cupy.save(saveFileName, self.CUPYcorpus)
Code example #24
File: cplib.py Project: bfrosik/pycdi
 def save(filename, arr):
     cp.save(filename, arr)
Code example #25
 def save(self, tsr, filename):
     with open(filename, 'w+b') as file:
         cp.save(file, tsr.unwrap(), allow_pickle=False)
Code example #26
File: pool_layer.py Project: Noroskir/Runescape-Bot
 def save_layers(self, path, i):
     """Save weights and biases to file. """
     np.save("{:}/w_layer{:}.npy".format(path, i), self.W)
     np.save("{:}/b_layer{:}.npy".format(path, i), self.b)
Code example #27
bytes_length = sys.getsizeof(10**end)  # generous upper bound on the byte width to_bytes needs

for i in range(start, end + 1, step):
    print(f'Preparing for length {i}')

    # Generate random array
    tensor = cp.random.randint(0, R, size=(ns.pairs * 2, i // ns.segment))

    # Iterate through all arrays and convert into integers
    integers = set()

    for j in range(tensor.shape[0]):
        x = int(''.join([
            n.zfill(len(n) + (-len(n) % ns.segment))
            for n in (str(m) for m in tensor[j])
        ]))

        if random.choice([False, True]):
            tensor[j] *= -1
            x *= -1

        integers.add(x)

    # Save tensor to file
    cp.save(f'{path_tensors}/{i}', tensor)

    # Save integers to file
    with open(f'{path_integers}/{i}.lsi', 'wb') as f:
        for x in integers:
            x = x.to_bytes(bytes_length, byteorder='big', signed=True)
            f.write(x + b'[END_OF_INTEGER]')
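The zfill expression above pads each digit group so its total length is a multiple of ns.segment. A concrete illustration, assuming a segment width of 4:

n = '123'
padded = n.zfill(len(n) + (-len(n) % 4))  # '0123': length rounded up to a multiple of 4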
Code example #28
    print("Running FMAEE:")
    start = time.time()
    # Here we process (train/execute) each individual observation.
    # In this way, X is essentially a stream, and each observation is discarded after performing process() method.
    for j in range(1, epoch * 2 + 1):
        for i in range(X.shape[0]):
            K.process(
                X[i, ]
            )  #will train during the grace periods, then execute on all the rest.
            if i == 0:  # i % X.shape[0] == 0 only holds at the start of each epoch
                print(str(j) + " epoch")
    K.process(X[0, ])  #will trigger saving the models
    stop = time.time()
    print("Training completed in: " + str(round(stop - start)) + " seconds")

    RMSEs = np.zeros(X.shape[0])  # a place to save the scores
    for i in range(X.shape[0]):
        RMSEs[i] = K.execute(X[i, ])
    if settings["sens"] == "low":
        threshold = np.mean(RMSEs) + np.std(RMSEs)
        level = 1
    if settings["sens"] == "med":
        threshold = np.mean(RMSEs) + 2 * np.std(RMSEs)
        level = 2
    if settings["sens"] == "high":
        threshold = np.mean(RMSEs) + 3 * np.std(RMSEs)
        level = 3

    params = [threshold, level, np.mean(RMSEs), np.std(RMSEs)]
    np.save("./models/threshold.npy", params)
Code example #29
File: postprocess.py Project: mmyros/pykilosort
 def _save(name, arr, dtype=None):
     cp.save(join(savePath, name + '.npy'), arr.astype(dtype or arr.dtype))
Code example #30
File: CPUCupyPinned.py Project: y-hann/SpeedTorch
 def saveCupy(self, saveFileName):
     cupy.save(saveFileName, self.CUPYmemmap)
Code example #31
def main(id):
    model_path = "/efs/fMRI_AE/SimpleFCAE_E32D32/model/model_iter_108858"

    gpu = 0
    get_device_from_id(gpu).use()
    """NibDataset
    def __init__(self, directory: str, crop: list):
    """
    crop = [[9, 81], [11, 99], [0, 80]]
    test_dataset = NibDataset("/data/test", crop=crop)

    mask = load_mask_nib("/data/mask/average_optthr.nii", crop)
    """SimpleFCAE_E32D32
    def __init__(self, mask, r: int, in_mask: str, out_mask: str):
    """
    model = Model(mask, 2, "mask", "mask")
    load_npz(model_path, model)
    model.to_gpu()

    # feature_idx = 0
    # feature_idx = (0, 4, 5, 5) # == [0, 9/2, 11/2, 10/2]
    # feature_idx = (0, 1, 1, 1)
    feature_idx = (0, 2, 7, 4)
    resample_size = 100
    batch_size = 10
    noise_level = 0.2

    for i in range(len(test_dataset)):
        if i % 8 != id:
            continue
        print("{:4}/{:4}".format(i, len(test_dataset)))
        subject = test_dataset.get_subject(i)
        frame = test_dataset.get_frame(i)
        test_img = xp.asarray(test_dataset[i])

        resample_remain = resample_size
        resample_processed = 0
        ret = xp.zeros(test_img.shape)
        while resample_remain > 0:
            batch_size_this_loop = min(batch_size, resample_remain)
            resample_remain -= batch_size_this_loop

            batch = xp.broadcast_to(
                test_img, (batch_size_this_loop,) + test_img.shape)
            sigma = noise_level / (xp.max(test_img) - xp.min(test_img))
            # broadcast_to returns a read-only view, so build a new array
            # instead of adding the noise in place.
            batch = batch + sigma * xp.random.randn(*batch.shape)

            x = Variable(batch)

            feature = model.extract(x)
            assert feature.shape == (batch_size_this_loop, 1, 9, 11, 10)
            feature = F.sum(feature, axis=0)
            assert feature.shape == (1, 9, 11, 10)
            feature = F.get_item(feature, feature_idx)
            feature.backward()
            grad = xp.mean(x.grad, axis=0)
            ret = (ret * resample_processed + grad * batch_size_this_loop) / (
                resample_processed + batch_size_this_loop)
            model.cleargrads()

        xp.save(
            "/efs/fMRI_AE/SimpleFCAE_E32D32/grad/sensitivity_map_feature_{}_{}_{}_subject{:03d}_frame{:03d}"
            .format(feature_idx[1], feature_idx[2], feature_idx[3], subject,
                    frame), ret)