Example #1
def image_formation():
    Ncopy = 1000
    use_sPCA = False
    image_idx = 0
    SNR = .1
    r_max = 64
    seed = 0
    Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
        Ncopy=Ncopy, SNR=SNR, seed=seed, image_idx=image_idx, sPCA=use_sPCA)
    image0 = recover_image(Coeff_raw, Phi_ns, Freqs, r_max, Mean)
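    # Rotation acts diagonally on the steerable-basis coefficients: rotating
    # the image by theta degrees multiplies the angular-frequency-k coefficient
    # by exp(1j * 2*pi * theta / 360 * k); below, theta = 45.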
    Rl = np.expand_dims(np.exp(1j * 2 * np.pi * 45 / 360 * Freqs), axis=-1)
    Coeff_rotated = Coeff_raw * Rl

    image_rotated = recover_image(Coeff_rotated, Phi_ns, Freqs, r_max, Mean)

    image_rotated_noisy = recover_image(np.expand_dims(Coeff[:, 0], axis=-1),
                                        Phi_ns, Freqs, r_max, Mean)

    plt.subplot(131)
    plt.imshow(image0, cmap='gray')
    plt.axis('off')
    plt.subplot(132)
    plt.imshow(image_rotated, cmap='gray')
    plt.axis('off')
    plt.subplot(133)
    plt.imshow(image_rotated_noisy, cmap='gray')
    plt.axis('off')
    plt.savefig('mra_2d_observations_model.png')
    plt.savefig('mra_2d_observations_model.eps')
    plt.clf()
Example #2
def draw_from_prior():
    Ncopy = 10000
    use_sPCA = True
    image_idx = 0
    SNR = 10
    filename = 'E70s.mat'
    r_max = 64
    seed = 0
    Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
        Ncopy=Ncopy, SNR=SNR, seed=seed, image_idx=image_idx, sPCA=use_sPCA)
    data_prior = np.expand_dims(4 * np.exp(-np.asarray(Freqs) / 8), axis=-1)
    Gamma_a = np.diag(np.abs(data_prior[:, 0])**2)
    np.random.seed(10)

    for i in range(9):
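        # Draw a circularly symmetric complex Gaussian with covariance Gamma_a;
        # the 1/sqrt(2) factor splits the power between real and imaginary parts.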
        x = 1 / np.sqrt(2) * (
            np.random.multivariate_normal(np.zeros(
                (Freqs.shape[0], )), Gamma_a) +
            1j * np.random.multivariate_normal(np.zeros(
                (Freqs.shape[0], )), Gamma_a))
        Coeff_raw = np.expand_dims(x, axis=-1)
        image0 = recover_image(Coeff_raw, Phi_ns, Freqs, r_max, Mean)
        plt.subplot(3, 3, i + 1)
        plt.imshow(image0, cmap='gray')
        plt.axis('off')
    plt.savefig('draw_from_prior.png')
    plt.savefig('draw_from_prior.eps')
    plt.clf()
Example #3
    def train(self):
        # Data augmentation
        aug = ImageDataGenerator(rotation_range=25,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True,
                                 fill_mode="nearest")
        # Model Checkpoint
        cpt_save = ModelCheckpoint('models_save/my_weight.h5',
                                   save_best_only=True,
                                   monitor='val_acc',
                                   mode='max')

        X_data, y_data = make_data()
        (X_train, X_val, y_train, y_val) = train_test_split(X_data,
                                                            y_data,
                                                            test_size=0.2,
                                                            random_state=123)
        print("Training......")
        self.model.fit(aug.flow(X_train, y_train, batch_size=32),
                       validation_data=(X_val, y_val),
                       callbacks=[cpt_save],
                       verbose=1,
                       epochs=50,
                       steps_per_epoch=len(X_train) // 32)
        # Save the model after training finishes
        self.model.save('models_save/my_model.h5')
Example #4
def run_test():
    print('test')
    pool = Pool(processes=cpu_count())
    X, Y = make_data(pool, 'ted_7_ErasePunc_FullKorean__test.txt')
    print('make test data end.')
    X = norm_many(pool, X)
    print('norm_data end.')
    interference(X, Y, 'model.tfl')
Example #5
def run_train(train_file):
    print('train')
    pool = Pool(processes=cpu_count())
    X, Y = make_data(pool, train_file)
    print('make train data end.')
    X = norm_many(pool, X)
    print('norm_data end.')
    train(X, Y, 'model_MDM001.tfl')
Example #6
def run_train():
    pool = Pool(processes=cpu_count())
    X, Y = make_data(pool, 'ted_7_ErasePunc_FullKorean__train.txt')
    print('make train data end.')
    X = norm_many(pool, X)
    print('norm train data end.')

    train(X, Y, 'model.tfl')
Example #7
def ppm_em_experiment(seed, image_idx, SNR, Ncopy, Ndir, use_sPCA,
                      use_signal_prior, gamma, BW, path_learning, P):
    Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
        Ncopy=Ncopy, SNR=SNR, seed=seed, image_idx=image_idx, sPCA=use_sPCA)
    image0 = recover_image(Coeff_raw, Phi_ns, Freqs, r_max, Mean)

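    # Stage 1: estimate rotations by angular synchronization, then average the
    # aligned copies into an initial template.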
    TT = time()
    Coeff_s, est_rotations = synchronize_2d(Coeff, Freqs, Ndir)
    t_synch = time() - TT

    x_s = np.expand_dims(np.mean(Coeff_s, axis=-1), axis=-1)
    x = align_image(x_s, Coeff_raw, Freqs)
    imager = recover_image(x, Phi_ns, Freqs, r_max, Mean)
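    # Relative L2 error between the clean image and the reconstruction.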
    err_synch = np.sqrt(
        np.sum(np.sum((image0 - imager)**2, axis=-1), axis=0) /
        np.sum(np.sum(image0**2, axis=-1), axis=0))

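    # Stage 2: refine with EM over the discrete rotation distribution,
    # optionally using the learned signal prior.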
    x, num_iter_synch_em, t_synch_em = EM_General_Prior(Coeff,
                                                        Freqs,
                                                        sigma,
                                                        Ndir,
                                                        1 / Ndir * np.ones(
                                                            (Ndir, 1)),
                                                        Ndir,
                                                        0,
                                                        Coeff_raw,
                                                        use_signal_prior,
                                                        uniform=False)
    x = align_image(x, Coeff_raw, Freqs)
    imager = recover_image(x, Phi_ns, Freqs, r_max, Mean)
    err_synch_em = np.sqrt(
        np.sum(np.sum((image0 - imager)**2, axis=-1), axis=0) /
        np.sum(np.sum(image0**2, axis=-1), axis=0))

    # DataFrame.append was removed in pandas 2.x; build the frame directly.
    results = pd.DataFrame([{
        'use_signal_prior': use_signal_prior,
        'use_sPCA': use_sPCA,
        'seed': seed,
        'SNR': SNR,
        'sigma': sigma,
        'N': Ncopy,
        'L': Coeff.shape[0],
        'image_idx': image_idx,
        'err': err_synch_em,
        'num_iter': num_iter_synch_em,
        'BW': BW,
        'gamma': gamma,
        't': t_synch + t_synch_em,
        't_synch': t_synch,
        't_em': t_synch_em,
        'err_synch': err_synch
    }])

    return results
Example #8
def run_test():
    pool = Pool(processes=cpu_count())
    X, Y = make_data(pool, 'ted_7_ErasePunc_FullKorean__test.txt')
    print('make test data end.')
    X = norm_many(pool, X)
    print('norm test data end.')

    pred = test(X, Y, 'model.tfl')
    print('pred[:10]={}'.format(pred[:10]))
Example #9
def load_example():
    X, y = make_data(request.json['example'])
    example_data = {
        "X": X.tolist(),
        "y": y.tolist()
    }

    example_id = db.data.insert(example_data)
    return jsonify({
        "id": str(example_id)
    })
Example #10
def known_rotations_experiment(seed, image_idx, SNR, Ncopy, Ndir, use_sPCA):
    Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
        Ncopy=Ncopy, SNR=SNR, seed=seed, image_idx=image_idx, sPCA=use_sPCA)
    image0 = recover_image(Coeff_raw, Phi_ns, Freqs, r_max, Mean)

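    # Oracle baseline: align each copy using its true rotation before averaging.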
    est_rotations = rotations
    Coeff_s = np.zeros_like(Coeff)
    for i in range(Ncopy):
        Coeff_s[:, i] = Coeff[:, i] * np.exp(
            -1j * 2 * np.pi * est_rotations[i] / 360 * Freqs)

    t_synch = 0
    h, bin_edges = np.histogram((est_rotations - rotations) % 360,
                                bins=np.arange(-0.5, 360 + 0.5))
    measured_rho = np.expand_dims(h / np.sum(h), axis=-1)
    x_s = np.expand_dims(np.mean(Coeff_s, axis=-1), axis=-1)

    x = align_image(x_s, Coeff_raw, Freqs)
    imager = recover_image(x, Phi_ns, Freqs, r_max, Mean)
    err_synch = np.sqrt(
        np.sum(np.sum((image0 - imager)**2, axis=-1), axis=0) /
        np.sum(np.sum(image0**2, axis=-1), axis=0))
    print('#### oracle ####')
    print('e = %f' % err_synch)

    # DataFrame.append was removed in pandas 2.x; build the frame directly.
    results = pd.DataFrame([{
        'use_sPCA': use_sPCA,
        'seed': seed,
        'SNR': SNR,
        'sigma': sigma,
        'N': Ncopy,
        'L': Coeff.shape[0],
        'image_idx': image_idx,
        'rho': measured_rho,
        'err': err_synch,
        'num_iter': 0,
        't': t_synch
    }])

    return results
Example #11
def synchronize_and_match_experiment(seed, image_idx, SNR, Ncopy, Ndir,
                                     use_sPCA, P):
    Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
        Ncopy=Ncopy, SNR=SNR, seed=seed, image_idx=image_idx, sPCA=use_sPCA)
    image0 = recover_image(Coeff_raw, Phi_ns, Freqs, r_max, Mean)

    TT = time()
    Coeff_s, est_rotations = synchronize_and_match_2d(Coeff,
                                                      Freqs,
                                                      P=P,
                                                      L=Ndir)
    t_synch = time() - TT

    h, bin_edges = np.histogram((est_rotations - rotations) % 360,
                                bins=np.arange(-0.5, 360 + 0.5))
    measured_rho = np.expand_dims(h / np.sum(h), axis=-1)
    x_s = np.expand_dims(np.mean(Coeff_s, axis=-1), axis=-1)

    x = align_image(x_s, Coeff_raw, Freqs)
    imager = recover_image(x, Phi_ns, Freqs, r_max, Mean)
    err_synch = np.sqrt(
        np.sum(np.sum((image0 - imager)**2, axis=-1), axis=0) /
        np.sum(np.sum(image0**2, axis=-1), axis=0))
    print('#### synchronize and match ####')
    print('e = %f' % err_synch)

    # DataFrame.append was removed in pandas 2.x; build the frame directly.
    results = pd.DataFrame([{
        'use_sPCA': use_sPCA,
        'seed': seed,
        'SNR': SNR,
        'sigma': sigma,
        'N': Ncopy,
        'L': Coeff.shape[0],
        'image_idx': image_idx,
        'rho': measured_rho,
        'err': err_synch,
        'num_iter': 0,
        't': t_synch
    }])
    return results
Example #12
def standard_em_experiment(seed, image_idx, SNR, Ncopy, Ndir, use_sPCA,
                           use_signal_prior):
    Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
        Ncopy=Ncopy, SNR=SNR, seed=seed, image_idx=image_idx, sPCA=use_sPCA)
    image0 = recover_image(Coeff_raw, Phi_ns, Freqs, r_max, Mean)

    x, num_iter_em_uniform, t_em_uniform = EM_General_Prior(Coeff,
                                                            Freqs,
                                                            sigma,
                                                            Ndir,
                                                            1 / Ndir * np.ones(
                                                                (Ndir, 1)),
                                                            Ndir,
                                                            0,
                                                            Coeff_raw,
                                                            use_signal_prior,
                                                            uniform=True)
    x = align_image(x, Coeff_raw, Freqs)
    imager = recover_image(x, Phi_ns, Freqs, r_max, Mean)
    err_em_uniform = np.sqrt(
        np.sum(np.sum((image0 - imager)**2, axis=-1), axis=0) /
        np.sum(np.sum(image0**2, axis=-1), axis=0))

    # DataFrame.append was removed in pandas 2.x; build the frame directly.
    results = pd.DataFrame([{
        'use_signal_prior': use_signal_prior,
        'use_sPCA': use_sPCA,
        'seed': seed,
        'SNR': SNR,
        'sigma': sigma,
        'N': Ncopy,
        'L': Coeff.shape[0],
        'image_idx': image_idx,
        'err': err_em_uniform,
        'num_iter': num_iter_em_uniform,
        't': t_em_uniform
    }])

    return results
Example #13
def main(args):

    #######################
    # Load directory paths
    #######################
    # Today's date
    today_time = dt.datetime.today().strftime("%Y_%m_%d")

    # Set each directory
    DATA_DIR, SAVE_DIR = \
        args.data_dir, args.save_dir

    # Append today's date and the current time to SAVE_DIR
    do_time = dt.datetime.today().strftime("%H_%M_%S")
    SAVE_DIR = os.path.join(SAVE_DIR, today_time, do_time)
    if not os.path.isdir(SAVE_DIR):
        os.makedirs(SAVE_DIR)

    #######################
    # Hyper Parameter
    #######################
    param_dic = {}
    config = configparser.ConfigParser()
    config.read('config.ini')
    param_sphere = config['sphere']
    level = param_dic['level'] = int(param_sphere['level'])
    hemisphere = param_dic['hemisphere'] = int(param_sphere['hemisphere'])
    Q = param_dic['Q'] = 2**level
    title = param_dic['title'] = param_sphere['title']

    param = config['parameter']
    # ConfigParser returns strings, so cast the numeric hyperparameters.
    batch_size = param_dic['batch_size'] = int(param['batch_size'])
    max_epochs = param_dic['max_epochs'] = int(param['max_epochs'])
    evaluate_num = param_dic['evaluate_sample_num_per_epoch'] = int(param['evaluate_num'])
    out_num = param_dic['out_num'] = int(param['out_num'])
    filter_num = param_dic['filter_num'] = int(param['filter_num'])
    filter_size = param_dic['filter_size'] = int(param['filter_size'])
    padding = param_dic['padding'] = int(param['padding'])
    stride = param_dic['stride'] = int(param['stride'])
    hidden_size = param_dic['hidden_size'] = int(param['hidden_size'])
    optimizer = param_dic['optimizer'] = param['optimizer']
    lr = param_dic['lr'] = float(param['lr'])
    weight_init_std = param_dic['weight_init_std'] = float(param['weight_init_std'])

    # Save the settings
    with open(os.path.join(SAVE_DIR, 'setting.txt'), 'a') as f:
        for key, value in param_dic.items():
            f.write(str(key) + ':' + str(value))
            f.write('\n')

    #######################
    # Obtain the spherical coordinates
    #######################
    img_ori = np.zeros((224, 224, 3))
    img_fish, pos = fish.make_considered_picture(img=img_ori, level=level, return_array=1)
    # print("level: ", level)
    # print(img_fish.shape)

    #######################
    # Load Data
    #######################
    ndar = np.load(DATA_DIR)
    train = ndar['train']
    test = ndar['test']

    #######################
    # Make data
    # Reshape the data into a form suited to the network
    #######################
    x_train, y_train = make_data(train)
    x_test, y_test = make_data(test)

    #######################
    # Obtain the spherical images
    #######################
    x_train = get_sphere_array(x_train, pos)
    x_test = get_sphere_array(x_test, pos)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    if x_train.shape[-1] == 3:
        print('change channel')
        x_train = x_train[:, :, :, 0]
        x_test = x_test[:, :, :, 0]
    # if len(x_test) > 1000:
    #     tmp = 1000
    #     x_train = np.concatenate((x_train, x_test[tmp:]), axis=0)
    #     x_test = x_test[:tmp]
    #     y_train = np.concatenate((y_train, y_test[tmp:]), axis=0)
    #     y_test = y_test[:tmp]
    x_test = x_test[:1000]
    y_test = y_test[:1000]

    #######################
    # Train
    #######################
    network = SphereConv(input_dim=(3, 6, Q+2, 2*Q+2),
                         conv_param={'filter_num': filter_num,
                                     'filter_size': filter_size,
                                     'pad': padding, 'stride': stride},
                         hidden_size=hidden_size, output_size=out_num,
                         weight_init_std=weight_init_std, level=level)

    trainer = Trainer(network, x_train, y_train, x_test[:100], y_test[:100],
                      epochs=max_epochs, mini_batch_size=batch_size,
                      optimizer=optimizer, optimizer_param={'lr': lr},
                      evaluate_sample_num_per_epoch=evaluate_num)
    trainer.train()

    #######################
    # Save the parameters
    #######################
    network.save_params(os.path.join(SAVE_DIR, "params.pkl"))
    print("Saved Network Parameters!")

    try:
        print('save acc')
        a = np.array(trainer.train_acc_list)
        b = np.array(trainer.test_acc_list)
        np.savez('acc.npz', train=a, test=b)
    except Exception:
        print('error in saving acc')
    # Draw the accuracy curves
    markers = {'train': 'o', 'test': 's'}
    x = np.arange(max_epochs)
    plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
    plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
    plt.xlabel("epochs")
    plt.ylabel("accuracy")
    plt.ylim(0, 1.0)
    plt.legend(loc='lower right')
    plt.savefig(os.path.join(SAVE_DIR, "test.png"))
Example #14
plt.plot([0, 0.5 * fs], [np.sqrt(0.5), np.sqrt(0.5)],
         'k--', alpha=0.6, linewidth=1, label=r'$\sqrt{2}/2$')
plt.xlim(0, 0.5 * fs)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain')
plt.grid(alpha=0.5)
plt.legend(framealpha=1, shadow=True, loc='best')
plt.title("Amplitude response for\nButterworth bandpass filters", fontsize=10)
plt.text(430,
         0.07,
         "lowcut: %4g Hz\nhighcut: %4g Hz" % (lowcut, highcut),
         fontsize=8)
plt.tight_layout()
plt.savefig("bandpass_example_response.pdf")

T = 0.03
t, x = make_data(T, fs)

y = butter_bandpass_filt(x, lowcut, highcut, fs, order=12)

plt.figure(figsize=(4.0, 2.8))
plt.plot(t, x, 'k', alpha=0.4, linewidth=1, label='Noisy signal')

plt.plot(t, y, 'C0', label='Filtered signal')
plt.xlabel('Time (seconds)')
plt.grid(alpha=0.5)
plt.axis('tight')
plt.xlim(0, T)
plt.legend(framealpha=1, shadow=True, loc='upper left')
plt.tight_layout()
plt.savefig("bandpass_example_signals.pdf")
Example #15
def train(training_x, training_y, testing_x, testing_y):
	lr=1e-4
	decay=1e-6
	epochs=10

	model = get_model()

	opt = Adam(lr=lr, decay=decay)
	model.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
	model.fit(training_x, training_y, epochs=epochs, validation_data=(testing_x, testing_y))

	return model

if __name__ == '__main__':
	(training_x, training_y), (testing_x, testing_y) = make_data()

	os.chdir("../../..")
	trained_model = train(training_x, training_y, testing_x, testing_y)

	#TODO: Save only the model and weights, not optimizer state or any of that junk
	"""trained_model.save("./model.h5")

	trained_model.save_weights('./model_weights.h5')
	with open("./model.json", 'w') as file:
		file.write(trained_model.to_json())"""

	trained_model.save("./tf-model", save_format="tf")

	import tensorflow as tf
	model_tf = tf.keras.models.load_model("./tf-model")
Example #16
from make_data import make_data


def etccv(n_estimators, min_samples_split):
    return cross_val_score(AdaBoostClassifier(ETC(min_samples_split=int(min_samples_split),
                                                  random_state=2,
                                                  n_jobs=-1),
                                              algorithm="SAMME",
                                              n_estimators=int(n_estimators)),
                           train, train_labels, scoring='roc_auc', cv=5).mean()


if __name__ == "__main__":
    # Load data set and target values
    train, train_labels, test, test_labels = \
        make_data(train_path="../input/xtrain_v5_full.csv",
                  test_path="../input/xtest_v5.csv")

    # RF
    etcBO = BayesianOptimization(etccv, {'n_estimators': (200, 800),
                                         'min_samples_split': (2, 8)})
    print('-' * 53)
    etcBO.maximize()
    print('-' * 53)
    print('Final Results')
    print('ETC: %f' % etcBO.res['max']['max_val'])

    # # MAKING SUBMISSION
    rf = cross_val_score(ETC(n_estimators=int(etcBO.res['max']['max_params']['n_estimators']),
                             min_samples_split=int(etcBO.res['max']['max_params']['min_samples_split']),
                             random_state=2,
                             n_jobs=-1),
Example #17
    def load_model(self, path):
        """
        :param path: path to model for loading

        Stores saved parameters in self.w
        """
        parameters = np.load(path, allow_pickle=True)
        self.w = parameters


if __name__ == '__main__':
    # Generating data
    n = 1000
    example_nr = 2
    noise = 1
    X, T, x, dim = make_data(example_nr, n, noise)

    # Centering around Origo
    X = X - np.mean(X, axis=0)

    # Training settings
    epochs = 100
    batch_size = 64
    learning_rate = 0.1
    model_saving = {
        'save': {
            'do': True,
            'path': '2_10_10_2_epoch100.npy'
        },
        'load': {
            'do': False,
Example #18
from plot_corr_normalized     import plot_corr_normalized
from meta_data                import *
from util_files               import read_fit_file
import defines           as df
import define_prior      as dfp
import gvar              as gv
import gvar.dataset      as gvd
import matplotlib.pyplot as plt
import numpy             as np
import sn_minimizer      as snm
import sys

makeData = df.do_makedata_3pt

## -- for raw correlator file input
data,dset = make_data(df.mdp,do_makedata=makeData,\
                      do_db=False,filename="./import-correlators-bar3pt")
## --

inPrefix='v4v4'
inPostfix='t6'
outPrefix='prod3'
tsep=6

cmat = list()
if df.do_irrep == "8":
  op_list = [1,2,3,5,6]
  nameStr = "8p"
elif df.do_irrep == "8'":
  op_list = [4,7]
  nameStr = "8m"
elif df.do_irrep == "16":
Example #19

def rfccv(n_estimators, min_samples_split):
    return cross_val_score(RFC(n_estimators=int(n_estimators),
                               min_samples_split=int(min_samples_split),
                               random_state=2,
                               n_jobs=-1),
                           train,
                           train_labels,
                           scoring='roc_auc',
                           cv=5).mean()


if __name__ == "__main__":
    # Load data set and target values
    train, train_labels, test, test_labels = make_data(
        train_path="../input/train.csv", test_path="../input/test.csv")

    # RF
    rfcBO = BayesianOptimization(rfccv, {
        'n_estimators': (600, 800),
        'min_samples_split': (2, 5)
    })
    print('-' * 53)
    rfcBO.maximize()
    print('-' * 53)
    print('Final Results')
    print('RFC: %f' % rfcBO.res['max']['max_val'])

    # # MAKING SUBMISSION
    rf = cross_val_score(RFC(
        n_estimators=int(rfcBO.res['max']['max_params']['n_estimators']),
Example #20
    #for file_name in os.listdir('pokerdata'):
    for i in range(1):
        file_name = 'vali.txt'
        with open(file_name) as f:
            test_file = f.read()
            if '9-max Seat' in test_file:
                continue
        test_file = test_file.replace('\u20ac', '$')  # euro sign
        games = re.findall(r'PokerStars Zoom Hand \#.+?FLOP.+?\*\*\* SUMMARY \*\*\*', test_file, re.DOTALL)
        for game in games:
            if 'Seat 6' not in game:
                continue
            c += 1
            if c % 10 == 0:
                print(c, '/', len(games))
            game_driver = make_data(game, sys.argv[2])
            game_driver.game_stream(-1)
else:
    with open(sys.argv[1]) as f:
        test_file = f.read()
    games = re.findall(r'PokerStars Zoom Hand \#.+?PokerStars Zoom Hand', test_file, re.DOTALL)
    print(len(games))
    random.shuffle(games)
    for game in games:
        while True:
            if re.findall(r'deoxy1909.*folded before Flop', game):
                break
            game = '\n'.join(game.split('\n')[:-3])
            change_terminal_color('green')
            print(game.strip('\n'))
#       del_stdout_line(len(game.splitlines())+1)
Example #21
def learn_signal_prior():
    Ncopy = 100
    SNR = 0.1
    seed = 1
    image_idx = 5
    use_sPCA = True
    r_max = 64
    Freqs_t = np.zeros((0, ))
    Coeff_raw_t = np.zeros((0, 1))
    for image_idx in range(20):
        Coeff, Freqs, rad_freqs, Mean, Phi_ns, sigma, Coeff_raw, rotations = make_data(
            Ncopy=Ncopy,
            SNR=SNR,
            seed=seed,
            image_idx=image_idx,
            sPCA=use_sPCA)
        Freqs_t = np.concatenate([Freqs_t, Freqs], axis=0)
        Coeff_raw_t = np.concatenate([Coeff_raw_t, Coeff_raw], axis=0)

    Freqs = Freqs_t
    Coeff_raw = Coeff_raw_t
    plt.subplot(211)
    plt.scatter(Freqs, np.abs(Coeff_raw), alpha=.5, marker='+')

    loss_vec = []
    x = np.expand_dims(Freqs, axis=-1)
    y = np.abs(Coeff_raw)
    alpha_range = np.arange(1 / 32, 0.3, 1 / 32)
    # alpha_range = [0,0.125]
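    # For each candidate decay rate alpha, fit the scale w by linear least
    # squares and record the mean squared fitting loss.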
    for alpha in alpha_range:
        x0 = 0
        A = np.concatenate([np.exp(-alpha * (x - x0))], axis=-1)
        w = np.linalg.pinv(A) @ y
        loss = np.mean((y - w[0] * np.exp(-alpha * (x - x0)))**2)
        loss_vec.append(loss)
        #plt.plot(w[0] * np.exp(-alpha * (np.arange(np.min(Freqs), np.max(Freqs)) - x0)), 'k', alpha=0.1)
    alpha_star = alpha_range[np.argmin(loss_vec)]
    A = np.concatenate([np.exp(-alpha_star * (x - x0))], axis=-1)
    w_opt = np.linalg.pinv(A) @ y
    plt.plot(w_opt[0] * np.exp(-alpha_star *
                               (np.arange(np.min(Freqs), np.max(Freqs)) - x0)),
             'r',
             alpha=0.5)

    plt.xlabel('Angular Frequency')
    plt.ylabel('Absolute Coefficient')
    plt.text(75,
             1,
             r'$C=%.2f e^{-%.3f \cdot f}$' % (w_opt[0], alpha_star),
             bbox=dict(facecolor='yellow', alpha=0.5))

    plt.subplot(212)
    plt.plot(alpha_range, loss_vec)
    plt.xlabel(r'$\alpha$')
    plt.ylabel('loss')
    plt.text(alpha_star - 0.025,
             np.mean(loss_vec),
             r'$\alpha^*=%.3f$' % alpha_star,
             bbox=dict(facecolor='yellow', alpha=0.5))
    plt.tight_layout()
    plt.savefig('2d_figures/fit_signal_prior_.png', bbox_inches="tight")
    plt.savefig('2d_figures/fit_signal_prior_.eps', bbox_inches="tight")
Example #22
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.model_selection import cross_val_score
from bayesian_optimization import BayesianOptimization
from make_data import make_data


def rfccv(n_estimators, min_samples_split):
    return cross_val_score(RFC(n_estimators=int(n_estimators),
                               min_samples_split=int(min_samples_split),
                               random_state=2,
                               n_jobs=-1),
                           train, train_labels, scoring='roc_auc', cv=5).mean()


if __name__ == "__main__":
    # Load data set and target values
    train, train_labels, test, test_labels = make_data(train_path="../input/train.csv", test_path="../input/test.csv")

    # RF
    rfcBO = BayesianOptimization(rfccv, {'n_estimators': (600, 800),
                                         'min_samples_split': (2, 5)})
    print('-' * 53)
    rfcBO.maximize()
    print('-' * 53)
    print('Final Results')
    print('RFC: %f' % rfcBO.res['max']['max_val'])

    # # MAKING SUBMISSION
    rf = cross_val_score(RFC(n_estimators=int(rfcBO.res['max']['max_params']['n_estimators']),
                             min_samples_split=int(rfcBO.res['max']['max_params']['min_samples_split']),
                             random_state=2,
                             n_jobs=-1),
Example #23
                         criterion="gini",
                         min_samples_leaf=3,
                         max_depth=15,
                         max_features="auto",
                         oob_score=True,
                         random_state=23)

# Columns of output dataframe
DF_COLS = [
    "n_samples", "n_features", "n_informative", "n_redundant", "n_repeated",
    "n_classes", "n_clusters_per_class", "seed", "mr", "n_diff"
]

all_data = make_data(N_SAMPLES,
                     N_CLASSES,
                     N_FEATURES,
                     N_INFO,
                     N_PER,
                     use_seed=True)


def run(idx):

    print("--Running Job for Index {}--".format(idx))

    # Generate Data
    data = all_data[idx]["data"]
    train_x, train_y = data["train"]
    test_x, test_y = data["test"]

    # Create, fit, and predict a RFC
    initial_clf = RandomForestClassifier(
Example #24
from plot_corr_effective_mass import plot_corr_effective_mass
from plot_corr_normalized     import plot_corr_normalized
from meta_data                import *
from util_files               import read_fit_file
import defines           as df
import define_prior      as dfp
import gvar              as gv
import gvar.dataset      as gvd
import matplotlib.pyplot as plt
import numpy             as np
import sys

if df.do_db:
 ## -- for database input
 ##    - (database file name defined in make_data_db.py)
 data,dset = make_data(df.mdp,do_makedata=df.do_makedata,do_db=True)
 models = make_models(data=data,lkey=df.lkey)
 prior = make_prior(models)
else:
 ## -- for raw correlator file input
 data,dset = make_data(df.mdp,do_makedata=df.do_makedata,\
                       do_db=False,filename="./import-correlators-bar2pt")
 data3,dset3 = make_data(df.mdp,do_makedata=df.do_makedata_3pt,\
                         do_db=False,filename="./import-correlators-bar3pt")
 models = make_models(data=data,lkey=df.lkey)
 prior = make_prior(models)
## --

## -- DEPRECATED
#if df.do_uncorr:
# ## -- remove the correlations from the data
Example #25
import multiprocessing as mp

# Imports from own modules
from make_data import make_data


# ***** Start here *****
pool = mp.Pool(processes=(mp.cpu_count() - 1))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
scaler = StandardScaler()
np.random.seed(42)

# %% Import transformed Data


X_train, y_train, X_test, y_test = make_data()


# %% Playing with ColumnTransformer and PipeLines


class ModelTransformer(BaseEstimator, TransformerMixin):

    def __init__(self, model, name):
        self.model = model
        self.name = name
        self.features = None

    def fit(self, *args, **kwargs):
        self.model.fit(*args, **kwargs)
        return self
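
    # Sketch (not part of the original excerpt): to work inside a Pipeline or
    # FeatureUnion, a ModelTransformer along these lines also needs a transform
    # method; one plausible version exposes the fitted model's predictions as a
    # single feature column (the output shape here is an assumption).
    def transform(self, X, **kwargs):
        return np.asarray(self.model.predict(X)).reshape(-1, 1)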
Example #26
def etccv(n_estimators, min_samples_split):
    return cross_val_score(AdaBoostClassifier(ETC(
        min_samples_split=int(min_samples_split), random_state=2, n_jobs=-1),
                                              algorithm="SAMME",
                                              n_estimators=int(n_estimators)),
                           train,
                           train_labels,
                           scoring='roc_auc',
                           cv=5).mean()


if __name__ == "__main__":
    # Load data set and target values
    train, train_labels, test, test_labels = \
        make_data(train_path="../input/xtrain_v5_full.csv",
                  test_path="../input/xtest_v5.csv")

    # RF
    etcBO = BayesianOptimization(etccv, {
        'n_estimators': (200, 800),
        'min_samples_split': (2, 8)
    })
    print('-' * 53)
    etcBO.maximize()
    print('-' * 53)
    print('Final Results')
    print('ETC: %f' % etcBO.res['max']['max_val'])

    # # MAKING SUBMISSION
    rf = cross_val_score(ETC(
        n_estimators=int(etcBO.res['max']['max_params']['n_estimators']),
Example #27
import define_prior      as dfp
import gvar              as gv
import gvar.dataset      as gvd
#import importlib         as impl ## -- import with variable name
import matplotlib.pyplot as plt
import numpy             as np
#import shutil            as shil ## -- copy files
import util_funcs        as utf

doParallel=True
maxProcesses=6

if df.do_db:
 ## -- for database input
 ##    - (database file name defined in make_data_db.py)
 data,dset = make_data(df.mdp,do_makedata=df.do_makedata,do_db=True)
 #models = make_models(data=data,lkey=df.lkey) ## -- make models in loop
else:
 ## -- for raw correlator file input
 data,dset = make_data(df.mdp,do_makedata=df.do_makedata,\
                       do_db=False,filename="./import-correlators")
## --
mdef = df.define_model

def doProcess(tmin,tmax,data=data,mdef=mdef):
  ## -- do a single fit ... set up for parallelizing
  for key in data:
    mdef[key]['tfit'] = range(tmin,tmax)
  models = make_models(data=data,lkey=df.lkey,mdef=mdef)
  pdict0 = utf.get_prior_dict(df.define_prior,
   df.define_prior['nkey'],df.define_prior['okey'],df.num_nst,df.num_ost)
Example #28
plt.plot([0, 0.5 * fs], [np.sqrt(0.5), np.sqrt(0.5)],
         'k--', alpha=0.6, linewidth=1, label=r'$\sqrt{2}/2$')
plt.xlim(0, 0.5*fs)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain')
plt.grid(alpha=0.5)
plt.legend(framealpha=1, shadow=True, loc='best')
plt.title("Amplitude response for\nButterworth bandpass filters", fontsize=10)
plt.text(430, 0.07, "lowcut: %4g Hz\nhighcut: %4g Hz" % (lowcut, highcut),
         fontsize=8)
plt.tight_layout()
plt.savefig("bandpass_example_response.pdf")

T = 0.03
t, x = make_data(T, fs)

y = butter_bandpass_filt(x, lowcut, highcut, fs, order=12)

plt.figure(figsize=(4.0, 2.8))
plt.plot(t, x, 'k', alpha=0.4, linewidth=1, label='Noisy signal')

plt.plot(t, y, 'C0', label='Filtered signal')
plt.xlabel('Time (seconds)')
plt.grid(alpha=0.5)
plt.axis('tight')
plt.xlim(0, T)
plt.legend(framealpha=1, shadow=True, loc='upper left')
plt.tight_layout()
plt.savefig("bandpass_example_signals.pdf")
Example #29
def dissemination_RSU():
    # For now, use a random function;
    # implement VDI and VTI to build the RTI,
    # and write main() so the value can keep being updated, per the algorithm.
    # VDI - VTI - VT - RTI

    return RTI


###############################  main start ####################################
v1_info = ''
v2_info = ''
v3_info = ''

data = make_data.make_data()

origin_data = data.csv2list()
# origin_packet=''
# print(origin_data)

vehicle_data = data.make_Vdata_list(v1_info, v2_info, v3_info)
# malicious_vehicle(origin_data)
# social_activities(vehicle_data)

# The RTI must reflect all of the RSU's data; for now, assume information from three vehicles has been collected.
# In main, the data and its creation location/time are set differently to judge whether it is malicious.
# RTI: vid, VT, v_position, VT_updateTime
# VDI: vid, VT, data, creationTime, received_data
# VTI: vid, VT, DR, AS, NT, UR, connectivity, VT_updateTime, PVT
Example #30
import numpy as np
import json
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras import models, layers, optimizers, losses, metrics
from make_data import make_data

# generate baseline data for the first training of the model
winning_boards, winner_history = make_data(model=None,
                                           iterations=1000,
                                           probability=0)

print()
print(
    '##############################################################################################################################################'
)
print('Winners of the intial random games')
print(f'Player 1: {winner_history[1]["wins"]/10} %')
print(f'Player 2: {winner_history[-1]["wins"]/10} %')
print(f'Tie Game: {winner_history["tie"]["wins"]/10} %')
print(
    '##############################################################################################################################################'
)
print()

all_winning_boards = winning_boards['boards']
all_winning_moves = winning_boards['moves']

for model_iteration in range(5):

    X = np.array(all_winning_boards)
Example #31
                         np.arange(y_min, y_max, y_mesh_step_size))
    values = reg.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    values = values.reshape(xx.shape)
    ax.pcolormesh(xx, yy, values, cmap='viridis', vmin=v_min, vmax=v_max)

    # Plot the training points, saving the colormap
    sctr = ax.scatter(feature_1,
                      feature_2,
                      c=y,
                      cmap='viridis',
                      edgecolor='black',
                      lw=0.2)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())

    ax.set_title("Regression predictions (k = {0}, metric = '{1}')".format(
        reg.k, reg.distance.__name__))


if __name__ == "__main__":
    X, y = make_data(n_features=2, n_pts=300, noise=0.1)
    knn = KNNRegressor(k=1, distance=manhattan_distance)
    knn.fit(X, y)
    y_pred = knn.predict(X)
    print(mean_squared_error(y, y_pred))
    fig, ax = plt.subplots()
    plot_predictions(ax, knn, X, y)
    plt.show()
Example #32
                        objective="binary:logistic")

    # Run Kfolds on the data model to stop over-fitting
    X_train, X_valid, y_train, y_valid = train_test_split(train,
                                                          train_labels,
                                                          test_size=0.1,
                                                          random_state=seed)
    xgb_model = clf.fit(X_train, y_train, eval_metric="auc",
                        eval_set=[(X_valid, y_valid)], early_stopping_rounds=20)
    y_pred = xgb_model.predict_proba(X_valid)[:, 1]

    return auc(y_valid, y_pred)

if __name__ == "__main__":
    # Load data set and target values
    train, train_labels, test, test_labels = \
        make_data(train_path="../input/xtrain_v6.csv", test_path="../input/xtest_v6.csv")

    xgboostBO = BayesianOptimization(xgboostcv,
                                     {'max_depth': (8, 30),
                                      'learning_rate': (0.1, 0.8),
                                      'n_estimators': (250, 1500),
                                      'gamma': (0.01, 1.),
                                      'min_child_weight': (2, 20),
                                      'max_delta_step': (0., 0.3),
                                      'subsample': (0.7, 0.85),
                                      'colsample_bytree': (0.7, 0.85)
                                     })

    xgboostBO.maximize(init_points=7, restarts=50, n_iter=30)
    print('-' * 53)