Example #1
def main(_):
    create_dirs([FLAGS.summary_dir])

    config = recurrent_neural_network_config()
    model = RecurrentNeuralNetwork(config)

    sess = tf.Session()

    trainer = RecurrentNeuralNetworkTrainer(sess, model, FLAGS)

    trainer.train()
def main(activation):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    os.makedirs('trained_model', exist_ok=True)
    save_path = f'trained_model/{activation}'
    os.makedirs(save_path, exist_ok=True)

    model = RecurrentNeuralNetwork(n_in=1,
                                   n_out=1,
                                   n_hid=200,
                                   device=device,
                                   activation=activation,
                                   sigma=0,
                                   use_bias=True).to(device)

    train_dataset = SineWave(freq_range=51, time_length=40)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=50,
        num_workers=2,
        shuffle=True,
        worker_init_fn=lambda x: np.random.seed())

    print(model)

    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=0.001,
                           weight_decay=0.0001)

    for epoch in range(2001):
        model.train()
        for i, data in enumerate(train_dataloader):
            inputs, target = data
            inputs, target = inputs.float(), target.float()
            inputs, target = Variable(inputs).to(device), Variable(target).to(
                device)

            hidden = torch.zeros(50, 200)
            hidden = hidden.to(device)

            optimizer.zero_grad()
            hidden = hidden.detach()
            hidden_list, output, hidden = model(inputs, hidden)

            loss = torch.nn.MSELoss()(output, target)
            loss.backward()
            optimizer.step()

        if epoch > 0 and epoch % 200 == 0:
            print(f'Train Epoch: {epoch}, Loss: {loss.item():.6f}')
            print('output', output[0, :, 0].cpu().detach().numpy())
            print('target', target[0, :, 0].cpu().detach().numpy())
            torch.save(model.state_dict(),
                       os.path.join(save_path, f'epoch_{epoch}.pth'))
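The SineWave dataset class isn't included in this excerpt. A minimal sketch of what it plausibly looks like, assuming each item pairs a constant input cue of freq / freq_range + 0.25 (the encoding used in the fixed-point script later in this listing) with the sine wave to be reproduced; the class name and constructor signature come from the call above, everything else is an assumption:

import numpy as np
import torch

class SineWave(torch.utils.data.Dataset):
    """Hypothetical sketch: constant frequency cue in, sine wave out."""

    def __init__(self, freq_range=51, time_length=40):
        self.freq_range = freq_range
        self.time_length = time_length

    def __len__(self):
        return 200  # arbitrary epoch size; the real value is unknown

    def __getitem__(self, index):
        freq = np.random.randint(1, self.freq_range + 1)
        t = np.arange(self.time_length)
        # constant cue encoding freq, shape (time_length, 1) to match n_in=1
        const_signal = np.repeat(freq / self.freq_range + 0.25,
                                 self.time_length).reshape(-1, 1)
        # assumed target: the sine wave at that frequency, shape (time_length, 1)
        target = np.sin(2 * np.pi * freq * t / self.time_length).reshape(-1, 1)
        return const_signal.astype(np.float32), target.astype(np.float32)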
Example #3
def main(config_path):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_3000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 1000
    neural_dynamics = np.zeros((trial_num, 1001, model.n_hid))
    outputs_np = np.zeros(trial_num)
    input_signal, omega_1_list = romo_signal(trial_num,
                                             signal_length=15,
                                             sigma_in=0.05,
                                             time_length=1000)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        outputs_np[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                   cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                       outputs.detach().numpy()[:, -1], axis=1)
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    covariance_matrix = np.zeros((model.n_hid, model.n_hid))
    for i in range(trial_num):
        covariance_matrix += np.outer(neural_dynamics[i, 45],
                                      neural_dynamics[i, 45])

    covariance_matrix = covariance_matrix / trial_num
    w, v = np.linalg.eig(covariance_matrix)

    lin_d = np.sum(w.real)**2 / np.sum(w.real**2)

    pca = PCA()
    pca.fit(neural_dynamics[:, 45, :])

    pc_lin_dim = np.sum(pca.explained_variance_)**2 / np.sum(
        pca.explained_variance_**2)

    print(lin_d, pc_lin_dim)

    lin_dim = 0
    for i in range(trial_num):
        pca = PCA()
        pca.fit(neural_dynamics[i, 25:50, :])

        lin_dim += np.sum(pca.explained_variance_)**2 / np.sum(
            pca.explained_variance_**2)
        # print(lin_dim)
    print(lin_dim / trial_num)
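Both dimensionality numbers printed above are the participation ratio of an eigenvalue spectrum, (Σλᵢ)² / Σλᵢ². A helper equivalent to the two inline expressions (the function name is ours, not the repo's):

import numpy as np

def participation_ratio(eigvals):
    # PR = (sum_i lambda_i)^2 / sum_i lambda_i^2, where lambda_i are
    # eigenvalues of the covariance matrix (or PCA explained variances).
    eigvals = np.asarray(eigvals, dtype=float)
    return eigvals.sum() ** 2 / np.sum(eigvals ** 2)

# e.g. participation_ratio(w.real) or participation_ratio(pca.explained_variance_)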
Example #4
def main(config_path, sigma_in, signal_length):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('results/', exist_ok=True)
    save_path = 'results/accuracy_w_dropout_noise_2/'
    os.makedirs(save_path, exist_ok=True)

    results_acc = np.zeros((11, 25))

    for acc_idx in range(11):
        # load the model
        torch.manual_seed(1)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        cfg['MODEL']['SIGMA_NEU'] = 0
        model = RecurrentNeuralNetwork(n_in=1, n_out=2, n_hid=cfg['MODEL']['SIZE'], device=device,
                                       alpha_time_scale=0.25, beta_time_scale=cfg['MODEL']['BETA'],
                                       activation=cfg['MODEL']['ACTIVATION'],
                                       sigma_neu=cfg['MODEL']['SIGMA_NEU'],
                                       sigma_syn=cfg['MODEL']['SIGMA_SYN'],
                                       use_bias=cfg['MODEL']['USE_BIAS'],
                                       anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

        model_path = f'trained_model/romo/{model_name}/epoch_{cfg["TRAIN"]["NUM_EPOCH"]}.pth'
        model.load_state_dict(torch.load(model_path, map_location=device))

        model.eval()

        # original weights
        original_w_hh = model.w_hh.weight.data.clone()
        # add dropout noise
        dropout_ratio = 0.05 * acc_idx

        for noise_idx in range(25):
            correct = 0
            num_data = 0
            mask = np.random.choice([0, 1], model.n_hid * model.n_hid, p=[dropout_ratio, 1 - dropout_ratio])
            mask = mask.reshape(model.n_hid, model.n_hid)
            torch_mask = torch.from_numpy(mask).float().to(device)
            new_w = torch.mul(original_w_hh, torch_mask)
            model.w_hh.weight = torch.nn.Parameter(new_w, requires_grad=False)
            for delta_idx in range(10):
                while True:
                    delta = np.random.rand() * 8 - 4
                    if abs(delta) >= 1:
                        break
                N = 100
                output_list = np.zeros(N)
                input_signal = romo_signal(delta, N, signal_length, sigma_in)
                input_signal_split = np.split(input_signal, 2)
                for i in range(2):
                    hidden = torch.zeros(50, model.n_hid)
                    hidden = hidden.to(device)
                    inputs = torch.from_numpy(input_signal_split[i]).float()
                    inputs = inputs.to(device)
                    _, outputs, _, _ = model(inputs, hidden)
                    outputs_np = outputs.cpu().detach().numpy()
                    output_list[i * 50: (i + 1) * 50] = np.argmax(outputs_np[:, -1], axis=1)
                num_data += 100
                if delta > 0:
                    ans = 1
                else:
                    ans = 0
                correct += (output_list == ans).sum()
            results_acc[acc_idx, noise_idx] = correct / num_data

    np.save(os.path.join(save_path, f'{model_name}.npy'), results_acc)
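The masking step above zeroes each recurrent weight independently with probability dropout_ratio and freezes the result. The same operation as a standalone helper (hypothetical name; like the code above, it assumes model.w_hh exposes a .weight parameter):

import numpy as np
import torch

def apply_weight_dropout(model, original_w_hh, dropout_ratio, device):
    # Zero each recurrent weight independently with probability dropout_ratio,
    # then install the masked matrix as a frozen parameter.
    mask = np.random.choice([0, 1], model.n_hid * model.n_hid,
                            p=[dropout_ratio, 1 - dropout_ratio])
    torch_mask = torch.from_numpy(
        mask.reshape(model.n_hid, model.n_hid)).float().to(device)
    model.w_hh.weight = torch.nn.Parameter(original_w_hh * torch_mask,
                                           requires_grad=False)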
def main(config_path):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    # save path
    os.makedirs('trained_model', exist_ok=True)
    os.makedirs('trained_model/freq_schedule2', exist_ok=True)
    save_path = f'trained_model/freq_schedule2/{model_name}'
    os.makedirs(save_path, exist_ok=True)

    # copy config file
    shutil.copyfile(config_path,
                    os.path.join(save_path, os.path.basename(config_path)))

    use_cuda = cfg['MACHINE']['CUDA'] and torch.cuda.is_available()
    torch.manual_seed(cfg['MACHINE']['SEED'])
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    if 'ALPHA' not in cfg['MODEL'].keys():
        cfg['MODEL']['ALPHA'] = 0.25

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=cfg['MODEL']['ALPHA'],
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    train_dataset = FreqDataset(
        time_length=cfg['DATALOADER']['TIME_LENGTH'],
        time_scale=cfg['MODEL']['ALPHA'],
        freq_min=cfg['DATALOADER']['FREQ_MIN'],
        freq_max=cfg['DATALOADER']['FREQ_MAX'],
        min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
        signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
        variable_signal_length=cfg['DATALOADER']['VARIABLE_SIGNAL_LENGTH'],
        sigma_in=cfg['DATALOADER']['SIGMA_IN'],
        delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg['TRAIN']['BATCHSIZE'],
        num_workers=2,
        shuffle=True,
        worker_init_fn=lambda x: np.random.seed())

    print(model)
    print('Epoch Loss Acc')

    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=cfg['TRAIN']['LR'],
                           weight_decay=cfg['TRAIN']['WEIGHT_DECAY'])
    correct = 0
    num_data = 0
    phase2 = False
    phase3 = False
    phase4 = False
    phase5 = False
    phase6 = False
    if 'PHASE_TRANSIT' in cfg['TRAIN'].keys():
        phase_transition_criteria = cfg['TRAIN']['PHASE_TRANSIT']
    else:
        phase_transition_criteria = [0.5, 0.45, 0.4, 0.3, 0.2]
    for epoch in range(cfg['TRAIN']['NUM_EPOCH'] + 1):
        model.train()
        for i, data in enumerate(train_dataloader):
            inputs, target = data
            # print(inputs.shape)
            inputs, target = inputs.float(), target.long()
            inputs, target = Variable(inputs).to(device), Variable(target).to(
                device)

            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'],
                                 cfg['MODEL']['SIZE'])
            hidden = hidden.to(device)

            optimizer.zero_grad()
            hidden = hidden.detach()
            hidden_list, output, hidden, new_j = model(inputs, hidden)
            # print(output)

            loss = torch.nn.CrossEntropyLoss()(output[:, -1], target)
            dummy_zero = torch.zeros([
                cfg['TRAIN']['BATCHSIZE'],
                int(cfg['DATALOADER']['TIME_LENGTH'] -
                    2 * cfg['DATALOADER']['SIGNAL_LENGTH']),
                cfg['MODEL']['SIZE']
            ]).float().to(device)
            active_norm = torch.nn.MSELoss()(
                hidden_list[:, cfg['DATALOADER']['SIGNAL_LENGTH']:
                            cfg['DATALOADER']['TIME_LENGTH'] -
                            cfg['DATALOADER']['SIGNAL_LENGTH'], :], dummy_zero)

            loss += cfg['TRAIN']['ACTIVATION_LAMBDA'] * active_norm
            loss.backward()
            optimizer.step()
            correct += (np.argmax(
                output[:, -1].cpu().detach().numpy(),
                axis=1) == target.cpu().detach().numpy()).sum().item()
            num_data += target.cpu().detach().numpy().shape[0]

            if not phase2 and float(
                    loss.item()) < phase_transition_criteria[0]:
                cfg['MODEL']['ALPHA'] = 0.2
                cfg['DATALOADER']['TIME_LENGTH'] = 95
                cfg['DATALOADER']['SIGNAL_LENGTH'] = 20
                cfg['DATALOADER']['VARIABLE_DELAY'] = 6

                print("phase2 start! cfg['MODEL']['ALPHA'] = 0.2")
                phase2 = True
                model.change_alpha(cfg['MODEL']['ALPHA'])
                train_dataset = FreqDataset(
                    time_length=cfg['DATALOADER']['TIME_LENGTH'],
                    time_scale=cfg['MODEL']['ALPHA'],
                    freq_min=cfg['DATALOADER']['FREQ_MIN'],
                    freq_max=cfg['DATALOADER']['FREQ_MAX'],
                    min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
                    signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
                    variable_signal_length=cfg['DATALOADER']
                    ['VARIABLE_SIGNAL_LENGTH'],
                    sigma_in=cfg['DATALOADER']['SIGMA_IN'],
                    delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])
                train_dataloader = torch.utils.data.DataLoader(
                    train_dataset,
                    batch_size=cfg['TRAIN']['BATCHSIZE'],
                    num_workers=2,
                    shuffle=True,
                    worker_init_fn=lambda x: np.random.seed())
                break

            if not phase3 and float(
                    loss.item()) < phase_transition_criteria[1]:
                cfg['MODEL']['ALPHA'] = 0.175
                cfg['DATALOADER']['TIME_LENGTH'] = 110
                cfg['DATALOADER']['SIGNAL_LENGTH'] = 22
                cfg['DATALOADER']['VARIABLE_DELAY'] = 7

                print("phase3 start! cfg['MODEL']['ALPHA'] = 0.175")
                phase3 = True
                model.change_alpha(cfg['MODEL']['ALPHA'])
                train_dataset = FreqDataset(
                    time_length=cfg['DATALOADER']['TIME_LENGTH'],
                    time_scale=cfg['MODEL']['ALPHA'],
                    freq_min=cfg['DATALOADER']['FREQ_MIN'],
                    freq_max=cfg['DATALOADER']['FREQ_MAX'],
                    min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
                    signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
                    variable_signal_length=cfg['DATALOADER']
                    ['VARIABLE_SIGNAL_LENGTH'],
                    sigma_in=cfg['DATALOADER']['SIGMA_IN'],
                    delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])
                train_dataloader = torch.utils.data.DataLoader(
                    train_dataset,
                    batch_size=cfg['TRAIN']['BATCHSIZE'],
                    num_workers=2,
                    shuffle=True,
                    worker_init_fn=lambda x: np.random.seed())
                break

            if not phase4 and float(
                    loss.item()) < phase_transition_criteria[2]:
                cfg['MODEL']['ALPHA'] = 0.15
                cfg['DATALOADER']['TIME_LENGTH'] = 120
                cfg['DATALOADER']['SIGNAL_LENGTH'] = 25
                cfg['DATALOADER']['VARIABLE_DELAY'] = 8

                print("phase4 start! cfg['MODEL']['ALPHA'] = 0.15")
                phase4 = True
                model.change_alpha(cfg['MODEL']['ALPHA'])
                train_dataset = FreqDataset(
                    time_length=cfg['DATALOADER']['TIME_LENGTH'],
                    time_scale=cfg['MODEL']['ALPHA'],
                    freq_min=cfg['DATALOADER']['FREQ_MIN'],
                    freq_max=cfg['DATALOADER']['FREQ_MAX'],
                    min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
                    signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
                    variable_signal_length=cfg['DATALOADER']
                    ['VARIABLE_SIGNAL_LENGTH'],
                    sigma_in=cfg['DATALOADER']['SIGMA_IN'],
                    delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])
                train_dataloader = torch.utils.data.DataLoader(
                    train_dataset,
                    batch_size=cfg['TRAIN']['BATCHSIZE'],
                    num_workers=2,
                    shuffle=True,
                    worker_init_fn=lambda x: np.random.seed())
                break

            if not phase5 and float(
                    loss.item()) < phase_transition_criteria[3]:
                cfg['MODEL']['ALPHA'] = 0.10
                cfg['DATALOADER']['TIME_LENGTH'] = 160
                cfg['DATALOADER']['SIGNAL_LENGTH'] = 30
                cfg['DATALOADER']['VARIABLE_DELAY'] = 10

                print("phase5 start! cfg['MODEL']['ALPHA'] = 0.1")
                phase5 = True
                model.change_alpha(cfg['MODEL']['ALPHA'])
                train_dataset = FreqDataset(
                    time_length=cfg['DATALOADER']['TIME_LENGTH'],
                    time_scale=cfg['MODEL']['ALPHA'],
                    freq_min=cfg['DATALOADER']['FREQ_MIN'],
                    freq_max=cfg['DATALOADER']['FREQ_MAX'],
                    min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
                    signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
                    variable_signal_length=cfg['DATALOADER']
                    ['VARIABLE_SIGNAL_LENGTH'],
                    sigma_in=cfg['DATALOADER']['SIGMA_IN'],
                    delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])
                train_dataloader = torch.utils.data.DataLoader(
                    train_dataset,
                    batch_size=cfg['TRAIN']['BATCHSIZE'],
                    num_workers=2,
                    shuffle=True,
                    worker_init_fn=lambda x: np.random.seed())
                break

            if not phase6 and float(
                    loss.item()) < phase_transition_criteria[4]:
                cfg['MODEL']['ALPHA'] = 0.075
                cfg['DATALOADER']['TIME_LENGTH'] = 267
                cfg['DATALOADER']['SIGNAL_LENGTH'] = 50
                cfg['DATALOADER']['VARIABLE_DELAY'] = 15

                print("phase6 start! cfg['MODEL']['ALPHA'] = 0.075")
                phase6 = True
                model.change_alpha(cfg['MODEL']['ALPHA'])
                train_dataset = FreqDataset(
                    time_length=cfg['DATALOADER']['TIME_LENGTH'],
                    time_scale=cfg['MODEL']['ALPHA'],
                    freq_min=cfg['DATALOADER']['FREQ_MIN'],
                    freq_max=cfg['DATALOADER']['FREQ_MAX'],
                    min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
                    signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
                    variable_signal_length=cfg['DATALOADER']
                    ['VARIABLE_SIGNAL_LENGTH'],
                    sigma_in=cfg['DATALOADER']['SIGMA_IN'],
                    delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])
                train_dataloader = torch.utils.data.DataLoader(
                    train_dataset,
                    batch_size=cfg['TRAIN']['BATCHSIZE'],
                    num_workers=2,
                    shuffle=True,
                    worker_init_fn=lambda x: np.random.seed())
                break

        if epoch % cfg['TRAIN']['DISPLAY_EPOCH'] == 0:
            acc = correct / num_data
            print(f'{epoch}, {loss.item():.6f}, {acc:.6f}')
            print(f'activation norm: {active_norm.item():.4f}, '
                  f'time scale: {model.alpha.detach().cpu().numpy()[0]:.3f}')
            correct = 0
            num_data = 0
        if epoch > 0 and epoch % cfg['TRAIN']['NUM_SAVE_EPOCH'] == 0:
            torch.save(model.state_dict(),
                       os.path.join(save_path, f'epoch_{epoch}.pth'))
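The five phase-transition blocks in the loop above differ only in four constants. A table-driven sketch of the same logic (our refactor, not the repo's code; it assumes the criteria list is decreasing, as the default [0.5, 0.45, 0.4, 0.3, 0.2] is):

# (alpha, time_length, signal_length, variable_delay) for phases 2..6
PHASES = [(0.2, 95, 20, 6),
          (0.175, 110, 22, 7),
          (0.15, 120, 25, 8),
          (0.10, 160, 30, 10),
          (0.075, 267, 50, 15)]

def maybe_advance_phase(loss_value, phase, criteria, cfg, model):
    # phase counts completed transitions (0 = still in phase 1).
    # On a transition the caller rebuilds FreqDataset and the dataloader
    # from the updated cfg and breaks out of the batch loop, as above.
    if phase < len(PHASES) and loss_value < criteria[phase]:
        alpha, t_len, s_len, v_delay = PHASES[phase]
        cfg['MODEL']['ALPHA'] = alpha
        cfg['DATALOADER']['TIME_LENGTH'] = t_len
        cfg['DATALOADER']['SIGNAL_LENGTH'] = s_len
        cfg['DATALOADER']['VARIABLE_DELAY'] = v_delay
        model.change_alpha(alpha)
        print(f"phase{phase + 2} start! cfg['MODEL']['ALPHA'] = {alpha}")
        return phase + 1
    return phase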
Example #6
def main(config_path, sigma_in):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['ALPHA'] = 0.075
    cfg['DATALOADER']['TIME_LENGTH'] = 200
    cfg['DATALOADER']['SIGNAL_LENGTH'] = 50
    cfg['DATALOADER']['VARIABLE_DELAY'] = 15

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('../results/', exist_ok=True)
    save_path = '../results/accuracy_w_neural_noise/'
    os.makedirs(save_path, exist_ok=True)

    print('sigma_neu accuracy')
    # For one trained model, vary sigma_neu^test from 0 to 0.15 and record the accuracy at each value.
    results_acc = np.zeros(16)

    for acc_idx in range(16):
        # load the model
        torch.manual_seed(1)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        cfg['MODEL']['SIGMA_NEU'] = 0.01 * acc_idx
        model = RecurrentNeuralNetwork(
            n_in=1,
            n_out=2,
            n_hid=cfg['MODEL']['SIZE'],
            device=device,
            alpha_time_scale=cfg['MODEL']['ALPHA'],
            beta_time_scale=cfg['MODEL']['BETA'],
            activation=cfg['MODEL']['ACTIVATION'],
            sigma_neu=cfg['MODEL']['SIGMA_NEU'],
            sigma_syn=cfg['MODEL']['SIGMA_SYN'],
            use_bias=cfg['MODEL']['USE_BIAS'],
            anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

        model_path = f'../trained_model/freq_schedule/{model_name}/epoch_{cfg["TRAIN"]["NUM_EPOCH"]}.pth'
        model.load_state_dict(torch.load(model_path, map_location=device))

        model.eval()

        correct = 0
        num_data = 0
        # print('delta correct_rate')
        for delta_idx in range(50):
            while True:
                delta = np.random.rand() * 8 - 4
                if abs(delta) >= 1:
                    break
            N = 500
            output_list = np.zeros(N)
            input_signal = romo_signal(delta, N,
                                       cfg['DATALOADER']['TIME_LENGTH'],
                                       cfg['DATALOADER']['SIGNAL_LENGTH'],
                                       sigma_in, cfg['MODEL']['ALPHA'])
            input_signal_split = np.split(input_signal, 10)
            for i in range(10):
                hidden = torch.zeros(50, model.n_hid)
                hidden = hidden.to(device)
                inputs = torch.from_numpy(input_signal_split[i]).float()
                inputs = inputs.to(device)
                hidden_list, outputs, _, _ = model(inputs, hidden)
                outputs_np = outputs.cpu().detach().numpy()
                output_list[i * 50:(i + 1) * 50] = np.argmax(outputs_np[:, -1],
                                                             axis=1)
            num_data += 500
            if delta > 0:
                ans = 1
            else:
                ans = 0
            correct += (output_list == ans).sum()
            if delta_idx % 10 == 0:
                print(delta_idx, f'{delta:.4f}',
                      (output_list == ans).sum() / 500)
            # print(f'{delta:.3f}', (output_list == ans).sum() / 200)

        results_acc[acc_idx] = correct / num_data
        print(cfg['MODEL']['SIGMA_NEU'], correct / num_data)

    np.save(os.path.join(save_path, f'{model_name}.npy'), results_acc)
Example #7
def main(config_path):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    convergence_list = []
    for epoch in range(500, 3100, 100):
        model = RecurrentNeuralNetwork(n_in=1, n_out=2, n_hid=cfg['MODEL']['SIZE'], device=device,
                                       alpha_time_scale=0.25, beta_time_scale=cfg['MODEL']['BETA'],
                                       activation=cfg['MODEL']['ACTIVATION'],
                                       sigma_neu=cfg['MODEL']['SIGMA_NEU'],
                                       sigma_syn=cfg['MODEL']['SIGMA_SYN'],
                                       use_bias=cfg['MODEL']['USE_BIAS'],
                                       anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
        model_path = f'../trained_model/freq/{model_name}/epoch_{epoch}.pth'
        model.load_state_dict(torch.load(model_path, map_location=device))
        model.eval()

        trial_num = 100
        neural_dynamics = np.zeros((trial_num, 2001, model.n_hid))
        outputs_np = np.zeros(trial_num)
        input_signal, omega_1_list = romo_signal(trial_num, signal_length=15, sigma_in=0.05, time_length=2000)
        input_signal_split = np.split(input_signal, trial_num // cfg['TRAIN']['BATCHSIZE'])

        for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
            hidden = hidden.to(device)
            inputs = torch.from_numpy(input_signal_split[i]).float()
            inputs = inputs.to(device)
            hidden_list, outputs, _, _ = model(inputs, hidden)
            hidden_list_np = hidden_list.cpu().detach().numpy()
            outputs_np[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                outputs.detach().numpy()[:, -1], axis=1)
            neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

        norm_list = []
        for timepoint in range(15, 45):
            active_norm = np.mean(np.linalg.norm(neural_dynamics[:, timepoint, :], axis=1))
            norm_list.append(active_norm)

        min_norm = np.min(norm_list)

        speed_list = []
        for timepoint in range(40, 50):
            active_speed = np.mean(
                np.linalg.norm(neural_dynamics[:, timepoint, :] - neural_dynamics[:, timepoint-1, :], axis=1))
            speed_list.append(active_speed)

        mean_speed = np.mean(speed_list)

        norm_list = []
        for timepoint in range(40, 50):
            active_norm = np.mean(np.linalg.norm(neural_dynamics[:, timepoint, :], axis=1))
            norm_list.append(active_norm)

        mean_norm = np.mean(norm_list)

        for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
            hidden = hidden.to(device)
            inputs = torch.from_numpy(input_signal_split[i]).float()
            inputs = inputs.to(device)
            hidden_list, outputs, _, _ = model(inputs, hidden)
            hidden_list_np = hidden_list.cpu().detach().numpy()
            outputs_np[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                outputs.detach().numpy()[:, -1], axis=1)
            neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

        norm_list = []
        for i in range(300):
            norm_list.append(np.linalg.norm(neural_dynamics[0, i + 500, :] - neural_dynamics[0, 500, :]))

        mean_lim_cycle_radius = np.mean(np.linalg.norm(neural_dynamics[:, 500:600, :], axis=2))

        period_list = []
        # argmin over norm_list[1:] is offset by one, so the estimated
        # period is period + 1, as used below.
        period = np.argmin(norm_list[1:])

        for i in range(40, 1500):
            period_list.append(
                np.mean(np.linalg.norm(neural_dynamics[:, i, :] - neural_dynamics[:, i + period + 1, :], axis=1)))
        """
        trial_num = 1000
        neural_dynamics = np.zeros((trial_num, 1001, model.n_hid))
        outputs_np = np.zeros(trial_num)
        input_signal, omega_1_list = romo_signal(trial_num, signal_length=15, sigma_in=0.05, time_length=1000)
        input_signal_split = np.split(input_signal, trial_num // cfg['TRAIN']['BATCHSIZE'])

        for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
            hidden = hidden.to(device)
            inputs = torch.from_numpy(input_signal_split[i]).float()
            inputs = inputs.to(device)
            hidden_list, outputs, _, _ = model(inputs, hidden)
            hidden_list_np = hidden_list.cpu().detach().numpy()
            outputs_np[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                outputs.detach().numpy()[:, -1], axis=1)
            neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

        pca = PCA()
        pca.fit(neural_dynamics[:, 45, :])

        pc_lin_dim = np.sum(pca.explained_variance_) ** 2 / np.sum(pca.explained_variance_ ** 2)
        """

        if np.min(period_list) > 0.05:
            for i in range(len(period_list)):
                # print(period_list[i])
                if period_list[i] < np.min(period_list) * 1.05:
                    print(epoch, min_norm, mean_speed, i + 40, mean_lim_cycle_radius, mean_norm, '!')
                    convergence_list.append(i+40)
                    break
        else:
            for i in range(len(period_list)):
                # print(period_list[i])
                if period_list[i] < 0.05:
                    print(epoch, min_norm, mean_speed, i + 40, mean_lim_cycle_radius, mean_norm)
                    convergence_list.append(i + 40)
                    break

    plt.plot(
        np.arange(500, 3100, 100),
        convergence_list,
        color='coral',
    )
    plt.xlabel('Epoch')
    plt.ylabel('Convergence time')
    plt.savefig('convergence_log.png', dpi=200)
Example #8
def main(config_path, sigma_in, signal_length, time_length, epoch):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    # load the model
    torch.manual_seed(1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cfg['MODEL']['SIGMA_NEU'] = 0
    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    model_path = f'../trained_model/freq/{model_name}/epoch_{epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    os.makedirs('results/', exist_ok=True)
    save_path = 'results/psychometric_curve/'
    os.makedirs(save_path, exist_ok=True)

    deltas = np.arange(-2, 2, 0.05)
    N = 1000
    score = np.zeros(deltas.shape[0])
    delta_index = 0
    print('delta score')
    for delta in deltas:
        output_list = np.zeros(N)
        input_signal = romo_signal(delta, N, signal_length, sigma_in,
                                   time_length)
        input_signal_split = np.split(input_signal, 4)
        for i in range(4):
            hidden = torch.zeros(250, model.n_hid)
            hidden = hidden.to(device)
            inputs = torch.from_numpy(input_signal_split[i]).float()
            inputs = inputs.to(device)
            _, outputs, _, _ = model(inputs, hidden)
            outputs_np = outputs.cpu().detach().numpy()
            output_list[i * 250:(i + 1) * 250] = np.argmax(outputs_np[:, -1],
                                                           axis=1)
        score[delta_index] = np.mean(output_list)
        if delta_index % 5 == 0:
            print(f'{delta:.3f}', np.mean(output_list))
        delta_index += 1
    if sigma_in == 0.05:
        np.save(
            os.path.join(save_path, f'{model_name}_{time_length}_{epoch}.npy'),
            score)
    else:
        np.save(os.path.join(save_path, f'{model_name}_{sigma_in}.npy'), score)
def main(config_path):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    if 'CHECK_TIMING' not in cfg['DATALOADER']:
        cfg['DATALOADER']['CHECK_TIMING'] = 5

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    # save path
    os.makedirs('trained_model', exist_ok=True)
    os.makedirs('trained_model/static_input', exist_ok=True)
    save_path = f'trained_model/static_input/{model_name}'
    os.makedirs(save_path, exist_ok=True)

    # copy config file
    shutil.copyfile(config_path, os.path.join(save_path, os.path.basename(config_path)))

    use_cuda = cfg['MACHINE']['CUDA'] and torch.cuda.is_available()
    torch.manual_seed(cfg['MACHINE']['SEED'])
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    model = RecurrentNeuralNetwork(n_in=1, n_out=1, n_hid=cfg['MODEL']['SIZE'], device=device,
                                   alpha_time_scale=cfg['MODEL']['ALPHA'],
                                   activation=cfg['MODEL']['ACTIVATION'],
                                   sigma_neu=cfg['MODEL']['SIGMA_NEU'],
                                   use_bias=cfg['MODEL']['USE_BIAS']).to(device)

    train_dataset = StaticInput(time_length=cfg['DATALOADER']['TIME_LENGTH'],
                                time_scale=cfg['MODEL']['ALPHA'],
                                value_min=cfg['DATALOADER']['VALUE_MIN'],
                                value_max=cfg['DATALOADER']['VALUE_MAX'],
                                signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
                                variable_signal_length=cfg['DATALOADER']['VARIABLE_SIGNAL_LENGTH'],
                                sigma_in=cfg['DATALOADER']['SIGMA_IN'])

    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg['TRAIN']['BATCHSIZE'],
                                                   num_workers=2, shuffle=True,
                                                   worker_init_fn=lambda x: np.random.seed())

    print(model)
    print('Epoch Loss')

    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=cfg['TRAIN']['LR'], weight_decay=cfg['TRAIN']['WEIGHT_DECAY'])
    for epoch in range(cfg['TRAIN']['NUM_EPOCH'] + 1):
        model.train()
        for i, data in enumerate(train_dataloader):
            inputs, target = data
            # print(inputs.shape)
            inputs, target = inputs.float(), target.float()
            inputs, target = Variable(inputs).to(device), Variable(target).to(device)

            hidden_np = np.random.normal(0, 0.5, size=(cfg['TRAIN']['BATCHSIZE'], cfg['MODEL']['SIZE']))
            # hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], cfg['MODEL']['SIZE'])
            if 'RANDOM_START' in cfg['DATALOADER'] and not cfg['DATALOADER']['RANDOM_START']:
                hidden_np = np.zeros((cfg['TRAIN']['BATCHSIZE'], cfg['MODEL']['SIZE']))
            hidden = torch.from_numpy(hidden_np).float()
            hidden = hidden.to(device)

            optimizer.zero_grad()
            hidden = hidden.detach()
            hidden_list, output, hidden = model(inputs, hidden)

            check_timing = np.random.randint(-cfg['DATALOADER']['CHECK_TIMING'], 0)
            loss = torch.nn.MSELoss()(output[:, check_timing], target)
            if 'FIXED_DURATION' in cfg['DATALOADER']:
                for j in range(1, cfg['DATALOADER']['FIXED_DURATION'] + 1):
                    loss += torch.nn.MSELoss()(output[:, check_timing - j], target)
            dummy_zero = torch.zeros([cfg['TRAIN']['BATCHSIZE'],
                                      cfg['DATALOADER']['TIME_LENGTH'] + 1,
                                      cfg['MODEL']['SIZE']]).float().to(device)
            active_norm = torch.nn.MSELoss()(hidden_list, dummy_zero)

            loss += cfg['TRAIN']['ACTIVATION_LAMBDA'] * active_norm
            loss.backward()
            optimizer.step()

        if epoch % cfg['TRAIN']['DISPLAY_EPOCH'] == 0:
            print(f'{epoch}, {loss.item():.4f}')
            print('output: ',
                  output[0, check_timing - cfg['DATALOADER'].get('FIXED_DURATION', 0): check_timing, 0].cpu().detach().numpy())
            print('target: ', target[0, 0].cpu().detach().numpy())
        if epoch > 0 and epoch % cfg['TRAIN']['NUM_SAVE_EPOCH'] == 0:
            torch.save(model.state_dict(), os.path.join(save_path, f'epoch_{epoch}.pth'))
Example #10
def main(config_path, sigma_in, signal_length, trial_num):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    # load the model
    torch.manual_seed(1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cfg['MODEL']['SIGMA_NEU'] = 0
    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    model_path = f'trained_model/romo/{model_name}/epoch_500.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    os.makedirs('results/', exist_ok=True)
    save_path = 'results/freq_correlation/'
    os.makedirs(save_path, exist_ok=True)

    neural_dynamics = np.zeros((trial_num, 61, model.n_hid))
    input_signal, omega_1_list, omega_2_list = romo_signal(
        trial_num, signal_length, sigma_in)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    # Run inference one batch at a time to avoid exhausting memory.
    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    # Compute the correlation (Spearman's rank correlation coefficient) for each
    # neuron index and time step, over the 50 time units from t=10 to t=60.
    omega_1_correlation = np.zeros((50, model.n_hid))
    omega_2_correlation = np.zeros((50, model.n_hid))

    # omega_1
    for time in range(10, 60):
        for neuron_idx in range(model.n_hid):
            spearman_r, _ = spearmanr(neural_dynamics[:, time, neuron_idx],
                                      omega_1_list)
            omega_1_correlation[time - 10, neuron_idx] = spearman_r
    # omega_2
    for time in range(10, 60):
        for neuron_idx in range(model.n_hid):
            spearman_r, _ = spearmanr(neural_dynamics[:, time, neuron_idx],
                                      omega_2_list)
            omega_2_correlation[time - 10, neuron_idx] = spearman_r

    np.save(
        os.path.join(save_path,
                     f'{model_name}_{sigma_in}_{trial_num}_omega_1.npy'),
        omega_1_correlation)
    np.save(
        os.path.join(save_path,
                     f'{model_name}_{sigma_in}_{trial_num}_omega_2.npy'),
        omega_2_correlation)
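The two Spearman loops above are identical apart from the frequency list they correlate against. A helper covering both (the name spearman_map is ours), using scipy.stats.spearmanr as above:

import numpy as np
from scipy.stats import spearmanr

def spearman_map(neural_dynamics, omega_list, t_start=10, t_end=60):
    # Spearman rank correlation between each unit's activity and the
    # stimulus frequency, for every time step in [t_start, t_end).
    n_hid = neural_dynamics.shape[2]
    corr = np.zeros((t_end - t_start, n_hid))
    for time in range(t_start, t_end):
        for neuron_idx in range(n_hid):
            r, _ = spearmanr(neural_dynamics[:, time, neuron_idx], omega_list)
            corr[time - t_start, neuron_idx] = r
    return corr

# e.g. omega_1_correlation = spearman_map(neural_dynamics, omega_1_list)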
def main(config_path):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_3000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 1000
    signal_length = 15
    sigma_in = 0.05
    deltas = np.arange(-1, 1, 0.05)
    score = np.zeros(deltas.shape[0])
    delta_index = 0
    acc_list = np.zeros(deltas.shape[0])
    for delta in deltas:
        output_list = np.zeros(trial_num)
        input_signal = romo_signal(delta, trial_num, signal_length, sigma_in)
        input_signal_split = np.split(input_signal, 4)
        for i in range(4):
            hidden = torch.zeros(250, model.n_hid)
            hidden = hidden.to(device)
            inputs = torch.from_numpy(input_signal_split[i]).float()
            inputs = inputs.to(device)
            _, outputs, _, _ = model(inputs, hidden)
            outputs_np = outputs.cpu().detach().numpy()
            output_list[i * 250:(i + 1) * 250] = np.argmax(outputs_np[:, -1],
                                                           axis=1)
        score[delta_index] = np.mean(output_list)
        if delta > 0:
            acc_list[delta_index] = np.mean(output_list)
        else:
            acc_list[delta_index] = 1 - np.mean(output_list)
        if delta_index % 5 == 0:
            print(f'{delta:.3f}', np.mean(output_list))
        delta_index += 1
    os.makedirs('results', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)

    print(np.mean(acc_list))

    np.save(f'results/{model_name}/score.npy', np.array(score))
    np.save(f'results/{model_name}/acc_list.npy', np.array(acc_list))
    plt.plot(deltas, score, color='orange')
    plt.xlabel(r'$\omega_2-\omega_1$', fontsize=16)
    plt.savefig(f'results/{model_name}/psychometric_curve.png', dpi=200)
Example #12
def main(config_path, model_epoch):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_{model_epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    fixed_point_list = np.zeros((200, model.n_hid))
    count = 0
    for i in range(50):
        for j in range(4):
            fixed_point_list[count] = np.loadtxt(
                f'../fixed_points/freq/{model_name}_{model_epoch}/fixed_point_{i}_{j}.txt'
            )
            count += 1

    speed_list = []
    for i in range(len(fixed_point_list)):
        speed_list.append(
            calc_speed(torch.from_numpy(fixed_point_list[i]).float(), model))

    print('average speed: ', np.mean(speed_list))

    pca = PCA(n_components=3)
    pca.fit(fixed_point_list)

    pc_fixed_point_list = pca.transform(fixed_point_list)

    fig = plt.figure(figsize=(7, 6))
    ax = fig.add_subplot(projection='3d')
    ax.view_init(elev=25, azim=20)

    # set the axis labels
    ax.set_xlabel('PC1', fontsize=14)
    ax.set_ylabel('PC2', fontsize=14)
    ax.set_zlabel('PC3', fontsize=14)

    ax.scatter(pc_fixed_point_list[:, 0], pc_fixed_point_list[:, 1],
               pc_fixed_point_list[:, 2])

    os.makedirs(f'results/{model_name}_{model_epoch}', exist_ok=True)
    plt.savefig(f'results/{model_name}_{model_epoch}/fixed_points.png',
                dpi=300)

    fixed_point_list = []
    speed_list = []
    for i in range(50):
        for j in range(4):
            fixed_point = np.loadtxt(
                f'../fixed_points/freq/{model_name}_{model_epoch}/fixed_point_{i}_{j}.txt'
            )
            if calc_speed(torch.from_numpy(fixed_point).float(), model) < 0.02:
                speed_list.append(
                    calc_speed(torch.from_numpy(fixed_point).float(), model))
                fixed_point_list.append(fixed_point)
            # count += 1
    # print(len(speed_list))
    # print(len(fixed_point_list))
    # print(np.argmin(speed_list))
    pc_fixed_point_list = pca.transform(fixed_point_list)
    fig = plt.figure(figsize=(7, 6))
    ax = fig.add_subplot(projection='3d')
    ax.view_init(elev=25, azim=20)

    # set the axis labels
    ax.set_xlabel('PC1', fontsize=14)
    ax.set_ylabel('PC2', fontsize=14)
    ax.set_zlabel('PC3', fontsize=14)

    ax.scatter(
        pc_fixed_point_list[:, 0],
        pc_fixed_point_list[:, 1],
        pc_fixed_point_list[:, 2],
    )

    ax.scatter(
        pc_fixed_point_list[np.argmin(speed_list), 0],
        pc_fixed_point_list[np.argmin(speed_list), 1],
        pc_fixed_point_list[np.argmin(speed_list), 2],
        s=50,
        color='red',
    )

    plt.title(r'Only speed $\leq 0.02$', fontsize=14)

    plt.savefig(
        f'results/{model_name}_{model_epoch}/fixed_points_slow_dim.png',
        dpi=300)

    jacobian = calc_jacobian(
        torch.from_numpy(fixed_point_list[np.argmin(speed_list)]).float(),
        model)
    w, v = np.linalg.eig(jacobian)
    plt.figure(constrained_layout=True)
    plt.scatter(
        w.real,
        w.imag,
    )
    plt.title(f'Eigenvalue Distribution, {np.max(w.real)}', fontsize=16)
    plt.savefig(f'results/{model_name}_{model_epoch}/fixed_point_eig.png',
                dpi=300)
    print(w[w.real > 0])

    plt.figure(constrained_layout=True)
    plt.scatter(
        w.real,
        w.imag,
    )
    plt.xlim([-2, 0])
    plt.ylim([-1, 1])
    plt.title(f'Eigenvalue Distribution, {np.max(w.real)}', fontsize=16)
    plt.savefig(f'results/{model_name}_{model_epoch}/fixed_point_eig2.png',
                dpi=300)
    print(w[w.real > 0])
def main(activation):
    os.makedirs('figures', exist_ok=True)
    freq_range = 51
    time_length = 40

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RecurrentNeuralNetwork(n_in=1,
                                   n_out=1,
                                   n_hid=200,
                                   device=device,
                                   activation=activation,
                                   sigma=0,
                                   use_bias=True).to(device)

    model_path = f'trained_model/{activation}/epoch_1000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    freq = 17
    const_signal = np.repeat(freq / freq_range + 0.25, time_length)
    const_signal = np.expand_dims(const_signal, axis=1)
    const_signal_tensor = torch.from_numpy(np.array([const_signal]))

    analyzer = FixedPoint(model=model, device=device)

    hidden = torch.zeros(1, 200)
    hidden = hidden.to(device)
    const_signal_tensor = const_signal_tensor.float().to(device)
    with torch.no_grad():
        hidden_list, _, _ = model(const_signal_tensor, hidden)

    fixed_point, _ = analyzer.find_fixed_point(torch.unsqueeze(
        hidden_list[:, 20, :], dim=0).to(device),
                                               const_signal_tensor,
                                               view=True)

    # linear approximation around fixed point
    jacobian = analyzer.calc_jacobian(fixed_point, const_signal_tensor)

    # eigenvalue decomposition
    w, v = np.linalg.eig(jacobian)
    w_real = list()
    w_im = list()
    for eig in w:
        w_real.append(eig.real)
        w_im.append(eig.imag)
    plt.scatter(w_real, w_im)
    plt.xlabel(r'$Re(\lambda)$')
    plt.ylabel(r'$Im(\lambda)$')
    plt.savefig(f'figures/{activation}_eigenvalues.png', dpi=100)

    eig_freq = list()
    dynamics_freq = list()
    for i in range(20):
        freq = np.random.randint(1, freq_range + 1)
        const_signal = np.repeat(freq / freq_range + 0.25, time_length)
        const_signal = np.expand_dims(const_signal, axis=1)
        const_signal_tensor = torch.from_numpy(np.array([const_signal]))

        hidden = torch.zeros(1, 200)
        hidden = hidden.to(device)
        const_signal_tensor = const_signal_tensor.float().to(device)
        with torch.no_grad():
            hidden_list, _, _ = model(const_signal_tensor, hidden)

        fixed_point, result_ok = analyzer.find_fixed_point(
            torch.unsqueeze(hidden_list[:, 20, :], dim=0).to(device),
            const_signal_tensor)
        if not result_ok:
            continue

        jacobian = analyzer.calc_jacobian(fixed_point, const_signal_tensor)
        w, v = np.linalg.eig(jacobian)
        max_index = np.argmax(abs(w))
        eig_freq.append(abs(w[max_index].imag))
        dynamics_freq.append(freq)

    plt.figure()
    plt.scatter(eig_freq, dynamics_freq)
    plt.xlabel(r'$|Im(\lambda_{max})|$')
    plt.ylabel(r'$\omega$')
    plt.title('Relationship between frequencies')
    plt.savefig(f'figures/freq_{activation}.png', dpi=100)
def main(config_path):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_3000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 3000
    neural_dynamics = np.zeros((trial_num, 1001, model.n_hid))
    outputs_np = np.zeros(trial_num)
    input_signal, omega_1_list = romo_signal(trial_num,
                                             signal_length=15,
                                             sigma_in=0.05,
                                             time_length=1000)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        outputs_np[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                   cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                       outputs.detach().numpy()[:, -1], axis=1)
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    train_data, test_data, train_target, test_target = train_test_split(
        neural_dynamics, omega_1_list, test_size=0.25)

    clf_coef_norm = []
    mse_list = []

    os.makedirs('results', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)

    for timepoint in range(15, 500, 10):
        clf = Ridge(alpha=1.0)
        clf.fit(train_data[:, timepoint, :], train_target)

        clf_coef_norm.append(np.linalg.norm(clf.coef_))
        mse = mean_squared_error(
            clf.predict(test_data[:, timepoint, :]),
            test_target,
        )
        mse_list.append(mse)
        print(timepoint, mse)

    plt.figure(constrained_layout=True)
    plt.plot(
        list(range(15, 500, 10)),
        mse_list,
    )
    plt.ylim([0, 1])
    plt.xlabel('timepoint', fontsize=16)
    plt.ylabel('MSE', fontsize=16)

    plt.savefig(f'results/{model_name}/memory_duration.png', dpi=200)
    np.save(f'results/{model_name}/mse_list.npy', np.array(mse_list))
    np.save(f'results/{model_name}/clf_coef_norm.npy', np.array(clf_coef_norm))
    np.save(f'results/{model_name}/omega_1_list.npy', omega_1_list)
def main(config_path, sigma_in, signal_length):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['ALPHA'] = 0.075
    cfg['DATALOADER']['TIME_LENGTH'] = 200
    cfg['DATALOADER']['SIGNAL_LENGTH'] = 50
    cfg['DATALOADER']['VARIABLE_DELAY'] = 15

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('../results/', exist_ok=True)
    save_path = '../results/accuracy_w_dropout_noise_2/'
    os.makedirs(save_path, exist_ok=True)

    print('sigma_syn accuracy')
    # For one trained model, vary the dropout ratio from 0 to 0.3 and record the accuracy at each value.
    results_acc = np.zeros(11)

    for acc_idx in range(11):
        # load the model
        torch.manual_seed(1)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        cfg['MODEL']['SIGMA_NEU'] = 0
        model = RecurrentNeuralNetwork(
            n_in=1,
            n_out=2,
            n_hid=cfg['MODEL']['SIZE'],
            device=device,
            alpha_time_scale=cfg['MODEL']['ALPHA'],
            beta_time_scale=cfg['MODEL']['BETA'],
            activation=cfg['MODEL']['ACTIVATION'],
            sigma_neu=cfg['MODEL']['SIGMA_NEU'],
            sigma_syn=cfg['MODEL']['SIGMA_SYN'],
            use_bias=cfg['MODEL']['USE_BIAS'],
            anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

        model_path = f'../trained_model/freq_schedule/{model_name}/epoch_{cfg["TRAIN"]["NUM_EPOCH"]}.pth'
        model.load_state_dict(torch.load(model_path, map_location=device))

        model.eval()

        # original weights
        original_w_hh = model.w_hh.weight.data.clone()
        # weight_mean = np.mean(abs(original_w_hh.cpu().detach().numpy()))

        # add dropout noise
        dropout_ratio = 0.03 * acc_idx
        correct = 0
        num_data = 0
        for noise_idx in range(25):
            mask = np.random.choice([0, 1],
                                    model.n_hid * model.n_hid,
                                    p=[dropout_ratio, 1 - dropout_ratio])
            mask = mask.reshape(model.n_hid, model.n_hid)
            torch_mask = torch.from_numpy(mask).float().to(device)
            new_w = torch.mul(original_w_hh, torch_mask)
            model.w_hh.weight = torch.nn.Parameter(new_w, requires_grad=False)
            for delta_idx in range(10):
                while True:
                    delta = np.random.rand() * 8 - 4
                    if abs(delta) >= 1:
                        break
                N = 100
                output_list = np.zeros(N)
                input_signal = romo_signal(delta, N,
                                           cfg['DATALOADER']['TIME_LENGTH'],
                                           cfg['DATALOADER']['SIGNAL_LENGTH'],
                                           sigma_in, cfg['MODEL']['ALPHA'])
                input_signal_split = np.split(input_signal, 2)
                for i in range(2):
                    hidden = torch.zeros(50, model.n_hid)
                    hidden = hidden.to(device)
                    inputs = torch.from_numpy(input_signal_split[i]).float()
                    inputs = inputs.to(device)
                    _, outputs, _, _ = model(inputs, hidden)
                    outputs_np = outputs.cpu().detach().numpy()
                    output_list[i * 50:(i + 1) * 50] = np.argmax(
                        outputs_np[:, -1], axis=1)
                num_data += 100
                if delta > 0:
                    ans = 1
                else:
                    ans = 0
                correct += (output_list == ans).sum()
                # if delta_idx % 10 == 0:
                #     print(delta_idx, delta, (output_list == ans).sum() / 100)
                # print(f'{delta:.3f}', (output_list == ans).sum() / 200)
            # print(correct / num_data)
        results_acc[acc_idx] = correct / num_data
        print(dropout_ratio, correct / num_data)

    np.save(os.path.join(save_path, f'{model_name}.npy'), results_acc)
def main(config_path, sigma_in, signal_length, alpha):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('results/', exist_ok=True)
    save_path = 'results/w_ridge/'
    os.makedirs(save_path, exist_ok=True)

    # load the trained model
    torch.manual_seed(1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RecurrentNeuralNetwork(n_in=1, n_out=2, n_hid=cfg['MODEL']['SIZE'], device=device,
                                   alpha_time_scale=0.25, beta_time_scale=cfg['MODEL']['BETA'],
                                   activation=cfg['MODEL']['ACTIVATION'],
                                   sigma_neu=cfg['MODEL']['SIGMA_NEU'],
                                   sigma_syn=cfg['MODEL']['SIGMA_SYN'],
                                   use_bias=cfg['MODEL']['USE_BIAS'],
                                   anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    model_path = f'trained_model/romo/{model_name}/epoch_{cfg["TRAIN"]["NUM_EPOCH"]}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    sample_num = 5000
    neural_dynamics = np.zeros((sample_num, 61, model.n_hid))
    input_signal, omega_1_list, omega_2_list = romo_signal(sample_num, signal_length=signal_length,
                                                           sigma_in=sigma_in)
    input_signal_split = np.split(input_signal, sample_num // cfg['TRAIN']['BATCHSIZE'])

    # Run inference one batch at a time to avoid exhausting memory.
    for i in range(sample_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    sample_X_1 = np.zeros([30 * sample_num, model.n_hid])
    sample_X_2 = np.zeros([sample_num, model.n_hid])
    sample_y_1 = np.zeros([30 * sample_num])
    sample_y_2 = np.zeros(sample_num)

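    # regression design: squared hidden states at time steps 15..44 predict
    # omega_1; the squared state at t = 55 predicts omega_2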
    for i in range(sample_num):
        sample_X_1[i * 30: (i + 1) * 30, :] = neural_dynamics[i, 15:45, :] ** 2
        sample_X_2[i, :] = neural_dynamics[i, 55, :] ** 2
        sample_y_1[i * 30: (i + 1) * 30] = omega_1_list[i]
        sample_y_2[i] = omega_2_list[i]

    # split into training and test sets
    train_X_1, test_X_1, train_y_1, test_y_1 = train_test_split(sample_X_1, sample_y_1, random_state=0)
    train_X_2, test_X_2, train_y_2, test_y_2 = train_test_split(sample_X_2, sample_y_2, random_state=0)

    ridge_1 = Ridge(alpha=alpha)
    ridge_1.fit(train_X_1, train_y_1)

    ridge_2 = Ridge(alpha=alpha)
    ridge_2.fit(train_X_2, train_y_2)
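
    # a minimal sanity check (not part of the original script): R^2 of each
    # fitted readout on the held-out split, using scikit-learn's Ridge.score
    print('omega_1 readout R^2:', ridge_1.score(test_X_1, test_y_1))
    print('omega_2 readout R^2:', ridge_2.score(test_X_2, test_y_2))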

    np.save(os.path.join(save_path, f'{model_name}_omega_1.npy'), ridge_1.coef_)
    np.save(os.path.join(save_path, f'{model_name}_omega_2.npy'), ridge_2.coef_)
Example #17
def main(config_path, model_epoch):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_{model_epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 1000
    neural_dynamics = np.zeros((trial_num, 61, model.n_hid))
    outputs_np = np.zeros(trial_num)
    input_signal, omega_1_list = romo_signal(trial_num,
                                             signal_length=15,
                                             sigma_in=0.05,
                                             time_length=60)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        outputs_np[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                   cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                       outputs.detach().numpy()[:, -1], axis=1)
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    time_15_mse = 0
    time_45_mse = 0
    os.makedirs('results', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)
    clf_coef_norm_45 = []
    clf_coef_norm_15 = []
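    # Compare how well omega_1 can be decoded from the squared hidden state
    # at t = 15 (end of stimulus 1) and t = 45 (late delay), averaged over
    # 100 random train/test splits.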
    for trial in range(100):
        train_data, test_data, train_target, test_target = train_test_split(
            neural_dynamics, omega_1_list, test_size=0.25)
        timepoint = 45
        """"
        for alpha in [0.00001, 0.0001, 0.001, 0.01, 0.1, 0.5, 1.0]:
            clf = Ridge(alpha=alpha)
            clf.fit(train_data[:, timepoint, :], train_target)
        
            clf_coef_norm.append(np.linalg.norm(clf.coef_))
            mse = mean_squared_error(
                clf.predict(test_data[:, timepoint, :]),
                test_target,
            )
            print(alpha, mse)
         """
        clf = Ridge(alpha=0.01)
        clf.fit(train_data[:, timepoint, :]**2, train_target)
        # print(clf.coef_)

        clf_coef_norm_45.append(np.linalg.norm(clf.coef_))
        mse = mean_squared_error(
            clf.predict(test_data[:, timepoint, :]**2),
            test_target,
        )
        # print(clf.score(test_data[:, timepoint, :], test_target))
        # print(timepoint, mse)
        time_45_mse += mse

        timepoint = 15
        clf = Ridge(alpha=0.01)
        clf.fit(train_data[:, timepoint, :]**2, train_target)
        # print(clf.score(test_data[:, timepoint, :], test_target))

        clf_coef_norm_15.append(np.linalg.norm(clf.coef_))
        mse = mean_squared_error(
            clf.predict(test_data[:, timepoint, :]**2),
            test_target,
        )
        # print(timepoint, mse)
        time_15_mse += mse

    print('average')
    print(f'time: 15, mse: {time_15_mse / 100}, coef norm: {np.mean(clf_coef_norm_15)}')
    print(f'time: 45, mse: {time_45_mse / 100}, coef norm: {np.mean(clf_coef_norm_45)}')
def main(config_path):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_3000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 1700
    neural_dynamics = np.zeros((trial_num, 1001, model.n_hid))
    outputs_np = np.zeros(trial_num)
    input_signal, omega_1_list = romo_signal(trial_num,
                                             signal_length=15,
                                             sigma_in=0.05,
                                             time_length=1000)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        outputs_np[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                   cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                       outputs.detach().numpy()[:, -1], axis=1)
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    os.makedirs('results', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)

    distance_list = []
    diff_omega_list = []
    for i in range(16):
        for j in range(i + 1, 16):
            # Euclidean distance between the trial-averaged states of two
            # stimulus frequencies at t = 15 (end of stimulus 1)
            distance = np.linalg.norm(
                np.mean(neural_dynamics[100 * i:100 * (i + 1), 15], axis=0) -
                np.mean(neural_dynamics[100 * j:100 * (j + 1), 15], axis=0))
            distance_list.append(distance)
            diff_omega_list.append(abs(i - j) * 0.25)

    plt.figure(constrained_layout=True)
    plt.scatter(diff_omega_list, distance_list)
    plt.xlabel(r'$|\omega_i - \omega_j|$', fontsize=16)
    plt.ylabel(r'$\Delta {\bf x}$', fontsize=16)

    plt.savefig(f'results/{model_name}/coding_distance_Ts.png', dpi=200)
    np.save(f'results/{model_name}/active_norm.npy', np.array(distance_list))

    distance_list = []
    diff_omega_list = []
    for i in range(16):
        for j in range(i + 1, 16):
            # the same distance, measured at t = 45 (late delay)
            distance = np.linalg.norm(
                np.mean(neural_dynamics[100 * i:100 * (i + 1), 45], axis=0) -
                np.mean(neural_dynamics[100 * j:100 * (j + 1), 45], axis=0))
            distance_list.append(distance)
            diff_omega_list.append(abs(i - j) * 0.25)

    plt.figure(constrained_layout=True)
    plt.scatter(diff_omega_list, distance_list)
    plt.xlabel(r'$|\omega_i - \omega_j|$', fontsize=16)
    plt.ylabel(r'$\Delta {\bf x}$', fontsize=16)

    plt.savefig(f'results/{model_name}/coding_distance_Tf.png', dpi=200)
Example #19
def main(config_path, sigma_in, signal_length, time_point, model_epoch):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('results/', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)
    save_path = f'results/{model_name}/encoding_dimension/'
    os.makedirs(save_path, exist_ok=True)

    # load the trained model
    torch.manual_seed(1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cfg['MODEL']['SIGMA_NEU'] = 0
    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    model_path = f'../trained_model/freq/{model_name}/epoch_{model_epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    sample_num = 5000
    neural_dynamics = np.zeros((sample_num, 201, model.n_hid))
    input_signal, omega_1_list, omega_2_list = romo_signal(
        sample_num, signal_length=signal_length, sigma_in=sigma_in)
    input_signal_split = np.split(input_signal,
                                  sample_num // cfg['TRAIN']['BATCHSIZE'])

    # Run inference one batch at a time to avoid exhausting memory.
    for i in range(sample_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    sample_X = np.zeros([sample_num, model.n_hid])
    sample_y = np.zeros(sample_num)

    for i in range(sample_num):
        sample_X[i, :] = neural_dynamics[i, time_point, :]
        sample_y[i] = omega_1_list[i]

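    # Shattering test: for 300 random binary dichotomies of the 21 stimulus
    # frequencies, train a linear SVM on the hidden states at time_point and
    # count how often it separates the two classes with > 0.85 test accuracy.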
    correct = 0
    acc_list = np.zeros(300)
    for i in range(300):
        binary_def = np.random.choice([0, 1], 21)
        omega_1_sample = [
            1, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3, 3.2, 3.4, 3.6,
            3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5
        ]
        binary_dict = {}
        for j in range(21):
            binary_dict[omega_1_sample[j]] = binary_def[j]
        label = np.zeros(sample_num)
        for j in range(sample_num):
            label[j] = binary_dict[sample_y[j]]

        # split into training and test sets
        train_X, test_X, train_label, test_label = train_test_split(
            sample_X, label, random_state=0)

        # linear SVM classifier
        linear_svc = LinearSVC(random_state=None, max_iter=5000)

        # fit the classifier
        linear_svc.fit(train_X, train_label)

        # accuracy on the test data
        pred_test = linear_svc.predict(test_X)
        accuracy_test = accuracy_score(test_label, pred_test)
        print('test accuracy: %.2f' % accuracy_test)
        if accuracy_test > 0.85:
            correct += 1
        acc_list[i] = accuracy_test

    np.save(os.path.join(save_path, 'correct_ratio.npy'),
            np.array(correct / 300))
    np.save(os.path.join(save_path, f'acc_list.npy'), acc_list)
Example #20
def main(config_path, model_epoch):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    # save path
    os.makedirs('fixed_points', exist_ok=True)
    os.makedirs('fixed_points/freq', exist_ok=True)
    save_path = f'fixed_points/freq/{model_name}_{model_epoch}'
    os.makedirs(save_path, exist_ok=True)

    # copy config file
    shutil.copyfile(config_path,
                    os.path.join(save_path, os.path.basename(config_path)))

    use_cuda = cfg['MACHINE']['CUDA'] and torch.cuda.is_available()
    torch.manual_seed(cfg['MACHINE']['SEED'])
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    if 'ALPHA' not in cfg['MODEL'].keys():
        cfg['MODEL']['ALPHA'] = 0.25

    # cfg['DATALOADER']['TIME_LENGTH'] = 200
    # cfg['DATALOADER']['SIGNAL_LENGTH'] = 50
    cfg['DATALOADER']['VARIABLE_DELAY'] = 0

    # model load
    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=cfg['MODEL']['ALPHA'],
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    model_path = f'trained_model/freq/{model_name}/epoch_{model_epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    eval_dataset = FreqDataset(
        time_length=cfg['DATALOADER']['TIME_LENGTH'],
        time_scale=cfg['MODEL']['ALPHA'],
        freq_min=cfg['DATALOADER']['FREQ_MIN'],
        freq_max=cfg['DATALOADER']['FREQ_MAX'],
        min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
        signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
        variable_signal_length=cfg['DATALOADER']['VARIABLE_SIGNAL_LENGTH'],
        sigma_in=cfg['DATALOADER']['SIGMA_IN'],
        delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])

    eval_dataloader = torch.utils.data.DataLoader(
        eval_dataset,
        batch_size=cfg['TRAIN']['BATCHSIZE'],
        num_workers=2,
        shuffle=True,
        worker_init_fn=lambda x: np.random.seed())

    analyzer = FixedPoint(model=model,
                          device=device,
                          alpha=cfg['MODEL']['ALPHA'],
                          max_epochs=140000)

    for trial in range(50):
        for i, data in enumerate(eval_dataloader):
            inputs, target = data
            # print(inputs.shape)
            inputs, target = inputs.float(), target.long()
            inputs, target = Variable(inputs).to(device), Variable(target).to(
                device)

            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'],
                                 cfg['MODEL']['SIZE'])
            hidden = hidden.to(device)

            hidden = hidden.detach()
            hidden_list, output, hidden, _ = model(inputs, hidden)

            # const_signal = torch.tensor([0] * 1)
            # const_signal = const_signal.float().to(device)

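            # seed the fixed-point search with a hidden state drawn from a
            # random delay-period time step (in [35, 55)) of the first trial
            # in the batch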
            reference_time_point = np.random.randint(35, 55)
            fixed_point, result_ok = analyzer.find_fixed_point(
                hidden_list[0, reference_time_point], view=True)

            fixed_point = fixed_point.detach().cpu().numpy()

            # print(fixed_point)
            # fixed_point_tensor = torch.from_numpy(fixed_point).float()
            # jacobian = analyzer.calc_jacobian(fixed_point_tensor, const_signal)

            # print(np.dot(model.w_out.weight.detach().cpu().numpy(), fixed_point))

            # w, v = np.linalg.eig(jacobian)
            # print('eigenvalues', w)

            np.savetxt(os.path.join(save_path, f'fixed_point_{trial}_{i}.txt'),
                       fixed_point)
def main(config_path, sigma_in, signal_length, model_epoch):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('results_20210203/', exist_ok=True)
    os.makedirs(f'results_20210203/{model_name}', exist_ok=True)
    save_path = f'results_20210203/{model_name}/encoding_dimension_time_series/'
    os.makedirs(save_path, exist_ok=True)

    # load the trained model
    torch.manual_seed(1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cfg['MODEL']['SIGMA_NEU'] = 0
    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    model_path = f'../trained_model/freq_20210203/{model_name}/epoch_{model_epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    correct_ratio = np.zeros(6)
    acc_list = np.zeros([6, 200])
    division_num = 7
    # time_sample = np.linspace(25, 45, division_num)
    time_sample = [42, 43, 44, 45, 46, 47, 48]
    # time_sample = [25, 26, 27, 28, 29, 30, 31]
    omega_idx = 0
    for omega_1 in [1, 1.8, 2.6, 3.4, 4.2, 5]:
        sample_num = 100
        neural_dynamics = np.zeros((sample_num, 61, model.n_hid))
        input_signal, omega_2_list = romo_signal_fixed_omega_1(
            omega_1,
            sample_num,
            signal_length=signal_length,
            sigma_in=sigma_in)
        input_signal_split = np.split(input_signal,
                                      sample_num // cfg['TRAIN']['BATCHSIZE'])

        # random direction used only by the commented-out random-dynamics
        # baseline below
        base_1 = np.random.randn(256)

        # Run inference one batch at a time to avoid exhausting memory.
        for i in range(sample_num // cfg['TRAIN']['BATCHSIZE']):
            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
            hidden = hidden.to(device)
            inputs = torch.from_numpy(input_signal_split[i]).float()
            inputs = inputs.to(device)
            hidden_list, outputs, _, _ = model(inputs, hidden)
            hidden_list_np = hidden_list.cpu().detach().numpy()
            neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                            cfg['TRAIN']['BATCHSIZE']] = hidden_list_np
            # neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']: (i + 1) * cfg['TRAIN']['BATCHSIZE']] = np.array([np.random.randn() * base_1 for i in range(61)])

        sample_X = np.zeros([division_num, model.n_hid])
        sample_y = np.zeros(division_num)

        for i, time in enumerate(time_sample):
            # time = np.random.choice(time_sample)
            sample_X[i, :] = neural_dynamics[0, int(time), :]
            sample_y[i] = int(time)

        # print(sample_y)

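        # Shattering test over time: for 200 random dichotomies of the 7
        # sampled time steps, check whether a linear SVM separates the
        # corresponding hidden states with training accuracy > 0.85.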
        correct = 0
        for i in range(200):
            while True:
                binary_def = np.random.choice([0, 1], division_num)
                # require at least two time points in each class
                counts = binary_def.tolist()
                if counts.count(1) > 1 and counts.count(0) > 1:
                    break
            binary_dict = {}
            for j in range(division_num):
                binary_dict[time_sample[j]] = binary_def[j]
            label = np.zeros(division_num)
            for j in range(division_num):
                label[j] = binary_dict[sample_y[j]]

            # train/test split (disabled: with only 7 samples the classifier
            # is evaluated on its training accuracy instead)
            # train_X, test_X, train_label, test_label = train_test_split(sample_X, label, random_state=0)

            # linear SVM classifier
            linear_svc = LinearSVC(random_state=None, max_iter=5000)

            # fit the classifier
            linear_svc.fit(sample_X, label)

            # training accuracy
            pred_train = linear_svc.predict(sample_X)
            accuracy_train = accuracy_score(label, pred_train)

            # print(f'omega_1: {omega_1}, train acc: {accuracy_train:.2f}')

            # test accuracy (disabled along with the split above)
            # pred_test = linear_svc.predict(test_X)
            # accuracy_test = accuracy_score(test_label, pred_test)
            # print(f'omega_1: {omega_1}, train acc: {accuracy_train:.2f}, test acc: {accuracy_test:.2f}')
            if accuracy_train > 0.85:
                correct += 1

            acc_list[omega_idx, i] = accuracy_train

        correct_ratio[omega_idx] = correct / 200
        omega_idx += 1

    np.save(os.path.join(save_path, 'correct_ratio.npy'), correct_ratio)
    np.save(os.path.join(save_path, 'acc_list.npy'), acc_list)

    print(np.mean(acc_list))
def main(activation):
    os.makedirs('figures', exist_ok=True)
    freq_range = 51
    time_length = 40

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RecurrentNeuralNetwork(n_in=1,
                                   n_out=1,
                                   n_hid=200,
                                   device=device,
                                   activation=activation,
                                   sigma=0,
                                   use_bias=True).to(device)

    model_path = f'trained_model/{activation}/epoch_1000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    analyzer = FixedPoint(model=model, device=device)

    # compare fixed points
    freq = 17
    const_signal1 = np.repeat(freq / freq_range + 0.25, time_length)
    const_signal1 = np.expand_dims(const_signal1, axis=1)
    const_signal_tensor1 = torch.from_numpy(np.array([const_signal1]))
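    # const_signal_tensor1 has shape (1, time_length, 1): a single trial
    # driven by the same constant input at every time step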

    print(const_signal_tensor1.shape)

    hidden = torch.zeros(1, 200)
    hidden = hidden.to(device)
    const_signal_tensor1 = const_signal_tensor1.float().to(device)
    with torch.no_grad():
        hidden_list, _, _ = model(const_signal_tensor1, hidden)

    # different time of same trajectory.
    fixed_point1, _ = analyzer.find_fixed_point(torch.unsqueeze(
        hidden_list[:, 20, :], dim=0).to(device),
                                                const_signal_tensor1,
                                                view=True)
    fixed_point2, _ = analyzer.find_fixed_point(
        torch.unsqueeze(hidden_list[:, 15, :], dim=0).to(device),
        const_signal_tensor1)

    print('distance between two fixed points found from different initial '
          'conditions (different times of the same trajectory):')
    print(torch.norm(fixed_point1 - fixed_point2).item())

    # same time of different trajectories.
    freq = 18
    const_signal2 = np.repeat(freq / freq_range + 0.25, time_length)
    const_signal2 = np.expand_dims(const_signal2, axis=1)
    const_signal_tensor2 = torch.from_numpy(np.array([const_signal2]))

    hidden = torch.zeros(1, 200)
    hidden = hidden.to(device)
    const_signal_tensor2 = const_signal_tensor2.float().to(device)
    with torch.no_grad():
        hidden_list, _, _ = model(const_signal_tensor2, hidden)

    fixed_point3, _ = analyzer.find_fixed_point(
        torch.unsqueeze(hidden_list[:, 20, :], dim=0).to(device),
        const_signal_tensor2)
    print('distance between two fixed points found from different initial '
          'conditions (same time of different trajectories):')
    print(torch.norm(fixed_point1 - fixed_point3).item())
def main(config_path, model_epoch):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_{model_epoch}.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 100
    neural_dynamics = np.zeros((trial_num, 2001, model.n_hid))
    outputs_np = np.zeros(trial_num)
    input_signal, omega_1_list = romo_signal(trial_num,
                                             signal_length=15,
                                             sigma_in=0.05,
                                             time_length=2000)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        outputs_np[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                   cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                       outputs.detach().numpy()[:, -1], axis=1)
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

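    # Estimate the limit-cycle period of trial 0: find the time offset at
    # which the trajectory returns closest to its state at t = 500.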
    norm_list = []
    for i in range(300):
        norm_list.append(
            np.linalg.norm(neural_dynamics[0, i + 500, :] -
                           neural_dynamics[0, 500, :]))

    period_list = []
    # argmin over norm_list[1:] yields (period - 1); the +1 is restored where
    # the period is used below
    period = np.argmin(norm_list[1:])

    os.makedirs('results', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)

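    # Mean distance across trials between x(t) and x(t + period); small
    # values indicate the dynamics have converged onto the limit cycle.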
    for i in range(40, 1500):
        period_list.append(
            np.mean(
                np.linalg.norm(neural_dynamics[:, i, :] -
                               neural_dynamics[:, i + period + 1, :],
                               axis=1)))

    print(np.min(period_list))
    # Report the first time point at which the trajectory has (approximately)
    # converged: within 5% of the minimum if the distance never falls below
    # 0.05, otherwise the first time it drops below 0.05.
    if np.min(period_list) > 0.05:
        for i in range(len(period_list)):
            if period_list[i] < np.min(period_list) * 1.05:
                print(i + 40, '!')
                break
    else:
        for i in range(len(period_list)):
            if period_list[i] < 0.05:
                print(i + 40)
                break

    plt.figure(constrained_layout=True)
    plt.plot(period_list)
    plt.xlabel('time', fontsize=16)
    plt.ylabel(r'$|x(t)-x(t+T_L)|$', fontsize=16)
    plt.title(model_name, fontsize=16)

    plt.savefig(f'results/{model_name}/convergence.png', dpi=200)
Example #24
def main(config_path):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    # save path
    os.makedirs('trained_model', exist_ok=True)
    os.makedirs('trained_model/freq', exist_ok=True)
    save_path = f'trained_model/freq/{model_name}'
    os.makedirs(save_path, exist_ok=True)

    # copy config file
    shutil.copyfile(config_path,
                    os.path.join(save_path, os.path.basename(config_path)))

    use_cuda = cfg['MACHINE']['CUDA'] and torch.cuda.is_available()
    torch.manual_seed(cfg['MACHINE']['SEED'])
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    if 'ALPHA' not in cfg['MODEL'].keys():
        cfg['MODEL']['ALPHA'] = 0.25

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=cfg['MODEL']['ALPHA'],
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

    train_dataset = FreqDataset(
        time_length=cfg['DATALOADER']['TIME_LENGTH'],
        time_scale=cfg['MODEL']['ALPHA'],
        freq_min=cfg['DATALOADER']['FREQ_MIN'],
        freq_max=cfg['DATALOADER']['FREQ_MAX'],
        min_interval=cfg['DATALOADER']['MIN_INTERVAL'],
        signal_length=cfg['DATALOADER']['SIGNAL_LENGTH'],
        variable_signal_length=cfg['DATALOADER']['VARIABLE_SIGNAL_LENGTH'],
        sigma_in=cfg['DATALOADER']['SIGMA_IN'],
        delay_variable=cfg['DATALOADER']['VARIABLE_DELAY'])

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg['TRAIN']['BATCHSIZE'],
        num_workers=2,
        shuffle=True,
        worker_init_fn=lambda x: np.random.seed())

    print(model)
    print('Epoch Loss Acc')

    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=cfg['TRAIN']['LR'],
                           weight_decay=cfg['TRAIN']['WEIGHT_DECAY'])
    correct = 0
    num_data = 0
    for epoch in range(cfg['TRAIN']['NUM_EPOCH'] + 1):
        model.train()
        for i, data in enumerate(train_dataloader):
            inputs, target = data
            # print(inputs.shape)
            inputs, target = inputs.float(), target.long()
            inputs, target = Variable(inputs).to(device), Variable(target).to(
                device)

            hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'],
                                 cfg['MODEL']['SIZE'])
            hidden = hidden.to(device)

            optimizer.zero_grad()
            hidden = hidden.detach()
            hidden_list, output, hidden, new_j = model(inputs, hidden)
            # print(output)

            loss = torch.nn.CrossEntropyLoss()(output[:, -1], target)
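            # activity regularization: mean squared hidden activity, computed
            # as the MSE between hidden_list and an all-zero tensor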
            dummy_zero = torch.zeros([
                cfg['TRAIN']['BATCHSIZE'],
                cfg['DATALOADER']['TIME_LENGTH'] + 1, cfg['MODEL']['SIZE']
            ]).float().to(device)
            active_norm = torch.nn.MSELoss()(hidden_list, dummy_zero)

            loss += cfg['TRAIN']['ACTIVATION_LAMBDA'] * active_norm
            loss.backward()
            optimizer.step()
            correct += (np.argmax(
                output[:, -1].cpu().detach().numpy(),
                axis=1) == target.cpu().detach().numpy()).sum().item()
            num_data += target.cpu().detach().numpy().shape[0]

        if epoch % cfg['TRAIN']['DISPLAY_EPOCH'] == 0:
            acc = correct / num_data
            print(f'{epoch}, {loss.item():.6f}, {acc:.6f}')
            print(f'active norm: {active_norm.item():.6f}')
            # print('w_hh: ', model.w_hh.weight.cpu().detach().numpy()[:4, :4])
            # print('new_j: ', new_j.cpu().detach().numpy()[0, :4, :4])
            correct = 0
            num_data = 0
        if epoch > 0 and epoch % cfg['TRAIN']['NUM_SAVE_EPOCH'] == 0:
            torch.save(model.state_dict(),
                       os.path.join(save_path, f'epoch_{epoch}.pth'))
Example #25
def main(config_path, sigma_in, signal_length):
    # hyper-parameter
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    model_name = os.path.splitext(os.path.basename(config_path))[0]

    os.makedirs('results/', exist_ok=True)
    save_path = 'results/accuracy_w_synaptic_noise_2/'
    os.makedirs(save_path, exist_ok=True)

    print('sigma_syn accuracy')
    # For a single trained model, vary sigma_syn^test from 0 to 0.1 and record
    # the accuracy for each of 25 independent noise draws at each level.
    results_acc = np.zeros((11, 25))

    for acc_idx in range(11):
        # load the trained model
        torch.manual_seed(1)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        cfg['MODEL']['SIGMA_NEU'] = 0
        model = RecurrentNeuralNetwork(
            n_in=1,
            n_out=2,
            n_hid=cfg['MODEL']['SIZE'],
            device=device,
            alpha_time_scale=0.25,
            beta_time_scale=cfg['MODEL']['BETA'],
            activation=cfg['MODEL']['ACTIVATION'],
            sigma_neu=cfg['MODEL']['SIGMA_NEU'],
            sigma_syn=cfg['MODEL']['SIGMA_SYN'],
            use_bias=cfg['MODEL']['USE_BIAS'],
            anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)

        model_path = f'trained_model/romo/{model_name}/epoch_{cfg["TRAIN"]["NUM_EPOCH"]}.pth'
        model.load_state_dict(torch.load(model_path, map_location=device))

        model.eval()

        # keep a copy of the original recurrent weights
        original_w_hh = model.w_hh.weight.data.clone()
        weight_mean = np.mean(abs(original_w_hh.cpu().detach().numpy()))

        # add synaptic noise
        sigma_syn = 0.01 * acc_idx
        # normalize the noise level by the mean absolute synaptic weight
        sigma_syn = sigma_syn * weight_mean / 0.04

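        # For each of 25 Gaussian perturbations, add noise of scale sigma_syn
        # to the recurrent weights and measure classification accuracy on
        # random stimulus pairs.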
        for noise_idx in range(25):
            correct = 0
            num_data = 0
            synaptic_noise = torch.randn(
                (cfg['MODEL']['SIZE'], cfg['MODEL']['SIZE'])) * sigma_syn
            synaptic_noise = synaptic_noise.to(device)
            new_w = original_w_hh + synaptic_noise
            model.w_hh.weight = torch.nn.Parameter(new_w, requires_grad=False)
            for delta_idx in range(10):
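                # rejection-sample delta uniformly from [-4, 4] with |delta| >= 1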
                while True:
                    delta = np.random.rand() * 8 - 4
                    if abs(delta) >= 1:
                        break
                N = 100
                output_list = np.zeros(N)
                input_signal = romo_signal(delta, N, signal_length, sigma_in)
                input_signal_split = np.split(input_signal, 2)
                for i in range(2):
                    hidden = torch.zeros(50, model.n_hid)
                    hidden = hidden.to(device)
                    inputs = torch.from_numpy(input_signal_split[i]).float()
                    inputs = inputs.to(device)
                    _, outputs, _, _ = model(inputs, hidden)
                    outputs_np = outputs.cpu().detach().numpy()
                    output_list[i * 50:(i + 1) * 50] = np.argmax(
                        outputs_np[:, -1], axis=1)
                num_data += 100
                if delta > 0:
                    ans = 1
                else:
                    ans = 0
                correct += (output_list == ans).sum()
                # if delta_idx % 10 == 0:
                #     print(delta_idx, delta, (output_list == ans).sum() / 200)
                # print(f'{delta:.3f}', (output_list == ans).sum() / 200)

            results_acc[acc_idx, noise_idx] = correct / num_data

    np.save(os.path.join(save_path, f'{model_name}.npy'), results_acc)
def main(config_path):
    torch.manual_seed(1)
    device = torch.device('cpu')

    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)

    cfg['MODEL']['SIGMA_NEU'] = 0
    model_name = os.path.splitext(os.path.basename(config_path))[0]
    print('model_name: ', model_name)

    model = RecurrentNeuralNetwork(
        n_in=1,
        n_out=2,
        n_hid=cfg['MODEL']['SIZE'],
        device=device,
        alpha_time_scale=0.25,
        beta_time_scale=cfg['MODEL']['BETA'],
        activation=cfg['MODEL']['ACTIVATION'],
        sigma_neu=cfg['MODEL']['SIGMA_NEU'],
        sigma_syn=cfg['MODEL']['SIGMA_SYN'],
        use_bias=cfg['MODEL']['USE_BIAS'],
        anti_hebbian=cfg['MODEL']['ANTI_HEBB']).to(device)
    model_path = f'../trained_model/freq/{model_name}/epoch_3000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    trial_num = 3000
    neural_dynamics = np.zeros((trial_num, 101, model.n_hid))
    outputs_np = np.zeros(trial_num)
    input_signal, omega_1_list = romo_signal(trial_num,
                                             signal_length=15,
                                             sigma_in=0.05,
                                             time_length=100)
    input_signal_split = np.split(input_signal,
                                  trial_num // cfg['TRAIN']['BATCHSIZE'])

    for i in range(trial_num // cfg['TRAIN']['BATCHSIZE']):
        hidden = torch.zeros(cfg['TRAIN']['BATCHSIZE'], model.n_hid)
        hidden = hidden.to(device)
        inputs = torch.from_numpy(input_signal_split[i]).float()
        inputs = inputs.to(device)
        hidden_list, outputs, _, _ = model(inputs, hidden)
        hidden_list_np = hidden_list.cpu().detach().numpy()
        outputs_np[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                   cfg['TRAIN']['BATCHSIZE']] = np.argmax(
                       outputs.detach().numpy()[:, -1], axis=1)
        neural_dynamics[i * cfg['TRAIN']['BATCHSIZE']:(i + 1) *
                        cfg['TRAIN']['BATCHSIZE']] = hidden_list_np

    os.makedirs('results', exist_ok=True)
    os.makedirs(f'results/{model_name}', exist_ok=True)

    speed_list = []

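    # Trajectory speed: mean step-to-step distance of the hidden state across
    # trials, from t = 16 through t = 44.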
    for timepoint in range(16, 45):
        trajectory_speed = np.mean(
            np.linalg.norm(
                neural_dynamics[:, timepoint, :] -
                neural_dynamics[:, timepoint - 1, :],
                axis=1,
            ))
        speed_list.append(trajectory_speed)
        print(timepoint, trajectory_speed)

    plt.figure(constrained_layout=True)
    plt.plot(
        list(range(16, 45)),
        speed_list,
    )
    plt.xlabel('timepoint', fontsize=16)
    plt.ylabel(r'$|x(t) - x(t-1)|$', fontsize=16)

    plt.savefig(f'results/{model_name}/trajectory_speed.png', dpi=200)
    np.save(f'results/{model_name}/trajectory_speed.npy', np.array(speed_list))
Example #27
def main(activation):
    os.makedirs('figures', exist_ok=True)
    freq_range = 51
    time_length = 40

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RecurrentNeuralNetwork(n_in=1, n_out=1, n_hid=200, device=device,
                                   activation=activation, sigma=0, use_bias=True).to(device)

    model_path = f'trained_model/{activation}/epoch_1000.pth'
    model.load_state_dict(torch.load(model_path, map_location=device))

    model.eval()

    analyzer = FixedPoint(model=model, device=device, max_epochs=200000)

    hidden_list_list = np.zeros([30 * time_length, model.n_hid])
    fixed_point_list = np.zeros([15, model.n_hid])
    i = 0
    while i < 15:
        freq = np.random.randint(10, freq_range + 1)
        const_signal = np.repeat(freq / freq_range + 0.25, time_length)
        const_signal = np.expand_dims(const_signal, axis=1)
        const_signal_tensor = torch.from_numpy(np.array([const_signal]))

        hidden = torch.zeros(1, 200)
        hidden = hidden.to(device)
        const_signal_tensor = const_signal_tensor.float().to(device)
        with torch.no_grad():
            hidden_list, _, _ = model(const_signal_tensor, hidden)

        fixed_point, result_ok = analyzer.find_fixed_point(torch.unsqueeze(hidden_list[:, 20, :], dim=0).to(device),
                                                           const_signal_tensor)
        if not result_ok:
            continue

        # hidden_list has shape (1, time_length, n_hid); drop the batch
        # dimension before storing
        hidden_list_list[i * time_length:(i + 1) *
                         time_length, :] = hidden_list.cpu().numpy()[0]
        fixed_point_list[i] = fixed_point.detach().cpu().numpy()
        i += 1

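    # project the 15 trajectories and their fixed points onto the top three
    # principal components of the pooled hidden states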
    pca = PCA(n_components=3)
    pca.fit(hidden_list_list)

    fig = plt.figure()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=45, azim=134)

    ax.set_xlabel('PC1')
    ax.set_ylabel('PC2')
    ax.set_zlabel('PC3')

    print(hidden_list_list.shape)
    print(fixed_point_list.shape)
    pc_trajectory = pca.transform(hidden_list_list)
    pc_fixed_point = pca.transform(fixed_point_list)

    for i in range(15):
        ax.plot(pc_trajectory.T[0, i * time_length:(i + 1) * time_length],
                pc_trajectory.T[1, i * time_length:(i + 1) * time_length],
                pc_trajectory.T[2, i * time_length:(i + 1) * time_length], color='royalblue')
    ax.scatter(pc_fixed_point.T[0], pc_fixed_point.T[1], pc_fixed_point.T[2], color='red', marker='x')
    plt.title('trajectory')
    plt.savefig(f'figures/trajectory_{activation}.png', dpi=100)