Code example #1
def main(opt):
    model = LSTM(opt, batch_first=True, dropout=opt.dropout)
    if opt.pre_train:
        model.load_state_dict(torch.load(opt.save_path))
    optimizer = optim.Adam(model.parameters(), opt.learning_rate)
    mseloss = nn.MSELoss()

    dataset = PowerDataset(opt,
                           prepocess_path=opt.prepocess_path,
                           transform=transforms.Compose(
                               [transforms.ToTensor()]))
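    # fixed split: the first 8664 samples for training, the rest for testing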
    train_dataset = data.Subset(dataset, indices=range(8664))
    test_dataset = data.Subset(dataset, indices=range(8664, len(dataset)))
    train_dataloader = data.dataloader.DataLoader(train_dataset,
                                                  num_workers=opt.n_threads,
                                                  batch_size=opt.batch_size,
                                                  shuffle=True)
    test_sampler = data.SequentialSampler(test_dataset)
    test_dataloader = data.dataloader.DataLoader(
        test_dataset,
        num_workers=opt.n_threads,
        batch_size=opt.test_batch_size,
        shuffle=False,
        sampler=test_sampler)

    for e in range(opt.epochs):
        if opt.test_only:
            test(model, test_dataloader)
            break
        print('epoch: ', e)
        train(model, mseloss, optimizer, train_dataloader)
        test(model, test_dataloader)
        torch.save(model.state_dict(), opt.save_path)
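
A note on this load pattern: torch.load defaults to the device the tensors were saved from, so a GPU-trained checkpoint fails on a CPU-only host unless map_location is passed. A minimal self-contained sketch (the helper name and path handling are illustrative, not from the example above):

import torch

def load_for_cpu(model: torch.nn.Module, path: str) -> torch.nn.Module:
    # map_location='cpu' remaps CUDA-saved tensors so CPU-only hosts can load them
    state = torch.load(path, map_location='cpu')
    model.load_state_dict(state)
    return model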
Code example #2
    def show_result(self):

        files = os.listdir(self.output)
        for file in files:
            if ".pth" in file:
                path = os.path.join(self.output, file)
                lstm_model = LSTM(self.input_size, self.output_size,
                                  self.nb_neurons)
                lstm_model.load_state_dict(torch.load(path))
                lstm_model.eval()
                print("model : %s loaded" % path)
                predictions = []

                for (x, _) in self.testing_dataloader:
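                    # only full batches: the zeroed hidden state below is sized for batch_size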
                    if x.shape[0] == self.batch_size:
                        with torch.no_grad():
                            lstm_model.hidden_cell = (
                                torch.zeros(1, self.batch_size,
                                            lstm_model.nb_neurons),
                                torch.zeros(1, self.batch_size,
                                            lstm_model.nb_neurons))
                            output = lstm_model(x.float())
                            output = self.data.unnormalizeData(
                                output).squeeze()
                            predictions += output.tolist()

                plt.plot(predictions, label="prediction")
                plt.plot(self.real_data_test, label="target")
                plt.title(file)
                plt.legend()
                plt.show()
Code example #3
def infer(minmax, data_train, data_test):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # lstm_train_model = LSTM()
    model = LSTM().to(device)
    model.load_state_dict(
        torch.load(r"D:\stock\weights\checkpont_67.27376310428824.pth"))
    model.eval()
    test_size = len(data_test)
    future_day = test_size
    timestamp = 5
    output_predict = np.zeros(
        (data_train.shape[0] + future_day, data_train.shape[1]))
    output_predict[0] = data_train.iloc[0]
    for k in range(0, (data_train.shape[0] // timestamp) * timestamp,
                   timestamp):
        index = min(k + timestamp, output_predict.shape[0] - 1)
        batch_x = np.expand_dims(data_train.iloc[k:index, :].values, axis=0)
        batch_y = data_train.iloc[k + 1:index + 1, :].values
        batch_x = torch.Tensor(batch_x).to(device)
        batch_y = torch.Tensor(batch_y).to(device)
        out_logits = model(batch_x)
        # init_value = last_state
        output_predict[k + 1:k + timestamp + 1] = out_logits.cpu().detach().numpy()[0]
    output_predict = minmax.inverse_transform(output_predict)
    return output_predict
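
Note: the loop above tracks gradients it never uses and then discards them with .detach(); wrapping pure inference in torch.no_grad() avoids that bookkeeping. A self-contained sketch (the model and batch are stand-ins, not the example's LSTM):

import torch

model = torch.nn.LSTM(input_size=4, hidden_size=8, batch_first=True)  # stand-in model
batch_x = torch.randn(1, 5, 4)  # stand-in batch: (batch, timesteps, features)
with torch.no_grad():  # skip autograd bookkeeping during inference
    out, _ = model(batch_x)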
Code example #4
File: train_test.py Project: camel8899/MLDS
def test(test, feature, model, hidden, layer, output, index2char, index2phone, phone_map, phone2index):
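	# note: "test" is the checkpoint path loaded via torch.load below; "output" is the answer file path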
	ans = open(output,'w')
	ans.write('id,phone_sequence\n')
	test_set = Feature_Dataset(feature,'test')
	if feature == 'mfcc':
		feature_dim = 39
	elif feature == 'fbank':
		feature_dim = 69
	elif feature == 'all':
		feature_dim = 108
	
	if model == 'LSTM':
		test_model = LSTM(feature_dim, hidden, layer)
	elif model == 'BiLSTM':
		test_model = LSTM(feature_dim, hidden, layer, bi=True)
	elif model == 'C_RNN':
		group_size = 5
		test_model = C_RNN(group_size, feature_dim, hidden, layer)    
	
	checkpoint = torch.load(test)
	test_model.load_state_dict(checkpoint['model'])
	test_model.eval()
	if USE_CUDA:
		test_model = test_model.cuda()		
	for i in tqdm(range(1,len(test_set)+1)):
		data = test_set[i-1]
		speaker = data[0]
		test_feature = Variable(data[1].float())
		test_hidden = test_model.init_hidden()
		pred = torch.max(test_model(test_feature, test_hidden), 1)[1]
		result = test_trim(index2char, index2phone, phone_map, phone2index, pred.data.cpu().numpy())
		ans.write('{},{}\n'.format(speaker, result))
	ans.close()
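
Note: torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4, so wrapping data[1].float() above is equivalent to using the tensor directly. A self-contained sketch (the tensor is a stand-in for one utterance's features):

import torch

data_tensor = torch.randn(10, 39)   # stand-in: 10 frames of 39-dim mfcc features
test_feature = data_tensor.float()  # no Variable(...) wrapper needed since PyTorch 0.4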
Code example #5
File: train.py Project: sathish142/pytorch
def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory."""
    print("Loading model.")

    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)

    print("model_info: {}".format(model_info))

    # Determine the device and construct the model.
    device = torch.device("cpu" if torch.cuda.is_available() else "cpu")
    #model = LSTM(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
    model = LSTM(model_info['num_classes'], model_info['input_size'], model_info['hidden_size'], model_info['num_layers'])

    # Load the stored model parameters.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))

    model.to(device).eval()

    print("Done loading model.")
    return model
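
For this loader to work, training must have written a matching model_info.pth. A sketch of the save side (the hyperparameter values are placeholders; /opt/ml/model is SageMaker's conventional model directory):

import os
import torch

model_dir = '/opt/ml/model'  # SageMaker's conventional model directory
model_info = {  # keys mirror what model_fn reads above; values are placeholders
    'num_classes': 5, 'input_size': 28, 'hidden_size': 128, 'num_layers': 2,
}
with open(os.path.join(model_dir, 'model_info.pth'), 'wb') as f:
    torch.save(model_info, f)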
Code example #6
def main(opt):
    train_dataset = BADataset(opt.dataroot, opt.L, True, False, False)
    train_dataloader = BADataloader(train_dataset, batch_size=opt.batchSize,
                                    shuffle=True, num_workers=opt.workers,
                                    drop_last=True)

    valid_dataset = BADataset(opt.dataroot, opt.L, False, True, False)
    valid_dataloader = BADataloader(valid_dataset, batch_size=opt.batchSize,
                                    shuffle=True, num_workers=opt.workers,
                                    drop_last=True)

    test_dataset = BADataset(opt.dataroot, opt.L, False, False, True)
    test_dataloader = BADataloader(test_dataset, batch_size=opt.batchSize,
                                   shuffle=True, num_workers=opt.workers,
                                   drop_last=True)

    all_dataset = BADataset(opt.dataroot, opt.L, False, False, False)
    all_dataloader = BADataloader(all_dataset, batch_size=opt.batchSize,
                                  shuffle=False, num_workers=opt.workers,
                                  drop_last=False)

    opt.n_edge_types = train_dataset.n_edge_types
    opt.n_node = train_dataset.n_node
    opt.n_existing_node = all_node_num

    net = LSTM(opt, hidden_state=opt.state_dim*5)
    net.double()
    print(net)

    criterion = nn.CosineSimilarity(dim=1, eps=1e-6)

    if opt.cuda:
        net.cuda()
        criterion.cuda()

    optimizer = optim.Adam(net.parameters(), lr=opt.lr)
    early_stopping = EarlyStopping(patience=opt.patience, verbose=True)

    os.makedirs(OutputDir, exist_ok=True)
    train_loss_ls = []
    valid_loss_ls = []
    test_loss_ls = []

    for epoch in range(0, opt.niter):
        train_loss = train(epoch, train_dataloader, net, criterion, optimizer, opt)
        valid_loss = valid(valid_dataloader, net, criterion, opt)
        test_loss = test(test_dataloader, net, criterion, opt)

        train_loss_ls.append(train_loss)
        valid_loss_ls.append(valid_loss)
        test_loss_ls.append(test_loss)

        early_stopping(valid_loss, net, OutputDir)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    df = pd.DataFrame({'epoch': [i for i in range(1, len(train_loss_ls) + 1)],
                       'train_loss': train_loss_ls,
                       'valid_loss': valid_loss_ls,
                       'test_loss': test_loss_ls})
    df.to_csv(OutputDir + '/loss.csv', index=False)

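    # reload the best weights written by EarlyStopping before running inference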
    net.load_state_dict(torch.load(OutputDir + '/checkpoint.pt'))
    inference(all_dataloader, net, criterion, opt, OutputDir)
Code example #7
def validate():

    stock = "MC.PA"
    directory = "/Users/baptiste/Desktop/training"

    input_size = 4
    output_size = 4
    nb_neurons = 200

    test_split = 0.1
    time_window = 5

    dataloader = Data(stock)
    df = dataloader.getData()
    real_data = df.to_numpy()
    df_normalized = dataloader.normalizeData(df)
    df_normalized = torch.FloatTensor(df_normalized.to_numpy())

    test_split = int(test_split * df.shape[0])
    real_test_split = real_data[-test_split:-time_window, 3]
    testing_split = df_normalized[-test_split:, :]

    files = os.listdir(directory)

    for file in files:
        if ".pth" in file:
            path = os.path.join(directory, file)
            lstm_model = LSTM(input_size, output_size, nb_neurons)
            lstm_model.load_state_dict(torch.load(path))
            print("model : %s loaded" % path)

            lstm_model.eval()

            predictions = []

            for i in range(testing_split.shape[0] - time_window):

                x_test = testing_split[i:i + time_window]

                with torch.no_grad():

                    lstm_model.hidden_cell = (
                        torch.zeros(1, 1, lstm_model.nb_neurons),
                        torch.zeros(1, 1, lstm_model.nb_neurons))
                    predictions.append(
                        dataloader.unnormalizeData(
                            lstm_model(x_test).tolist()))
            predictions = np.array(predictions)[:, 3, 0]

            # plt.figure(figsize=(15, 10))
            plt.plot(real_test_split, label="target")
            plt.plot(predictions, label="prediction")
            plt.title(file)
            plt.legend()
            plt.show()
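
The loop above is a standard sliding-window evaluation. A self-contained sketch of just the slicing it performs:

import torch

series = torch.arange(10, dtype=torch.float32)
time_window = 5
windows = [series[i:i + time_window] for i in range(len(series) - time_window)]
# five overlapping windows: [0..4], [1..5], [2..6], [3..7], [4..8]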
Code example #8
File: convo.py Project: rashfarazzaq123/convo
def get_bot_response2():
    try:
        device = torch.device("cpu")

        with open('data2.json', 'r') as instances:
            data = json.load(instances)

        FILE = "dataserialized2.pth"
        dataserialized = torch.load(FILE)

        seq_length = dataserialized["seq_length"]
        input_size = dataserialized["input_size"]
        hidden_size = dataserialized["hidden_size"]
        num_layers = dataserialized["num_layers"]
        num_classes = dataserialized["num_classes"]
        word_list = dataserialized["word_list"]
        tags = dataserialized["tags"]
        model_state = dataserialized["model_state"]

        model = LSTM(seq_length, input_size, hidden_size, num_layers,
                     num_classes).to(device)
        model.load_state_dict(model_state)
        model.eval()
    except Exception as e:
        print(e)
    if request.method == "POST":
        bot = "Convo"
        user_data = request.json

        sentence = user_data['message']
        sentence = normalization(sentence)
        sentence = tokenization(sentence)
        x = bag_of_words(sentence, word_list)
        x = torch.from_numpy(x)
        x = x.reshape(-1, x.shape[0])
        x = x.to(device)

        output, hidden = model(x)
        _, predicted = torch.max(output, dim=1)
        tag = tags[predicted.item()]

        prob = torch.softmax(output, dim=1)
        probability = prob[0][predicted.item()]

        if probability.item() > 0.80:

            for i in data['data']:
                if tag == i['tag']:
                    return jsonify(random.choice(i['bot_responses']))
        else:
            return jsonify("I do not understand...")
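
One caveat with the pattern above: the try/except only prints the failure, so a failed load leaves model unbound and the POST handler raises NameError later. A minimal fail-fast sketch (file name as in the example):

import torch

FILE = "dataserialized2.pth"
try:
    dataserialized = torch.load(FILE)
except OSError as e:  # missing or unreadable checkpoint file
    raise RuntimeError(f"could not load model state from {FILE}") from e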
Code example #9
def setup_model(model_name, dataset_name, model_path, device):
    """Sets up language-model (LSTM) on device based on its designated filename."""
    device = torch.device(device)
    batch_size = 20
    data_loader = DataLoader(dataset_name, batch_size, device, 70)
    model = LSTM(vocab_size=len(data_loader.corpus.dictionary), device=device,
                 batch_size=batch_size)

    model_path = os.path.join(model_path, model_name)
    print("loading model state_dict...")
    print("model_path", model_path)
    model.load_state_dict(
        torch.load(model_path, map_location=torch.device(device))['model'])
    return model, data_loader, batch_size
Code example #10
def load_model():
    print("==> loading existing lstm model")
    model_info = torch.load(model_path)
    model = LSTM(input_size=input_size,
                 num_classes=model_info['num_classes'],
                 hidden=model_info['hidden'],
                 num_layers=model_info['num_layers'],
                 mean_after_fc=model_info['mean_after_fc'],
                 mask_empty_frame=model_info['mask_empty_frame'])
    model.cuda()
    model.load_state_dict(model_info['state_dict'])
    best_acc = model_info['best_acc']
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    optimizer.load_state_dict(model_info['optimizer'])
    return (model, optimizer)
Code example #11
        if fixed_pt_quantize:
            lr = 0.003

        optimizer = torch.optim.Adamax(net.parameters(), lr=lr)

        ##############################################PRUNING###########################################################################
        if pruning:
            print(
                "Pruning============================================================================"
            )
            figure_name = "/Subject_" + str(Idx_subject) + "_Finger_" + str(
                Finger) + "_pruning"

            PATH_pre_trained = checkpoint_path + '/s' + str(
                Idx_subject) + '_f' + str(Finger) + '_trained_model'
            net.load_state_dict(torch.load(PATH_pre_trained))
            net.train()
            net.threshold_pruning()
            # train the pruned model:
            try:
                corr_train, corr_val, corr_test = train(TrainX,
                                                        TrainY,
                                                        TestX,
                                                        TestY,
                                                        net,
                                                        lossfunc,
                                                        optimizer,
                                                        num_epoch=10,
                                                        clip=5,
                                                        Finger=Finger)
            except KeyboardInterrupt:
Code example #12
    decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2,
                      out_dim=opt['model']['state_input_dim'])
    decoder = decoder.cuda()
    if len(opt['train']['pretrained']) > 0:
        state_encoder.load_state_dict(
            torch.load(
                os.path.join(opt['train']['pretrained'], 'state_encoder.pkl')))
        offset_encoder.load_state_dict(
            torch.load(
                os.path.join(opt['train']['pretrained'],
                             'offset_encoder.pkl')))
        target_encoder.load_state_dict(
            torch.load(
                os.path.join(opt['train']['pretrained'],
                             'target_encoder.pkl')))
        lstm.load_state_dict(
            torch.load(os.path.join(opt['train']['pretrained'], 'lstm.pkl')))
        decoder.load_state_dict(
            torch.load(os.path.join(opt['train']['pretrained'],
                                    'decoder.pkl')))
        print('generator model loaded')

    if opt['train']['use_adv']:
        short_discriminator = ShortMotionDiscriminator(
            in_dim=(opt['model']['num_joints'] * 3 * 2))
        short_discriminator = short_discriminator.cuda()
        long_discriminator = LongMotionDiscriminator(
            in_dim=(opt['model']['num_joints'] * 3 * 2))
        long_discriminator = long_discriminator.cuda()
        if len(opt['train']['pretrained']) > 0:
            short_discriminator.load_state_dict(
                torch.load(
Code example #13
lstm_crf = LstmCrf(token_vocab,
                   label_vocab,
                   char_vocab,
                   word_embedding=word_embed,
                   char_embedding=char_embed,
                   crf=crf,
                   lstm=lstm,
                   univ_fc_layer=linear,
                   embed_dropout_prob=train_args['feat_dropout'],
                   lstm_dropout_prob=train_args['lstm_dropout'],
                   char_highway=char_hw if train_args['use_highway'] else None)

word_embed.load_state_dict(state['model']['word_embed'])
char_embed.load_state_dict(state['model']['char_embed'])
char_hw.load_state_dict(state['model']['char_hw'])
lstm.load_state_dict(state['model']['lstm'])
crf.load_state_dict(state['model']['crf'])
linear.load_state_dict(state['model']['linear'])
lstm_crf.load_state_dict(state['model']['lstm_crf'])

if use_gpu:
    lstm_crf.cuda()

# Load dataset
logger.info('Loading data')
parser = ConllParser()
test_set = SeqLabelDataset(data_file, parser=parser)
test_set.numberize(token_vocab, label_vocab, char_vocab)
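# invert the vocab mappings (index -> token/label) for decoding predictions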
idx_token = {v: k for k, v in token_vocab.items()}
idx_label = {v: k for k, v in label_vocab.items()}
processor = SeqLabelProcessor(gpu=use_gpu)
Code example #14
        torch.load(os.path.join(opt['test']['model_dir'],
                                'state_encoder.pkl')))
    offset_encoder = OffsetEncoder(in_dim=opt['model']['offset_input_dim'])
    offset_encoder = offset_encoder.cuda()
    offset_encoder.load_state_dict(
        torch.load(os.path.join(opt['test']['model_dir'],
                                'offset_encoder.pkl')))
    target_encoder = TargetEncoder(in_dim=opt['model']['target_input_dim'])
    target_encoder = target_encoder.cuda()
    target_encoder.load_state_dict(
        torch.load(os.path.join(opt['test']['model_dir'],
                                'target_encoder.pkl')))
    lstm = LSTM(in_dim=opt['model']['lstm_dim'],
                hidden_dim=opt['model']['lstm_dim'] * 2)
    lstm = lstm.cuda()
    lstm.load_state_dict(
        torch.load(os.path.join(opt['test']['model_dir'], 'lstm.pkl')))
    decoder = Decoder(in_dim=opt['model']['lstm_dim'] * 2,
                      out_dim=opt['model']['state_input_dim'])
    decoder = decoder.cuda()
    decoder.load_state_dict(
        torch.load(os.path.join(opt['test']['model_dir'], 'decoder.pkl')))
    print('model loaded')

    ## get positional code ##
    if opt['test']['use_ztta']:
        ztta = gen_ztta().cuda()
    # print('ztta:', ztta.size())
    # assert 0
    version = opt['test']['version']

    # writer = SummaryWriter(log_dir)
Code example #15
class MelodyGenerator:
    def __init__(self, model_path="model.pt"):
        self.model = LSTM(45, 256, 45)
        self.model.load_state_dict(torch.load(model_path, map_location=device))
        self.sequence_length = 64

        with open("data/dictionary.json", "r") as f:
            self._mappings = json.load(f)

        self.melody = None

    def generate_melody(self, seed, num_steps, max_sequence_length,
                        temperature):
        seed = seed.split()
        melody = seed
        seed = [self._mappings[symbol] for symbol in seed]

        for _ in range(num_steps):
            seed = seed[-max_sequence_length:]
            sequence = MusicDataset.one_hot(np.array(seed),
                                            len(self._mappings))
            sequence = torch.from_numpy(sequence[np.newaxis, ...]).float()

            outputs = self.model(sequence)[0]
            probabilities = outputs.data.detach()
            output_int = self.__sample_with_temperature(
                probabilities, temperature)
            seed.append(output_int)

            output_symbol = [
                k for k, v in self._mappings.items() if v == output_int
            ][0]

            if output_symbol == "/":
                break

            melody.append(output_symbol)

        self.melody = melody
        return melody

    @staticmethod
    def __sample_with_temperature(probabilities, temperature):
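        # shift scores non-negative, rescale in log space by temperature
        # (lower temperature -> greedier, higher -> closer to uniform),
        # then softmax back to a distribution before sampling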
        probabilities -= torch.min(probabilities)
        predictions = torch.log(probabilities) / temperature
        probabilities = torch.exp(predictions) / torch.sum(
            torch.exp(predictions))

        choices = range(len(probabilities))
        index = np.random.choice(choices, p=probabilities.numpy())

        return index

    def save_melody(
        self,
        melody=None,
        step_duration=0.25,
        format="midi",
        file_name=f"output/{int(time())}.mid",
    ):
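        # note: the file_name default above was evaluated once, at class definition time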
        if melody is None:
            melody = self.melody

        stream = m21.stream.Stream()

        start_symbol = None
        step_counter = 1

        for i, symbol in enumerate(melody):
            if symbol != "_" or i + 1 == len(melody):
                if start_symbol is not None:
                    quarter_length_duration = step_duration * step_counter

                    if start_symbol == "r":
                        m21_event = m21.note.Rest(
                            quarterLength=quarter_length_duration)

                    else:
                        m21_event = m21.note.Note(
                            int(start_symbol),
                            quarterLength=quarter_length_duration)

                    stream.append(m21_event)
                    step_counter = 1

                start_symbol = symbol

            else:
                step_counter += 1

        stream.write(format, file_name)
Code example #16
        output, hidden = model(input, hidden)
        _, topi = output.data.topk(1)
        topi = topi[0][0]
        if topi == char2idx['EOS']:
            break
        else:
            letter = idx2char[topi]
            output_name += letter
        input = inputTensor([topi])

    return output_name


def samples(start_letters='アイウ'):
    for start_letter in start_letters:
        print(sample(start_letter))


def main():
    samples(u'アスナ')


if __name__ == '__main__':
    # load dic
    char2idx = cPickle.load(open("dic.p", "rb"))
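    # invert the mapping for decoding: index -> character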
    idx2char = {v: k for k, v in char2idx.items()}
    # build model
    model = LSTM(input_dim=len(char2idx), embed_dim=100, hidden_dim=128)
    model.load_state_dict(torch.load('model.pt'))
    main()
Code example #17
        vae.eval()
        del state

    #load pretrained LSTM model
    conv = None
    if opt.conv:
        conv = LSTM(n_mels)
        files = os.listdir(statepath)
        states = [f for f in files if "lstm_" in f]
        states.sort()
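        # checkpoints are sorted by name; states[-1] below takes the last (newest) one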
        if not len(states) > 0:
            raise Exception("no states for crnn provided!")
        state = os.path.join(statepath, states[-1])
        if os.path.isfile(state):
            state = torch.load(state)
            conv.load_state_dict(state['state_dict'])
        conv.to(device)
        conv.eval()
        del state
    # print(netG)
    # print(netD)

    criterion = nn.BCELoss()

    fixed_noise = None
    if opt.ae:
        fixed_noise = torch.tensor([
            vae.encode(Mset[i].to(device)).detach().cpu().numpy()
            for i in range(1337, 1337 + opt.batchSize)
        ],
                                   dtype=torch.float32)
Code example #18
def main():
    '''
    Main purpose: compute the error rate on the test data.
    '''
    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length',
                        type=int,
                        default=240,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    # parser.add_argument('--pred_length', type=int, default=378-60-1,
    #                     help='Predicted length of the trajectory')

    parser.add_argument('--pred_length',
                        type=int,
                        default=240,
                        help='Predicted length of the trajectory')
    # Model to be loaded
    parser.add_argument('--epoch',
                        type=int,
                        default=199,
                        help='Epoch of model to be loaded')
    # cuda support
    parser.add_argument('--use_cuda',
                        action="store_true",
                        default=True,
                        help='Use GPU or not')

    # gru model
    parser.add_argument('--gru',
                        action="store_true",
                        default=False,
                        help='True : GRU cell, False: LSTM cell')
    # method selection
    parser.add_argument(
        '--method',
        type=int,
        default=1,
        help=
        'Method of lstm will be used (1 = social lstm, 2 = obstacle lstm, 3 = vanilla lstm)'
    )

    # Parse the parameters
    sample_args = parser.parse_args()

    # for drive run
    prefix = ''
    f_prefix = '.'

    method_name = "VANILLALSTM"
    model_name = "LSTM"
    save_tar_name = method_name + "_lstm_model_"
    if sample_args.gru:
        model_name = "GRU"
        save_tar_name = method_name + "_gru_model_"

    print("Selected method name: ", method_name, " model name: ", model_name)

    # Save directory
    save_directory = os.path.join(f_prefix, 'model/', method_name, model_name)
    # plot directory for plotting in the future
    plot_directory = os.path.join(f_prefix, 'plot/', method_name, model_name)

    result_directory = os.path.join(f_prefix, 'result/', method_name)
    plot_test_file_directory = 'test'

    # Define the path for the config file for saved args
    with open(os.path.join(save_directory, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)

    seq_length = sample_args.pred_length + sample_args.obs_length

    # Create the DataLoader object
    dataloader = DataLoader(f_prefix,
                            1,
                            sample_args.pred_length + sample_args.obs_length,
                            forcePreProcess=True,
                            infer=True)
    create_directories(os.path.join(result_directory, model_name),
                       dataloader.get_all_directory_namelist())
    create_directories(plot_directory, [plot_test_file_directory])
    dataloader.reset_batch_pointer(valid=False)

    dataset_pointer_ins = dataloader.dataset_pointer

    smallest_err = 100000
    smallest_err_iter_num = -1
    origin = (0, 0)
    reference_point = (0, 1)

    submission_store = []  # store submission data points (txt)
    result_store = []  # store points for plotting

    # Initialize net
    net = LSTM(saved_args, True)

    if sample_args.use_cuda:
        net = net.cuda()

    # Get the checkpoint path
    checkpoint_path = os.path.join(
        save_directory, save_tar_name + str(sample_args.epoch) + '.tar')
    if os.path.isfile(checkpoint_path):
        print('Loading checkpoint')
        checkpoint = torch.load(checkpoint_path)
        model_epoch = checkpoint['epoch']
        net.load_state_dict(checkpoint['state_dict'])
        print('Loaded checkpoint at epoch', model_epoch)

    results_it = []
    for iterator in range(50):
        x_seq_arr = []
        ret_x_seq_arr = []
        error_arr = []
        expected_day_arr = []
        predicted_day_arr = []

        total_error = 0

        for batch in range(dataloader.num_batches):
            # Get data
            x, y, d = dataloader.next_batch(randomUpdate=False)

            # Get the sequence
            x_seq, y_seq, d_seq = x[0], y[0], d[0]
            x_seq = np.array(x_seq)
            '''
            x_seq = dataloader.inverse_transform_MinMaxScaler(x_seq)
            print('{}/{}'.format(batch, dataloader.num_batches))
            x_seq[sample_args.obs_length:,-2]= 17
            x_seq[sample_args.obs_length:,-1]= 28
            x_seq = dataloader.fit_transform_MinMaxScaler(x_seq)
            '''
            x_seq = Variable(torch.from_numpy(x_seq).float())

            temp = x_seq[:, -2:]
            # x_seq = x_seq[:,:-2]

            if sample_args.use_cuda:
                x_seq = x_seq.cuda()
                temp = temp.cuda()

            obs_data = x_seq[:sample_args.obs_length]

            ret_x_seq = sample(sample_args, x_seq, temp, net)

            error = get_mean_error(x_seq[sample_args.obs_length:, :-2],
                                   ret_x_seq[sample_args.obs_length:, :-2],
                                   False)
            total_error += error

            # display the predictions
            # x_seq = result[0]
            x_seq = x_seq.data.cpu().numpy()
            # print(x_seq.size())
            # x_seq = np.reshape(x_seq,(x_seq.shape[0], saved_args.input_size))
            x_seq = dataloader.inverse_transform_MinMaxScaler(x_seq)
            # ret_x_seq = result[1]
            ret_x_seq = ret_x_seq.data.cpu().numpy()
            # ret_x_seq = np.reshape(ret_x_seq,(ret_x_seq.shape[0], saved_args.input_size))
            ret_x_seq = dataloader.inverse_transform_MinMaxScaler(ret_x_seq)

            gt = (x_seq[:, 0] - x_seq[:, 2]) / (x_seq[:, 1] - x_seq[:, 0])
            pred = (ret_x_seq[:, 0] - ret_x_seq[:, 2]) / (ret_x_seq[:, 1] -
                                                          ret_x_seq[:, 0])

            gt2 = gt[sample_args.obs_length:]
            pred2 = pred[sample_args.obs_length:]
            expected_day = np.mean(gt2)
            predicted_day = np.mean(pred2)
            # print(expected_day, predicted_day, expected_day-predicted_day)
            # print('Error: ',error)

            x_seq_arr.append(x_seq)
            ret_x_seq_arr.append(ret_x_seq)
            error_arr.append(error.data.cpu().numpy())
            expected_day_arr.append(expected_day)
            predicted_day_arr.append(predicted_day)

            # fig, axs = plt.subplots(6, 1)
            # axs[0].plot(ret_x_seq[:,0], color = 'blue' , label = 'Predict h1', linestyle='--', marker='^')
            # axs[0].plot(x_seq[:,0], color = 'red', label = 'Real h1', linestyle='-', marker='.')
            # axs[1].plot(ret_x_seq[:,1], color = 'blue' , label = 'Predict h2', linestyle='--', marker='^')
            # axs[1].plot(x_seq[:,1], color = 'red', label = 'Real h2', linestyle='-', marker='.')
            # axs[2].plot(ret_x_seq[:,2], color = 'blue' , label = 'Predict h3', linestyle='--', marker='^')
            # axs[2].plot(x_seq[:,2], color = 'red', label = 'Real h3', linestyle='-', marker='.')
            # axs[3].plot(pred, color = 'blue' , label = 'Predict h3', linestyle='--', marker='^')
            # axs[3].plot(gt, color = 'red', label = 'Real h3', linestyle='-', marker='.')

            # axs[4].plot(ret_x_seq[:,-2], color = 'blue' , label = 'Predict Tevwi', linestyle='--', marker='^')
            # axs[4].plot(x_seq[:,-2], color = 'red', label = 'Real Tevwi', linestyle='-', marker='.')

            # axs[5].plot(ret_x_seq[:,-1], color = 'blue' , label = 'Predict Tcdwi', linestyle='--', marker='^')
            # axs[5].plot(x_seq[:,-1], color = 'red', label = 'Real Tcdwi', linestyle='-', marker='.')

            # for ax in axs:
            #     ax.legend()
            #     ax.grid()
            # plt.show()

        total_error = total_error / dataloader.num_batches
        if total_error < smallest_err:
            print("**********************************************************")
            print('Best iteration has been changed. Previous best iteration: ',
                  smallest_err_iter_num, 'Error: ', smallest_err)
            print('New best iteration : ', iterator, 'Error: ', total_error)
            smallest_err_iter_num = iterator
            smallest_err = total_error

        results_it.append((sample_args.pred_length, sample_args.obs_length,
                           x_seq_arr, ret_x_seq_arr, error_arr))

    dataloader.write_to_plot_file([results_it[smallest_err_iter_num]],
                                  os.path.join(plot_directory,
                                               plot_test_file_directory))
Code example #19
        input[0], input[1] = piq_x_loc, piq_y_loc
        input[2] = velocity(piq_x_loc, piq_y_loc, prev_piq_x_loc,
                            prev_piq_y_loc, timeSince(prev_time))
        input[3], input[4] = (
            piq_x_loc - prev_piq_x_loc), -1 * (piq_y_loc - prev_piq_y_loc)
        input[5] = distanceFromHoop(piq_x_loc, piq_y_loc)
        input[6], input[7] = x_loc, y_loc
        input[8] = velocity(x_loc, y_loc, prev_x_loc, prev_y_loc,
                            timeSince(prev_time))
        input[9], input[10] = (x_loc - prev_x_loc), -1 * (y_loc - prev_y_loc)
        input[11] = distanceFromHoop(x_loc, y_loc)
        input.unsqueeze_(0)
        return input

    rnn = LSTM(12, 300, 2)
    rnn.load_state_dict(
        torch.load('./data/' + args.player_w_underscore + '.model'))

    # --- Screen-clearing code
    screen.fill(WHITE)
    screen.blit(BackGround.image, BackGround.rect)

    # --- Drawing code
    pressed = pygame.key.get_pressed()
    inc = [(0, 0.25), (0, -0.25), (0.4, 0), (-0.4, 0)]
    # update x,y coordinates based on keys pressed
    for i in range(273, 277):
        if pressed[i] == 1:
            prev_x_loc = x_loc
            prev_y_loc = y_loc
            x_loc += inc[i - 273][0]
            y_loc += inc[i - 273][1]
Code example #20
def load_network(network: LSTM, path: str):
    network.load_state_dict(torch.load(path))
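
A usage sketch for this helper (the constructor arguments and path are placeholders; the real LSTM signature is project-specific):

net = LSTM(12, 300, 2)  # placeholder args for the project's LSTM class
load_network(net, './data/player.model')  # placeholder path
net.eval()  # switch to inference mode once the weights are loaded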
Code example #21
File: convo.py Project: rashfarazzaq123/convo
    FILE = "dataserialized1.pth"
    dataserialized = torch.load(FILE)

    seq_length = dataserialized["seq_length"]
    input_size = dataserialized["input_size"]
    hidden_size = dataserialized["hidden_size"]
    num_layers = dataserialized["num_layers"]
    num_classes = dataserialized["num_classes"]
    word_list = dataserialized["word_list"]
    tags = dataserialized["tags"]
    model_state = dataserialized["model_state"]

    model = LSTM(seq_length, input_size, hidden_size, num_layers,
                 num_classes).to(device)
    model.load_state_dict(model_state)
    model.eval()
except Exception as e:
    print(e)
    history = []

app = Flask(__name__)
CORS(app, resources={r"/api/*": {"origins": "127.0.0.1:3000"}})
app.config['CORS_HEADERS'] = 'Content-Type'


@app.route('/api')
@cross_origin()
def hello_world():
    return "hello1"
Code example #22
File: run.py Project: joelmfonseca/COM-503
test_ratio = 0.13  # to get exactly one test sample based on how we built test samples
batch_size = 10

learning_rate = 0.001
look_back = 168
look_ahead = 574

train_loader, test_loader, scaler = build_loader(test_ratio, look_back,
                                                 look_ahead, batch_size)
model = LSTM(batch_size, learning_rate)

resume_training = True
if resume_training:
    # load previous model
    checkpoint = torch.load('saved_models/lstm_adam_b10_lb168_model')
    model.load_state_dict(checkpoint['model_state_dict'])
    model.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epoch = checkpoint['epoch']
    loss = checkpoint['loss']
else:
    epoch = 0
    loss = np.inf

train = False
if train:
    best_loss = (loss, epoch)
    patience = 20
    still_learning = True
    while still_learning:

        # train
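
The resume branch above expects a checkpoint dict holding model and optimizer state plus epoch and loss. A sketch of the matching save call (names as in the example above):

torch.save({
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': model.optimizer.state_dict(),
    'epoch': epoch,
    'loss': loss,
}, 'saved_models/lstm_adam_b10_lb168_model')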
Code example #23
def main(train_type=None):
    model_path = './model.pth'
    # dir_path = Path('/home/g19tka13/Downloads/data/3C')
    # data_path = dir_path / 'taskA/train.csv'
    train_data, weighted = strtolist()
    test_data = loadtestdata()
    preudo_list = []
    used_unlabeled_data = None
    unlabeled_data = None
    vocab = None
    if train_type == 'self_train':
        unlabeled_data = pd.read_csv('/home/g19tka13/taskA/aclgenerate.csv',
                                     sep=',')
        unlabeled_data = unlabeled_data.head(3000)
        vocab = load_word_vector(train_data, test_data, 'self_train',
                                 unlabeled_data)
    #     prelabeled_data = None
    #     vocab = load_word_vector(train_data, test_data, 'self_train', used_unlabeled_data)
    #
    #     if len(preudo_list) == 0:  # check whether this is the first training pass
    #         train_iter, val_iter, label_word_id = assemble(train_data, vocab, 1)
    #     else:
    #         train_iter, val_iter, label_word_id = assemble(train_data, vocab, 1, prelabeled_data)  # add the pseudo-labeled data
    else:
        vocab = load_word_vector(train_data, test_data)
    #     train_iter, val_iter, label_word_id = assemble(train_data, vocab, 1)
    # test_iter, unlabel_iter = assemble(test_data, vocab, 0)
    # return train_iter, val_iter, test_iter, vocab, weighted, label_word_id
    best_val_f1 = 0
    if train_type == 'self_train':
        prelabel_data = None
        vocab_size = vocab.vectors.size()
        print('Total num. of words: {}, word vector dimension: {}'.format(
            vocab_size[0], vocab_size[1]))
        model = LSTM(vocab_size[0],
                     vocab_size[1],
                     hidden_size=100,
                     num_layers=2,
                     batch=10)
        model.embedding.weight.data = vocab.vectors
        model.embedding.weight.requires_grad = False
        print(model)
        while len(preudo_list) < 2700:
            class_id = []
            delete_id = []
            if len(preudo_list) == 0:  # check whether this is the first training pass
                train_iter, val_iter, label_word_id = assemble(
                    train_data, vocab, 1)
            else:
                train_iter, val_iter, label_word_id = assemble(
                    train_data, vocab, 1,
                    prelabeled_data=prelabel_data)  # add the pseudo-labeled data

            test_iter, unlabel_iter = assemble(test_data,
                                               vocab,
                                               0,
                                               unlabeled_data=unlabeled_data)
            weight = torch.tensor(weighted)
            train_iter = Data.DataLoader(train_iter,
                                         batch_size=10,
                                         shuffle=True)
            val_iter = Data.DataLoader(val_iter, batch_size=10, shuffle=True)
            test_iter = Data.DataLoader(test_iter,
                                        batch_size=10,
                                        shuffle=False)
            unlabel_iter = Data.DataLoader(unlabel_iter,
                                           batch_size=10,
                                           shuffle=False)
            # vocab_size = vocab.vectors.size()
            # print('Total num. of words: {}, word vector dimension: {}'.format(
            #     vocab_size[0],
            #     vocab_size[1]))
            # model = LSTM(vocab_size[0], vocab_size[1], hidden_size=100, num_layers=2, batch=10)
            # model.embedding.weight.data = vocab.vectors
            # model.embedding.weight.requires_grad = False  # use pre-trained word vectors; set False to keep them frozen
            # print(model)
            # print(model.parameters())
            # for parameter in model.parameters():
            #     print(parameter)
            optimizer = optim.Adam(model.parameters(), lr=0.0005)
            n_epoch = 10
            # nn.CrossEntropyLoss you will give your weights only once while creating the module
            # loss_cs = nn.CrossEntropyLoss(weight=weight)
            # loss_fnc = nn.CosineEmbeddingLoss()
            # loss_mes = nn.MSELoss()
            y = torch.ones(1).long()
            for epoch in range(n_epoch):
                # for where to call model.train(), see https://blog.csdn.net/andyL_05/article/details/107004401
                model.train()
                for item_idx, item in enumerate(train_iter, 0):
                    label = item[2]
                    unique_num, count = torch.unique(
                        label, return_counts=True)  # default sorted=True
                    unique_num = unique_num.tolist()
                    # print(unique_num, count)
                    real_weight = torch.ones(6, dtype=torch.float)
                    for i in range(6):
                        if i in unique_num:
                            idx = unique_num.index(i)
                            real_weight[i] = 1 / np.log(1.02 + count[idx] / 10)
                        else:
                            real_weight[i] = 1 / np.log(2.02)
                    optimizer.zero_grad()
                    out = model(item)
                    # label_pred = KMeans(n_clusters=6, init=label_out).fit_predict(out)
                    # fixed weight result=0.1716
                    # loss = F.cross_entropy(out, label.long(), weight=weight)
                    # real time weight calculation
                    loss = F.cross_entropy(out,
                                           label.long(),
                                           weight=real_weight)
                    # nn.CosineEmbeddingLoss() expects 2-D inputs, not 1-D vectors
                    # loss = loss_fnc(torch.unsqueeze(label_pred, dim=0), torch.unsqueeze(label.long(), dim=0), y)
                    # loss = Variable(loss, requires_grad=True)
                    # loss_MES = loss_mes(out,  label_vector)
                    # loss = loss_fnc(out, torch.Tensor(one_hot), y)
                    loss.backward()
                    # print(model.lstm.all_weights.shape)
                    # print(model.lstm.)
                    optimizer.step()
                    if (item_idx + 1) % 5 == 0:
                        train_value, train_y_pre = torch.max(
                            out, 1
                        )  # torch.max over dim 1 returns both the max values and their indices
                        # print('train_value', train_value)
                        # acc = torch.mean((torch.tensor(train_y_pre == label.long(), dtype=torch.float)))
                        # print(train_y_pre, label.long())
                        f1 = f1_score(label.long(),
                                      train_y_pre,
                                      average='macro')
                        # print(train_y_pre, label)
                        print(
                            'epoch: %d \t item_idx: %d \t loss: %.4f \t f1: %.4f'
                            % (epoch, item_idx, loss, f1))

                model.eval()  # evaluate the model once per epoch
                val_pre_label = []
                val_y_label = []
                # if (epoch+1) % 5 == 0:
                with torch.no_grad():
                    # print(unlabel_iter)
                    # for item in unlabel_iter:  # prelabel
                    #     index = item[2]
                    #     out = model(item)
                    #     out = F.softmax(out, dim=1)
                    #     predict_value, predict_class = torch.max(out, 1)
                    #     print('predict_value', predict_value)
                    #     for i in range(len(predict_value)):
                    #         if predict_value[i] > 0.9:
                    #             delete_id.append(index[i])  # record the index so the sample can be removed from the source data
                    #             class_id.append(predict_class[i])
                    for item in val_iter:
                        label = item[2]
                        out = model(item)
                        _, val_y_pre = torch.max(out, 1)
                        val_pre_label.extend(val_y_pre)
                        val_y_label.extend(label)
                #         f1 = f1_score(label.long(), val_y_pre, average='macro')
                #         val_f1.append(f1)
                # f1 = np.array(f1).mean()
                f1 = f1_score(torch.Tensor(val_y_label).long(),
                              torch.Tensor(val_pre_label),
                              average='macro')
                print(f1)
                if f1 > best_val_f1:
                    print('val acc: %.4f > %.4f saving model %.4f' %
                          (f1, best_val_f1, len(preudo_list)))
                    torch.save(model.state_dict(), model_path)
                    best_val_f1 = f1
            model.eval()  # after this training round, generate pseudo-labels
            with torch.no_grad():
                for item in unlabel_iter:  # prelabel
                    index = item[2]
                    out = model(item)
                    out = F.softmax(out, dim=1)
                    predict_value, predict_class = torch.max(out, 1)
                    # print('predict_value', predict_value)
                    # print('predict_class', predict_class)
                    for i in range(len(predict_value)):
                        if predict_value[i] > 0.9:
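                            # keep only confident predictions (softmax prob > 0.9) as pseudo-labels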
                            delete_id.append(
                                index[i].item())  # record the index so it can be removed from the unlabeled pool
                            class_id.append(predict_class[i].item())
            preudo_list.extend(delete_id)
            if len(preudo_list) != 0:
                unlabeled_data, prelabel_data = split_unlabeled_data(
                    unlabeled_data, delete_id, class_id, prelabel_data)
    else:
        train_iter, val_iter, label_word_id, label_to_id = assemble(
            train_data, vocab, 1)
        test_iter, unlabel_iter = assemble(test_data, vocab, 0)
        # train_iter, val_iter, test_iter, vocab, weight, label_word_id = load_data()
        weight = torch.tensor(weighted)
        train_iter = Data.DataLoader(train_iter,
                                     batch_size=batch_size,
                                     shuffle=True)
        val_iter = Data.DataLoader(val_iter,
                                   batch_size=batch_size,
                                   shuffle=True)
        test_iter = Data.DataLoader(test_iter,
                                    batch_size=batch_size,
                                    shuffle=False)
        vocab_size = vocab.vectors.size()
        print('Total num. of words: {}, word vector dimension: {}'.format(
            vocab_size[0], vocab_size[1]))
        model = LSTM(vocab_size[0],
                     vocab_size[1],
                     hidden_size=100,
                     num_layers=2,
                     batch=batch_size)
        model.embedding.weight.data = vocab.vectors
        model.embedding.weight.requires_grad = False
        print(model)
        # print(model.parameters())
        # for parameter in model.parameters():
        #     print(parameter)
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        n_epoch = 50
        best_val_f1 = 0
        # nn.CrossEntropyLoss you will give your weights only once while creating the module
        # loss_cs = nn.CrossEntropyLoss(weight=weight)
        # size_average/reduce are deprecated aliases; reduction='mean' covers both
        loss_fnc = nn.CosineEmbeddingLoss(reduction='mean')
        # loss_mes = nn.MSELoss()
        one_list = torch.ones((batch_size, 1), dtype=torch.float)
        zero_list = torch.zeros((batch_size, 1), dtype=torch.float)
        for epoch in range(n_epoch):
            # for where to call model.train(), see https://blog.csdn.net/andyL_05/article/details/107004401
            model.train()
            batch_loss = 0
            for item_idx, item in enumerate(train_iter, 0):
                label = item[2]
                unique_num, count = torch.unique(
                    label, return_counts=True)  # default sorted=True
                unique_num = unique_num.tolist()
                # print(unique_num, count)
                real_weight = torch.ones(6, dtype=torch.float)
                for i in range(6):
                    if i in unique_num:
                        idx = unique_num.index(i)
                        real_weight[i] = 1 / np.log(1.02 +
                                                    count[idx] / batch_size)
                    else:
                        real_weight[i] = 1 / np.log(2.02)
                optimizer.zero_grad()
                # out, p_rep, n_rep = model(item, label_to_id)
                out, out_o, label_matrix, out_len, label_id = model(
                    item, label_to_id)
                # label_pred = KMeans(n_clusters=6, init=label_out).fit_predict(out)
                # fixed weight result=0.1716
                # loss = F.cross_entropy(out, label.long(), weight=weight)
                # real time weight calculation
                p_rep, n_rep = confusion(out_o, label_matrix, out_len,
                                         label_id)
                loss1 = F.cross_entropy(out, label.long(), weight=real_weight)
                loss2 = loss_fnc(out, p_rep, one_list)
                loss3 = loss_fnc(out, n_rep, zero_list)
                loss = loss1 + loss2 + loss3
                # batch_loss = batch_loss + +loss2 + loss
                # nn.CosineEmbeddingLoss() expects 2-D inputs, not 1-D vectors
                # loss = loss_fnc(torch.unsqueeze(label_pred, dim=0), torch.unsqueeze(label.long(), dim=0), y)
                # loss = Variable(loss, requires_grad=True)
                # loss_MES = loss_mes(out,  label_vector)
                # loss = loss_fnc(out, torch.Tensor(one_hot), y)
                loss.backward()
                # print(model.lstm.all_weights.shape)
                # print(model.lstm.)
                optimizer.step()
                if (item_idx + 1) % 5 == 0:
                    _, train_y_pre = torch.max(
                        out,
                        1)  # torch.max over dim 1 returns both the max values and their indices

                    # acc = torch.mean((torch.tensor(train_y_pre == label.long(), dtype=torch.float)))
                    # print(train_y_pre, label.long())
                    f1 = f1_score(label.long(), train_y_pre, average='macro')
                    # print(train_y_pre, label)
                    print(
                        'epoch: %d \t item_idx: %d \t loss: %.4f \t f1: %.4f' %
                        (epoch, item_idx, loss, f1))
                    # batch_loss = 0
            # finish each epoch val a time
            val_pre_label = []
            val_y_label = []
            # if (epoch + 1) % 5 == 0:
            model.eval()
            with torch.no_grad():
                for item in val_iter:
                    label = item[2]
                    out = model(item)
                    _, val_y_pre = torch.max(out, 1)
                    val_pre_label.extend(val_y_pre)
                    val_y_label.extend(label)
                    # acc = torch.mean((torch.tensor(val_y_pre == label, dtype=torch.float)))
            #         f1 = f1_score(label.long(), val_y_pre, average='macro')
            #         val_f1.append(f1)
            # f1 = np.array(f1).mean()
            f1 = f1_score(torch.Tensor(val_y_label).long(),
                          torch.Tensor(val_pre_label),
                          average='macro')
            print(f1)
            if f1 > best_val_f1:
                print('val acc: %.4f > %.4f saving model' % (f1, best_val_f1))
                torch.save(model.state_dict(), model_path)
                best_val_f1 = f1
    test_f1 = []
    test_pre_label = []
    test_y_label = []
    model_state = torch.load(model_path)
    model.load_state_dict(model_state)
    model.eval()
    with torch.no_grad():
        for item_idx, item in enumerate(test_iter, 0):
            label = item[2]
            out = model(item)
            _, test_pre = torch.max(out, 1)
            test_pre_label.extend(test_pre)
            test_y_label.extend(label)
            # print('test_true_label={} test_pre_label={}'.format(label, test_y_pre))
            # f1 = f1_score(label.long(), test_y_pre, average='macro')
            # test_f1.append(f1)
    final_f1 = f1_score(torch.Tensor(test_y_label).long(),
                        torch.Tensor(test_pre_label),
                        average='macro')
    # final_f1 = np.array(test_f1).mean()
    print('test_pre_label',
          collections.Counter(torch.Tensor(test_pre_label).tolist()))
    print('test_y_label',
          collections.Counter(torch.Tensor(test_y_label).tolist()))
    print('test f1 : %.4f' % final_f1)
    generate_submission(torch.Tensor(test_pre_label).tolist())
    count = {}
    test_pre = torch.Tensor(test_pre_label).tolist()
    test_true = torch.Tensor(test_y_label).tolist()
    c_matrix = confusion_matrix(test_true, test_pre, labels=[0, 1, 2, 3, 4, 5])
    print(c_matrix)
    for i in range(len(test_true)):
        if test_true[i] == test_pre[i]:
            if test_true[i] not in count.keys():
                count[test_true[i]] = 1
            else:
                count[test_true[i]] = count[test_true[i]] + 1
    print(count)
    pre_true = pd.DataFrame(columns=['true_id', 'pre_id'])
    test_true_ser = pd.Series(test_true)
    test_pre_ser = pd.Series(test_pre)
    pre_true['true_id'] = test_true_ser
    pre_true['pre_id'] = test_pre_ser
    pre_true.to_csv('/home/g19tka13/taskA/true_predict.csv',
                    sep=',',
                    index=False)
Code example #24
                                dataX.append(_x)
                                dataY.append(_y)

                            train_size = int(len(dataY) * 0.7)
                            dev_size = int((len(dataY) - train_size) * 0.5)
                            test_size = int(len(dataY) - train_size - dev_size)


                            testX = torch.Tensor(np.array(dataX[train_size+dev_size:len(dataX)]))
                            testX = Variable(testX)
                            testY = torch.Tensor(np.array(dataY[train_size+dev_size:len(dataX)]))
                            testY = Variable(testY)
                            print(f"../{word}/{count_com[com]}Final_Result_intro/model/{method}_method_model/{way}{per}{model_use}model_{method}.pkl")
                            lstm = LSTM(num_classes, input_size, hidden_size, num_layers)
                                # y_train = model.train()
                            lstm.load_state_dict(torch.load(f"../{word}/{count_com[com]}Final_Result_intro/model/{method}_method_model/{way}{per}{model_use}model_{method}.pkl"))
                            

                            test_predict = lstm(testX)
                            result = count_values(testY,test_predict)
                            
                            record_text.append(f"../{word}/{count_com[com]}Final_Result_intro/model/{method}_method_model/{way}{per}{model_use}model_{method}.pkl")
                            test_result.append(result.item())
                            
                            if way == 'euc' and model_use =='fastText' and method =='add':
                                euc_result_fasttext_add.append(result)
                            elif way == 'cos' and model_use =='fastText' and method =='add':
                                cos_result_fasttext_add.append(result)
                            elif way == 'manha' and model_use =='fastText' and method =='add':
                                manha_result_fasttext_add.append(result)
Code example #25
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)

###########
### START
###########

print('-> CREATE LSTM MODEL')
model = LSTM(input_dim=cfg['model']['data_dim'],
             hidden_dim=cfg['model']['lstm_dim'],
             batch_size=cfg['model']['batch_size'],
             output_dim=cfg['model']['data_dim'],
             num_layers=cfg['model']['lstm_layers'],
             inference=True)

model.load_state_dict(torch.load(args.model_file))
model.eval()

print('-> READ DATA')
with open(args.seed_file, 'rb') as f:
    id_to_sheet = pickle.load(f)
    data = pickle.load(f)

### BOOTSTRAPPING

# get seed sequence
numpy_seed_sequence = data[args.seed_index][:, 130:]
# convert to tensor + add batch dimension
seed_sequence = torch.FloatTensor(numpy_seed_sequence).unsqueeze(0)

print('-> INFERENCE')
Code example #26
File: test.py Project: wcy1122/SYSU_Undergraduate
def main():
    if dataset == 'train' or dataset == 'val':
        data_dir = './data/train.tsv'
        data_x, data_y = load_data(data_dir,
                                   _type='train',
                                   _sent_only=sent_only)
    else:
        data_dir = './data/test.tsv'
        data_x = load_data(data_dir, _type='test', _sent_only=sent_only)

    print('Load data with size', len(data_x))
    _dict = load_dict(in_dir)
    data_x = word2index(data_x, _dict)
    data_x, lengths = Padding(data_x)

    if dataset == 'train' or dataset == 'val':
        train_x, train_y, train_l, valid_x, valid_y, valid_l = \
            Split_data(data_x, data_y, lengths, 0.8)
        if dataset == 'train':
            data_x, data_y, lengths = train_x, train_y, train_l
        if dataset == 'val':
            data_x, data_y, lengths = valid_x, valid_y, valid_l
    data_set = Data.TensorDataset(data_x, lengths)

    test_loader = Data.DataLoader(dataset=data_set,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  num_workers=0)
    print('Dataset load done')

    dim_in = len(_dict)
    dim_out = 5
    model = LSTM(dim_in, dim_out, input_size=embed_size,
                 device=device).to(device)
    if _step == 0:
        model_name = 'best.pth'
    else:
        model_name = 'model%d.pth' % _step
    model_dir = os.path.join(os.path.join(in_dir, 'checkpoint'), model_name)
    if device == 'cuda':
        weight_dict = torch.load(model_dir)
    else:
        weight_dict = torch.load(model_dir, map_location='cpu')
    model.load_state_dict(weight_dict)
    print('Model load done')

    with torch.no_grad():
        predict_ans = torch.LongTensor(0).to(device)
        model.eval()
        for step, (inputs, lengths) in enumerate(test_loader):
            inputs = inputs.to(device)
            lengths = lengths.to(device)
            outputs = model(inputs, lengths)
            predict = outputs.argmax(dim=1)
            predict_ans = torch.cat((predict_ans, predict))
            if step % 10 == 0:
                print('eval step %d' % step)
    if dataset == 'test':
        write_csv(in_dir, predict_ans)
        print('Test done')
    else:
        # column-normalized confusion matrix: rows are predictions, columns are true labels
        diff_matrix = np.zeros([5, 5])
        data_y = np.array(data_y)
        predict_ans = predict_ans.cpu().numpy()  # move off the GPU before NumPy conversion
        Test_Acc = 0
        for x, y in zip(predict_ans, data_y):
            diff_matrix[x, y] += 1
            Test_Acc += (x == y)
        for i in range(5):
            data_size = len(np.where(data_y == i)[0])
            if data_size > 0:
                diff_matrix[:, i] /= data_size
        diff_matrix[np.where(diff_matrix < 1e-3)] = 0

        print("diff_matrix:")
        print(diff_matrix)
        print(len(data_y), Test_Acc)
        Test_Acc /= len(data_y)

        print("Test_Acc:", Test_Acc)
Code Example #27
fig_path = config.path.fig_path
pred_path = config.path.pred_path

seed_everything(seed)

df = get_df(DATA_PATH, columns).reset_index(drop=True)
price = df['Closing_price']
df_len = len(df)

data, _, _, scaler = get_data(DATA_PATH, columns, valid_len)

test_inputs = data[-seq_length:].tolist()

model = LSTM()
model.to(device)
model.load_state_dict(torch.load(model_path))
model.eval()  # disable dropout and other training-only behavior for inference


def main():
    for _ in range(fut_pred):
        seq = torch.FloatTensor(test_inputs[-seq_length:])
        seq = seq.to(device)
        with torch.no_grad():
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size).to(device),
                                 torch.zeros(1, 1, model.hidden_layer_size).to(device))
            test_inputs.append(model(seq).item())

    pred = scaler.inverse_transform(np.array(test_inputs[-fut_pred:]).reshape(-1, 1))
    with open(pred_path, 'wb') as f:
        pickle.dump(pred, f)
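
The excerpt never shows the script's entry point; presumably the original closes with the usual guard:

# Hypothetical entry point, not shown in the source.
if __name__ == '__main__':
    main()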
Code Example #28
class StockPrediction():
    def __init__(self, stock, time_window, batch_size, learning_rate=0.001):

        self.stock = stock
        self.time_window = time_window
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.input_size = 4
        self.output_size = 1
        self.nb_neurons = 200

        self.prepare_data()
        self.output = "/Users/baptiste/Desktop/training"

    def validate(self):

        self.lstm_model.eval()
        error = []
        it = iter(self.real_data_dataloader)
        real_data = next(it)
        for (x, _) in self.testing_dataloader:
            try:
                with torch.no_grad():
                    pred = self.lstm_model(x.float())
                    pred = self.data.unnormalizeData(pred)
                    real_data = real_data.view(-1, 1)
                    error = self.compute_error(error, pred, real_data)
                real_data = next(it)
            except StopIteration:
                # the real-data loader is exhausted; stop comparing
                break
        error_mean = np.mean(error) * 100
        print("Mean error percentage : ", error_mean)
        self.lstm_model.train()

    def compute_error(self, error, pred, target):

        # iterate over the actual batch size so a short final batch is handled
        for i in range(min(len(pred), len(target))):
            error.append(abs(pred[i, 0] - target[i, 0]) / target[i, 0])
        return error

    def prepare_data(self):

        validation_split = 0
        test_split = 0.1
        train_split = 1 - validation_split - test_split

        self.data = Data(self.stock)
        df = self.data.getData()
        df_normalized = self.data.normalizeData(df)
        df_normalized = torch.FloatTensor(df_normalized.to_numpy())

        train_split = int(train_split * df.shape[0])
        validation_split = int(validation_split * df.shape[0])
        test_split = int(test_split * df.shape[0])

        training_split = df_normalized[:train_split, :]

        training_data = Dataset(training_split, self.time_window)
        self.training_dataloader = DataLoader(training_data,
                                              batch_size=self.batch_size)

        # testing data
        real_data_tensor = torch.FloatTensor(df.to_numpy())
        self.real_data_test = torch.FloatTensor(
            real_data_tensor[-test_split:-self.time_window, 3])
        testing_dataset = Dataset(df_normalized[-test_split:, :],
                                  self.time_window)
        self.testing_dataloader = DataLoader(testing_dataset,
                                             batch_size=self.batch_size)
        self.real_data_dataloader = DataLoader(self.real_data_test,
                                               batch_size=self.batch_size)

    def train(self):

        # Model: resume training from a previously saved checkpoint
        self.lstm_model = LSTM(self.input_size, self.output_size,
                               self.nb_neurons)
        self.lstm_model.load_state_dict(
            torch.load("/Users/baptiste/Desktop/training/AAPL_36.pth"))
        loss_function = nn.MSELoss()
        optimizer = torch.optim.Adam(self.lstm_model.parameters(),
                                     lr=self.learning_rate)
        print("Start training")
        for epoch in range(nb_epochs):  # nb_epochs is assumed to be defined at module level

            for (x, y) in self.training_dataloader:

                optimizer.zero_grad()
                self.lstm_model.hidden_cell = (torch.zeros(
                    1, self.batch_size, self.lstm_model.nb_neurons),
                                               torch.zeros(
                                                   1, self.batch_size,
                                                   self.lstm_model.nb_neurons))
                pred = self.lstm_model(x.float())
                y = y.view(self.batch_size, 1)
                loss = loss_function(pred, y)
                loss.backward()
                optimizer.step()

            print("epoch n°%s : loss = %s" % (epoch, loss.item()))
            self.validate()
            if epoch % 5 == 1:
                model_name = "%s_%s.pth" % (self.stock, epoch)
                # save into the directory that show_result() later scans
                torch.save(self.lstm_model.state_dict(),
                           os.path.join(self.output, model_name))

    def show_result(self):

        files = os.listdir(self.output)
        for file in files:
            if ".pth" in file:
                path = os.path.join(self.output, file)
                lstm_model = LSTM(self.input_size, self.output_size,
                                  self.nb_neurons)
                lstm_model.load_state_dict(torch.load(path))
                lstm_model.eval()
                print("model : %s loaded" % path)
                predictions = []

                for (x, _) in self.testing_dataloader:
                    if x.shape[0] == self.batch_size:
                        with torch.no_grad():
                            lstm_model.hidden_cell = (
                                torch.zeros(1, self.batch_size,
                                            lstm_model.nb_neurons),
                                torch.zeros(1, self.batch_size,
                                            lstm_model.nb_neurons))
                            output = lstm_model(x.float())
                            output = self.data.unnormalizeData(
                                output).squeeze()
                            predictions += output.tolist()

                plt.plot(predictions, label="prediction")
                plt.plot(self.real_data_test, label="target")
                plt.title(file)
                plt.legend()
                plt.show()
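
The class leans on a Data helper (getData/normalizeData/unnormalizeData) that the excerpt never defines. A minimal sketch of the scaling such a helper would have to implement, assuming per-column min-max normalization with the close price in column 3, could be:

import torch

# Hypothetical stand-in for the normalize/unnormalize pair of the Data class.
class MinMaxColumns:
    def fit(self, df):
        self.col_min = torch.FloatTensor(df.min().to_numpy())
        self.col_max = torch.FloatTensor(df.max().to_numpy())
        return self

    def normalize(self, df):
        x = torch.FloatTensor(df.to_numpy())
        return (x - self.col_min) / (self.col_max - self.col_min)

    def unnormalize(self, close_pred, close_col=3):
        # invert the scaling for the close-price column only
        scale = self.col_max[close_col] - self.col_min[close_col]
        return close_pred * scale + self.col_min[close_col]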
Code Example #29
def main():
    parse = argparse.ArgumentParser()

    parse.add_argument("--train_data_dir",
                       default='./work/cache/cache_train_data',
                       type=str,
                       required=False)
    parse.add_argument("--dev_data_dir",
                       default='./work/cache/cache_dev_data',
                       type=str,
                       required=False)
    parse.add_argument("--test_data_dir",
                       default='./work/cache/cache_test_data',
                       type=str,
                       required=False)
    parse.add_argument("--src_train_data_dir",
                       default='./work/cache/cache_creative_src_train_data',
                       type=str,
                       required=False)
    parse.add_argument("--output_file",
                       default='creative_deep_model.log',
                       type=str,
                       required=False)
    parse.add_argument("--train_batch_size", default=256, type=int)
    parse.add_argument("--test_batch_size", default=8, type=int)
    parse.add_argument("--train_len", default=900000, type=int)
    parse.add_argument("--sample_rate", default=0.3, type=float)
    parse.add_argument("--random_state", default=1017, type=float)
    parse.add_argument("--do_train",
                       default=False,
                       action="store_true",
                       help="Whether to run training.")
    parse.add_argument("--do_test",
                       default=True,
                       action="store_true",
                       help="Whether to run testing.")
    parse.add_argument("--learnning_rate", default=5e-4, type=float)
    parse.add_argument("--num_epoch", default=5, type=int)
    parse.add_argument("--out_step", default=200, type=int)
    parse.add_argument("--max_vocab_size", default=1000000, type=int)
    parse.add_argument("--min_freq", default=2, type=int)
    parse.add_argument("--embed_size", default=300, type=int)
    parse.add_argument("--hidden_size", default=256, type=int)
    parse.add_argument("--dropout_rate", default=0.2, type=float)
    parse.add_argument("--warmup_steps",
                       default=100,
                       type=int,
                       help="Linear warmup over warmup_steps.")
    parse.add_argument("--GRAD_CLIP", default=1, type=float)
    parse.add_argument("--vocab_path", default='./work/json/', type=str)
    parse.add_argument("--do_cnn",
                       default=False,
                       action="store_true",
                       help="Whether to run cnn training.")
    parse.add_argument("--do_rnn",
                       default=True,
                       action="store_true",
                       help="Whether to run rnn training.")
    parse.add_argument("--do_avg",
                       default=False,
                       action="store_true",
                       help="Whether to run avg training.")

    parse.add_argument("--num_filter",
                       default=100,
                       type=int,
                       help="CNN模型一个filter的输出channels")

    args = parse.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.device = device

    if torch.cuda.is_available():
        print('=======use gpu=======')
    else:
        print('=======use cpu=======')

    set_seed()

    if os.path.exists(args.train_data_dir) and os.path.exists(
            args.dev_data_dir) and os.path.exists(args.test_data_dir):
        print('=======load data========')
        train_data = torch.load(args.train_data_dir)
        dev_data = torch.load(args.dev_data_dir)
        test_data = torch.load(args.test_data_dir)
    else:
        src_data, labels = read_corpus(args.train_data_dir)
        src_tr_data, src_te_data = src_data[:args.train_len], src_data[
            args.train_len:]
        X_train, X_val, y_train, y_val = train_test_split(
            src_tr_data,
            labels,
            test_size=args.sample_rate,
            random_state=args.random_state)

        train_data = [(text, labs) for text, labs in zip(X_train, y_train)]
        dev_data = [(text, labs) for text, labs in zip(X_val, y_val)]
        torch.save(train_data, './work/cache/cache_train_data')
        torch.save(dev_data, './work/cache/cache_dev_data')
        # note: this branch never builds test_data, which args.do_test relies on below

    vocab_list = build_vocab(args.vocab_path)
    #label_map = vocab.labels
    #print(label_map)
    num_classes = 20

    if args.do_train:
        if args.do_cnn:
            # this branch still references `vocab` and `label_map`, which are
            # commented out above, so it fails unless they are restored
            cnn_model = CNN(len(vocab.vocab),
                            args.embed_size,
                            args.num_filter, [2, 3, 4],
                            len(label_map),
                            dropout=args.dropout_rate)
            cnn_model.to(device)
            train(args, cnn_model, train_data, dev_data, vocab, dtype='CNN')

        if args.do_avg:
            avg_model = WordAVGModel(len(vocab.vocab),
                                     args.embed_size,
                                     len(label_map),
                                     dropout=args.dropout_rate)
            avg_model.to(device)
            train(args, avg_model, train_data, dev_data, vocab, dtype='AVG')

        if args.do_rnn:
            rnn_model = LSTM(vocab_list,
                             args.embed_size,
                             args.hidden_size,
                             num_classes,
                             n_layers=1,
                             bidirectional=True,
                             dropout=args.dropout_rate)
            rnn_model.to(device)
            train(args,
                  rnn_model,
                  train_data,
                  dev_data,
                  vocab_list,
                  dtype='LSTM')
    else:
        print('pass training...')

    if args.do_test:

        criterion = nn.CrossEntropyLoss()
        """
        cnn_model = CNN(len(vocab.vocab), args.embed_size, args.num_filter, [2, 3, 4], len(label_map),
                        dropout=args.dropout_rate)
        print('load model')
        cnn_model.load_state_dict(torch.load('creative_classifa-best-CNN.th'))
        cnn_model.to(device)
        print('start predict')
        cnn_result = test(args, cirtion, cnn_model, test_data, vocab)
        print('end predict')
        cnn_sub = pd.DataFrame(cnn_result)
        cnn_sub.to_csv('./work/cnn_test_props_creative_id.csv')
        print('Submission save sucessfully!')
        """
        """
        avg_model = WordAVGModel(len(vocab.vocab), args.embed_size, len(label_map), dropout=args.dropout_rate)
        avg_model.load_state_dict(torch.load('classifa-best-AVG.th'))
        avg_model.to(device)
        avg_result = test(args, cirtion, avg_model, test_data, vocab)
        avg_sub = pd.DataFrame(avg_result)
        avg_sub.to_csv('./work/test_props_feature/ad_avg_sub.csv')
        print('Submission save sucessfully!')
        """
        rnn_model = LSTM(vocab_list,
                         args.embed_size,
                         args.hidden_size,
                         num_classes,
                         n_layers=1,
                         bidirectional=True,
                         dropout=args.dropout_rate)
        rnn_model.load_state_dict(torch.load('classifa-best-LSTM.th'))
        rnn_model.to(device)
        rnn_result = test(args, criterion, rnn_model, test_data, vocab_list)
        rnn_sub = pd.DataFrame(rnn_result)
        rnn_sub.to_csv('./work/lstm_sub.csv')
        print('Submission saved successfully!')
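
set_seed() is called in main() but never defined in this excerpt. A minimal sketch, assuming it mirrors the seeding used in Code Example #25 and defaults to the script's random_state, could be:

import random
import numpy as np
import torch

# Hypothetical implementation of the set_seed() helper; the default value
# 1017 simply mirrors the --random_state argument above.
def set_seed(seed=1017):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)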
Code Example #30
File: features.py Project: Noxxel/floter
TLoader = DataLoader(tset,
                     batch_size=batch_size,
                     shuffle=True,
                     drop_last=True,
                     num_workers=num_workers)
VLoader = DataLoader(vset,
                     batch_size=batch_size,
                     shuffle=False,
                     drop_last=True,
                     num_workers=num_workers)

model = LSTM(n_mels, batch_size, num_layers=n_layers)
loss_function = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=l_rate)
#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True)
""" stateD = torch.load("lstm_399.nn")
model.load_state_dict(stateD['state_dict']) """
val_loss_list, val_accuracy_list, epoch_list = [], [], []
loss_function.to(device)
model.to(device)
model.hidden = model.init_hidden(device)
#optimizer.load_state_dict(stateD['optim'])

for epoch in tqdm(range(n_epochs), desc='Epoch'):
    train_running_loss, train_acc = 0.0, 0.0
    model.train()
    for idx, (X, y) in enumerate(tqdm(TLoader, desc="Training")):
        X, y = X.to(device), y.to(device)
        model.zero_grad()
        out = model(X)
        loss = loss_function(out, y)
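        # Hypothetical continuation: the excerpt ends mid-batch, so the usual
        # backward pass and bookkeeping are sketched here rather than taken
        # from the source.
        loss.backward()
        optimizer.step()
        train_running_loss += loss.item()
        train_acc += (out.argmax(dim=1) == y).float().mean().item()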