Example #1
import os
import pickle as pkl

exp_dir = "test_dir"
batch_size = 10
sparsity_loss_weight = 0.0001
decay_rate = 0.95
decay_every = 500
init_learning_rate = 0.02

# Force CPU execution by hiding all GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Load the column metadata from the pickle file
with open(col_metadata_file, 'rb') as f:
    col_metadata = pkl.load(f)

data_preprocess = Dataset(input_file, col_metadata, sep=';', exp_dir=exp_dir)

# Split the raw file, then load the train/test/validation partitions
data_preprocess.split_dataset()
train_data, test_data, val_data = data_preprocess.load_dataset()

# Build the input feature columns consumed by the model below
inp_feature_columns = data_preprocess.make_feature_layer()

MODEL = TabNet(feature_columns=inp_feature_columns,
               num_features=16,
               feature_dim=128,
               output_dim=64,
               num_decision_steps=6,
               relaxation_factor=1.5,
               virtual_batch_size=10,
               num_classes=2,
               batch_size=10,
               batch_momentum=0.7,
               is_training=True)
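
The decay settings above are defined but never used in this excerpt. Assuming the TensorFlow 1.x API that this TabNet signature suggests, they would typically feed an exponential learning-rate schedule; the sketch below (including the tensorflow import) is an assumption, not part of the source:

import tensorflow as tf

# Exponential learning-rate decay built from the hyperparameters above (TF 1.x API)
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(init_learning_rate,
                                           global_step,
                                           decay_steps=decay_every,
                                           decay_rate=decay_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)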
Example #2
print(__doc__)

import math
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from data import Dataset
from sklearn.preprocessing import StandardScaler

# Load the insurance dataset
dataset = Dataset(432340, 16)
insurance = dataset.load_dataset('train')

# Use only one feature
#insurance_X = insurance[:, np.newaxis, 2]

# Split the data into training/testing sets

insurance_X = []
insurance_y = []

for b in insurance:
    #insurance_X.append([b[0][0].double(), b[0][1].double()])
    insurance_X.append([b[0][0][0], b[0][0][1]])
    insurance_y.append(b[1][0])

#insurance_X = insurance_X[:, np.newaxis, 7]

insurance_X_train = np.array(insurance_X[:-1500])
insurance_X_test = np.array(insurance_X[-1500:])
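
The excerpt ends before the targets are split and the model is fitted. A minimal continuation, assuming the targets are numeric and are split at the same index, using only the estimators and metrics already imported above:

insurance_y_train = np.array(insurance_y[:-1500])
insurance_y_test = np.array(insurance_y[-1500:])

# Fit an ordinary least-squares model and evaluate it on the held-out split
regr = linear_model.LinearRegression()
regr.fit(insurance_X_train, insurance_y_train)
insurance_y_pred = regr.predict(insurance_X_test)

print("Coefficients:", regr.coef_)
print("Mean squared error: %.2f" % mean_squared_error(insurance_y_test, insurance_y_pred))
print("R^2 score: %.2f" % r2_score(insurance_y_test, insurance_y_pred))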
Example #3
    parser.add_argument("train", help="training", type=bool)
    parser.add_argument("config", help="config file path", type=str)
    args = parser.parse_args()

    with open(args.config) as f:
        config = yaml.safe_load(f)
        if args.train:
            config = config["train"]
        else:
            config = config["test"]

    if args.train:
        dataset = Dataset(config["source_data_path"],
                          config["target_data_path"])
        en, ko = dataset.create_dataset()
        en_tensor, en_tokenizer, ko_tensor, ko_tokenizer = dataset.load_dataset(
            config["num_words"])
        en_words_count = len(en_tokenizer.word_index) + 1
        ko_words_count = len(ko_tokenizer.word_index) + 1

        train_ds = tf.data.Dataset.from_tensor_slices(
            (en_tensor, ko_tensor)).shuffle(10000).batch(
                config["batch_size"]).prefetch(1024)
        model = Seq2seq(source_words_count=en_words_count,
                        target_words_count=ko_words_count,
                        sos=ko_tokenizer.word_index["<start>"],
                        eos=ko_tokenizer.word_index["<end>"])

        loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
        optimizer = tf.keras.optimizers.Adam()

        train_loss = tf.keras.metrics.Mean(name='train_loss')
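
The excerpt stops after defining the loss, optimizer, and metric. A minimal training step is sketched below; the call signature model(src, tgt) and the assumption that it returns per-token probabilities (matching the loss object's default from_logits=False) are not shown in the source, and the epoch count is illustrative:

@tf.function
def train_step(src, tgt):
    with tf.GradientTape() as tape:
        preds = model(src, tgt)        # assumed signature; assumed to output per-token probabilities
        loss = loss_object(tgt, preds)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(loss)

num_epochs = 10  # illustrative; the real epoch count is not part of the excerpt
for epoch in range(num_epochs):
    for src_batch, tgt_batch in train_ds:
        train_step(src_batch, tgt_batch)
    print("epoch %d: loss %.4f" % (epoch + 1, train_loss.result()))
    train_loss.reset_states()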
Example #4
class Trainer(object):
	def __init__(self, model, config):
		self.model   = model
		self.lr      = config['lr']
		self.epoches = config['epochs']
		self.batches = config['batchs']
		self.samples = config['samples']
		self.dataset = Dataset(self.samples, self.batches)


	def train(self):
		losses = []
		validate = []
		h = []
		t = []

		optimizer = optim.SGD(self.model.parameters(), lr=self.lr)

		for e in range(self.epoches):
			for b, (sample, target) in enumerate(self.dataset.load_dataset('train')):

				# clear the local gradients
				optimizer.zero_grad()

				# compute hypothesis and calculate the loss
				hypo = self.model(sample)
				loss = self.model.loss(hypo, target)
				
				# backpropagate gradients of loss w.r.t. parameters
				loss.backward()

				# track loss
				losses.append(loss.item())

				# update weights
				optimizer.step()

			validate.append(self.validate())

		torch.save(self.model.state_dict(), os.path.join('save', datetime.now().strftime('%m-%d-%Y-%H-%M') + '.pth'))

		self.dataset.subplots_2D(2, 'Training', y_data_list=[losses, validate], subplt_titles=['Cumulative Loss', 'Validation'],
					   x_label=['Iterations', 'Epochs'],
					   y_label=['Cumulative Loss', 'Accuracy'])
		self.dataset.show('Binary Spiral')


	def validate(self):
		self.model.eval()
		losses = []
		correct = 0
		cumulative_loss = 0

		validation_set = self.dataset.load_dataset('validation')

		# no gradients are needed during validation
		with torch.no_grad():
			for data, target in validation_set:
				output = self.model(data)
				cumulative_loss += self.model.loss(output, target, size_average=False).item()
				losses.append(cumulative_loss)

				# predicted class = index of the maximum output score
				pred = output.max(1)[1]
				correct += pred.eq(target).sum().item()

		total = len(validation_set) * self.batches
		acc = 100. * correct / total

		# restore training mode for the next epoch
		self.model.train()
		return acc
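
A hypothetical driver for this class is sketched below; the config keys mirror the ones read in __init__, while SpiralClassifier and the concrete values are illustrative stand-ins (any nn.Module exposing a loss(hypothesis, target) method would do):

config = {'lr': 0.01, 'epochs': 100, 'batchs': 32, 'samples': 2000}
model = SpiralClassifier()  # hypothetical model class, not part of the source
trainer = Trainer(model, config)
trainer.train()
print('validation accuracy: %.2f%%' % trainer.validate())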
Example #5
File: decoder.py  Project: dykang/biassum
    # Batch options (note: argparse's type=bool treats any non-empty string as True)
    parser.add_argument("--batch", type=bool, default=True)
    parser.add_argument("--batch_size", type=int, default=25000)
    parser.add_argument("--batch_idx", type=int, default=0)
    # Extractive Algorithms
    parser.add_argument("--algos", nargs='+',
                        default=['first', 'last', 'random', 'mid', # Position
                                 'hard_convex', 'hard_convex_waterfall', 'hard_heuristic', # Diversity
                                 'nearest', 'knn' # Importance
                                ])

    args = parser.parse_args()
    curr_batch_order = args.batch_idx

    d = Dataset(dataset=args.dataset, dataset_type=args.dataset_type, path=args.path)
    pairs = d.load_dataset()

    # Create the PCA file if it does not already exist
    filename = os.path.join(args.path, 'pca', 'pca_%s_%s_%s.pkl' % (args.dataset, args.dataset_type, args.emb))
    if not os.path.isfile(filename):
        print('PCA file make!')
        pca_save(pairs, args.emb, args.bert_encode_type, args.encoder_path, filename, args.pca_components)

    if args.batch:
        # advance the generator to the batch selected by --batch_idx
        batches = chunks(pairs, args.batch_size)
        for i in range(curr_batch_order + 1):
            batch = next(batches)

    else:
        batch = pairs
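
chunks is not defined in this excerpt; a plausible implementation, assuming it simply yields consecutive slices of the pair list, is:

def chunks(items, size):
    """Yield successive slices of `items` containing at most `size` elements."""
    for start in range(0, len(items), size):
        yield items[start:start + size]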