def evaluate_wass_dist(dataset_name, fake_samples):
    """Evaluate the Wasserstein distance for pre-generated fake samples.

    Loads the WGAN options and data for *dataset_name*, injects
    *fake_samples* into the options, and runs the evaluation graph.

    Args:
        dataset_name: key understood by ``load_opts``.
        fake_samples: array of generated samples to score.
    """
    gan_algorithm = 'WGAN'
    opts = load_opts(dataset_name, gan_algorithm)
    print(opts['work_dir'])
    if not os.path.isdir(opts['work_dir']):
        # A missing work dir is only reported, not treated as fatal.
        print('No working directory')
    dataset = datahandler.DataHandler(opts)
    dataset._load_data(opts)
    # The evaluator reads the pre-generated samples from the options dict.
    opts['fake_points'] = fake_samples
    evaluator = gan_test.WassersteinGAN(opts, dataset)
    evaluator.evaluate()
def main(): dataset_name = sys.argv[1] gan_algorithm = sys.argv[2] # loads options relevant to the dataset, gan algorithm opts = load_opts(dataset_name, gan_algorithm) if os.path.isdir(opts['work_dir']): shutil.rmtree(opts['work_dir']) os.mkdir(opts['work_dir']) # loads train, test datasets data = datahandler.DataHandler(opts) data._load_data(opts) gan_dict = { 'AdaGAN': gan.AdaGAN, 'UnrolledGAN': gan.UnrolledGAN, 'WGAN': gan.WassersteinGAN, 'VEEGAN': gan.VEEGAN } # closes the tf graph with gan_dict[gan_algorithm](opts, data) as g: # trains the GAN g.train() # sample samples = g._sample_internal() # save samples and loss plots np.save(os.path.join(opts['work_dir'], 'samples'), samples) np.save(os.path.join(opts['work_dir'], 'epoch_g_loss'), g._epoch_g_loss) np.save(os.path.join(opts['work_dir'], 'epoch_d_loss'), g._epoch_d_loss) # compute some metrics """
# USER PROGRAMMER
import sys

import datahandler

# Expect: main.py <excel filename> <total_avrg> [<sd>] (sd defaults inside
# get_evaluation when omitted).
if not len(sys.argv) == 3 and not len(sys.argv) == 4:
    print("usage : python main.py <excel filename> <total_avrg> <sd = 20>")
    # exit() terminates the script; -1 signals abnormal termination.
    exit(-1)

dh = datahandler.DataHandler(sys.argv[1])
if len(sys.argv) == 3:
    # BUG FIX: convert the average to int, consistent with the
    # four-argument branch below (argv entries are always strings).
    dh.get_evaluation(int(sys.argv[2]))
elif len(sys.argv) == 4:
    dh.get_evaluation(int(sys.argv[2]), int(sys.argv[3]))
# Script: build and compile a binary-classification "root" network on a
# two-class CIFAR subset using the project's datahandler/networkhandler
# wrappers around Keras.
from __future__ import print_function
import datahandler
import networkhandler
import rootmodel
import keras
from keras.datasets import cifar10
from keras.layers import concatenate
#import cifar

# Training hyperparameters; decay spreads the learning rate linearly
# across all epochs.
batch_size = 32
epochs = 200
lrate = 0.01
decay = lrate / epochs

# Init and load the CIFAR dataset restricted to 2 classes ("binary" mode),
# then scale pixel values by 1/255.
bin_data = datahandler.DataHandler()
bin_data.load_cifar_data_set(2, "binary")
bin_data.normalize(255)
#dataset.sort_data_by_label(dataset.x_train, dataset.y_train)

# Init network: name, batch size, number of classes, epochs, a flag
# (presumably data augmentation — verify against networkhandler.Network).
root_network = networkhandler.Network("binary", batch_size, 2, epochs, True, bin_data)

# Define the root network model from the training-data shape and 2 classes.
root_network.define_model(rootmodel.newModel(bin_data.x_train, 2))
root_network.preprocess()
# NOTE(review): keras >= 2.3 renamed `lr` to `learning_rate` — confirm the
# pinned keras version still accepts `lr`.
opt = keras.optimizers.SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
root_network.compile(opt)
print(root_network.model.summary())
def __init__(self, filename=None):
    """Create the object with a DataHandler backed by *filename*.

    Args:
        filename: path forwarded to ``datahandler.DataHandler``; may be
            ``None`` (whatever default the handler applies then).
    """
    self.dataHandler = datahandler.DataHandler(filename)
def __init__(self):
    """Create the data handler and cache the food options it provides."""
    self.datahandler = datahandler.DataHandler()
    # Fetched once at construction; later reads use this cached value.
    self.foodOptions = self.datahandler.getData()
def data_handler():
    """Return a fresh ``datahandler.DataHandler`` instance."""
    return datahandler.DataHandler()
import datahandler

# Overall grade average: 50
handler = datahandler.DataHandler('class_2-3.xlsx')
handler.get_evaluation(50)
# Script chunk (Python 2 — note the print statements): train a small neural
# net on lagged market-index data and evaluate it on a held-out split.
# `dh`, `nh`, and TRAINING_PERCENT are defined outside this chunk —
# presumably datahandler / networkhandler imports; verify in the full file.
LAG_DAYS = 3
startdate = '20000101'  # YYYYMMDD
# Index tickers; "%5E" is presumably a URL-encoded "^" (e.g. ^GSPC) —
# confirm against the data source the loader queries.
indices = ["%5EGSPC", "%5EIXIC", "%5EFVX", "%5ETYX", "%5EXMI", "%5ENYA"]
# Neural network topology: one input per index per day (today + LAG_DAYS
# lags), a single hidden layer, one output.
INPUT = len(indices) * (LAG_DAYS+1)
HIDDEN = 12
OUTPUT = 1
# Training hyperparameters.
ITERATIONS = 20
LRATE = 0.4
MOMENTUM = 0.6
# Load the index histories and build the supervised dataset.
data = dh.DataHandler()
data.load_indices(indices, startdate, LAG_DAYS)
data.create_data(INPUT, OUTPUT)
train, test = data.get_datasets(TRAINING_PERCENT)
print "Training:", len(train), "Testing:", len(test)
# Train the network and collect train/validation error curves.
sp_net = nh.NetHandler(INPUT, HIDDEN, OUTPUT, data)
train_errors, val_errors = sp_net.train(train, LRATE, MOMENTUM, ITERATIONS)
out_ser = sp_net.get_output(test, TRAINING_PERCENT)
print "Net Topology: %d-%d-%d" % (INPUT, HIDDEN, OUTPUT)
print sp_net.change_tomorrow()
# Accumulators for the evaluation loop that follows (continues past this
# chunk).
correct = 0
total = 0
misses = 0