def getValue12():
    """Flask view for conversation step 12.

    Appends the user's answer and the bot's reply to the running
    conversation, rendering the final page when the dialogue ends.
    """
    user_input = request.form["input"]  # renamed: `input` shadowed the builtin
    DataHolder.convo.append(user_input)
    reply = DataHolder.getSentence(user_input)  # renamed: `next` shadowed the builtin
    if reply == "NULL":
        return render_template(DataHolder.findFinal())
    DataHolder.convo.append(reply)
    # convo alternates input/output pairs: prevInputK = convo[2K-2],
    # prevOutputK+1 = convo[2K-1].  Build the context programmatically
    # instead of spelling out 25 keyword arguments.
    context = {"prevOutput1": DataHolder.first}
    for k in range(1, 13):
        context["prevInput%d" % k] = DataHolder.convo[2 * k - 2]
        context["prevOutput%d" % (k + 1)] = DataHolder.convo[2 * k - 1]
    return render_template("question13.html", **context)
def setUp(self):
    """Create a 4x4 SOM fixture and load normalized, label-encoded Iris data."""
    self.tested_object = SelfOrganizingMap([4, 4])
    self.tested_object.set_input_len(4)
    self.data = DataHolder(_filename='./IrisDataTrain.xls',
                           _number_of_fetures=4)
    self.data.normalize_features()
    self.data.encode_labels()
def __init__(self, arg):
    """Build a move from its numeric id or its full API url.

    :param arg: move id (int) or move resource url (str)
    """
    # isinstance is the idiomatic type check (was `type(arg) is ...`)
    if isinstance(arg, int):
        self.data = DataHolder.get("https://pokeapi.co/api/v2/move/%d/" % arg)
    elif isinstance(arg, str):
        self.data = DataHolder.get(arg)
    self.type = Type(self.data["type"]["url"])
    self.pp = self.data["pp"]
def getValue6():
    """Flask view for conversation step 6.

    Appends the user's answer and the bot's reply to the running
    conversation, rendering the final page when the dialogue ends.
    """
    user_input = request.form["input"]  # renamed: `input` shadowed the builtin
    DataHolder.convo.append(user_input)
    reply = DataHolder.getSentence(user_input)  # renamed: `next` shadowed the builtin
    if reply == "NULL":
        return render_template(DataHolder.findFinal())
    DataHolder.convo.append(reply)
    # convo alternates input/output pairs: prevInputK = convo[2K-2],
    # prevOutputK+1 = convo[2K-1].
    context = {"prevOutput1": DataHolder.first}
    for k in range(1, 7):
        context["prevInput%d" % k] = DataHolder.convo[2 * k - 2]
        context["prevOutput%d" % (k + 1)] = DataHolder.convo[2 * k - 1]
    return render_template("question7.html", **context)
def getValue2():
    """Flask view for conversation step 2.

    Appends the user's answer and the bot's reply to the running
    conversation, rendering the final page when the dialogue ends.
    """
    user_input = request.form["input"]  # renamed: `input` shadowed the builtin
    DataHolder.convo.append(user_input)
    reply = DataHolder.getSentence(user_input)  # renamed: `next` shadowed the builtin
    if reply == "NULL":
        return render_template(DataHolder.findFinal())
    DataHolder.convo.append(reply)
    # convo alternates input/output pairs: prevInputK = convo[2K-2],
    # prevOutputK+1 = convo[2K-1].
    context = {"prevOutput1": DataHolder.first}
    for k in range(1, 3):
        context["prevInput%d" % k] = DataHolder.convo[2 * k - 2]
        context["prevOutput%d" % (k + 1)] = DataHolder.convo[2 * k - 1]
    return render_template("question3.html", **context)
def setUpClass(cls):
    """Build one DataHolder per preprocessing variant and write all records."""
    data_name = "toy_160_90_3_data.npy"
    label_name = "toy_160_90_3_labels.npy"
    base_cfg = dict(epochs=1, architecture=[4], num_steps=100, save_step=10)
    cls.config3d = Config(**base_cfg)
    cls.config_green = Config(**base_cfg)
    cls.config_gray = Config(**base_cfg)
    cls.config_bin = Config(**base_cfg)
    cls.data_aug = DataHolder(cls.config3d, data_name, label_name,
                              record_path="toy_aug",
                              flip=True, augmentation=True)
    cls.data_gray = DataHolder(cls.config_gray, data_name, label_name,
                               record_path="toy_gray",
                               flip=True, augmentation=False, gray=True)
    cls.data_green = DataHolder(cls.config_green, data_name, label_name,
                                record_path="toy_green",
                                flip=True, augmentation=False, green=True)
    cls.data_binary = DataHolder(cls.config_bin, data_name, label_name,
                                 flip=True, augmentation=False,
                                 record_path="toy_bin", binary=True)
    for holder in (cls.data_aug, cls.data_gray,
                   cls.data_green, cls.data_binary):
        holder.create_records()
    cls.all_paths = ["toy_aug", "toy_gray", "toy_green", "toy_bin"]
    cls.data_list = [cls.data_gray, cls.data_green, cls.data_binary]
    cls.end = False
def records_generator(height, width, channels, data_path, label_path, name,
                      flip=False, augmentation=False, gray=False,
                      green=False, binary=False):
    """
    Generates tfrecords.

    :param height: image height
    :type height: int
    :param width: image width
    :type width: int
    :param channels: image channels
    :type channels: int
    :param data_path: path to load data np.array
    :type data_path: str
    :param label_path: path to load labels np.array
    :type label_path: str
    :param name: path to save tfrecord
    :type name: str
    :param flip: param to control if the data will be flipped
    :type flip: boolean
    :param augmentation: param to control if the data will augmented
    :type augmentation: boolean
    :param gray: param to control if the data will be grayscale images
    :type gray: boolean
    :param green: param to control if the data will use only the green channel
    :type green: boolean
    :param binary: param to control if the data will be binarized
    :type binary: boolean
    """
    # docstring fix only: the labels argument was documented as
    # ':param record_path:' and height as ':type heights:'.
    config = Config(height=height, width=width, channels=channels)
    data = DataHolder(config,
                      data_path=data_path,
                      label_path=label_path,
                      record_path=name,
                      flip=flip,
                      augmentation=augmentation,
                      gray=gray,
                      green=green,
                      binary=binary,
                      records=None)
    data.create_records()
def __init__(self, arg):
    """Build an item from its numeric id or its full API url.

    :param arg: item id (int) or item resource url (str)
    """
    # isinstance is the idiomatic type check (was `type(arg) is ...`)
    if isinstance(arg, int):
        self.data = DataHolder.get("https://pokeapi.co/api/v2/item/%d/" % arg)
    elif isinstance(arg, str):
        self.data = DataHolder.get(arg)
    for item in self.data["attributes"]:
        self.attributes.append(item["name"])
def setUpClass(cls):
    """Split raw driving data into train/valid/test tensors and loaders."""
    raw_X = np.load("self_driving_pi_car_data/train_data.npy")
    raw_y = np.load("self_driving_pi_car_data/train_labels.npy")
    randomize_in_place(raw_X, raw_y)
    img_shape = (-1, 3, 45, 80)
    valid_X = raw_X[0:1000].reshape(img_shape)
    valid_y = raw_y[0:1000]
    test_X = raw_X[1000:2000].reshape(img_shape)
    test_y = raw_y[1000:2000]
    train_X = raw_X[2000:].reshape(img_shape)
    train_y = raw_y[2000:]
    del raw_X
    del raw_y
    cls.command2int = {"forward": 0, "left": 1, "right": 2}
    cls.int2command = {v: k for k, v in cls.command2int.items()}

    def _dataset(features, labels):
        # wrap numpy arrays as a float-image / long-label TensorDataset
        return TensorDataset(torch.Tensor(features),
                             torch.Tensor(labels).type(torch.LongTensor))

    cls.cnn_config = CNNConfig()
    cls.self_driving_data = DataHolder(cls.cnn_config,
                                       _dataset(train_X, train_y),
                                       _dataset(valid_X, valid_y),
                                       _dataset(test_X, test_y))
    batch_X, batch_y = next(iter(cls.self_driving_data.train_loader))
    cls.batch_X = batch_X / 255
    cls.total_score = 0
def generate_loop(config, text_path, ShowTest=True):
    """Generate sentences on the command line until the user types "*end*".

    :type config: Config()
    :type text_path: str
    :type ShowTest: boolean
    """
    gen_config = deepcopy(config)
    gen_config.batch_size = gen_config.num_steps = 1
    dataholder = DataHolder(text_path=text_path)
    model = RNNLanguageModel(gen_config, dataholder)
    banner = '=-=' * 5
    with tf.Session(graph=model.graph) as sess:
        model.saver.restore(sess, model.save_path)
        if ShowTest:
            test_pp = run_epoch(model, sess, model.encoded_test)
            print(banner)
            print('Test perplexity: {}'.format(test_pp))
            print(banner)
            print(' ')
        print(banner)
        print("Sentence generator\nType '*end*' to break the loop")
        print(banner)
        starting_text = 'i am'
        while starting_text != "*end*":
            sentence = generate_sentence(sess, model, config,
                                         starting_text=starting_text,
                                         temp=1.0)
            print(' '.join(sentence))
            starting_text = input('> ')
def eval_model_on_test(Model, train_data_path, test_data_path, pkl_path,
                       epochs, embedding_dim, rnn_dim, layers, learning_rate,
                       momentum, bidirectional, toy=False):
    """Evaluate a trained recurrent model on one batch of the test set.

    :param Model: recurrent model class (RNN, LSTM or GRU)
    :param train_data_path: path to the train data
    :type train_data_path: str
    :param test_data_path: path to the test data
    :type test_data_path: str
    :param pkl_path: path to the pickled model weights
    :type pkl_path: str
    :param epochs: number of epochs
    :type epochs: int
    :param embedding_dim: embedding dimension
    :type embedding_dim: int
    :param rnn_dim: rnn hidden size dimension
    :type rnn_dim: int
    :param layers: number of rnn layers
    :type layers: int
    :param learning_rate: learning rate for the optimizer
    :type learning_rate: float
    :param momentum: momentum param
    :type momentum: float
    :param bidirectional: whether the rnn is bidirectional
    :type bidirectional: bool
    :param toy: use a reduced dataset
    :type toy: bool
    :return: accuracy on the test batch
    :rtype: float
    """
    TEXT, LABEL, train, valid, test = get_data(train_data_path,
                                               test_data_path, toy)
    config = RNNConfig(vocab_size=len(TEXT.vocab),
                       output_dim=len(LABEL.vocab),
                       epochs=epochs,
                       rnn_dim=rnn_dim,
                       embedding_dim=embedding_dim,
                       layers=layers,
                       learning_rate=learning_rate,
                       momentum=momentum,
                       bidirectional=bidirectional)
    holder = DataHolder(config, train, valid, test)
    model = Model(config)
    model.load_state_dict(torch.load(pkl_path))
    model.to(device)
    test_batch = next(iter(holder.test_iter))
    acc, _, _ = model.evaluate_bach(test_batch, device)
    return acc
def main():
    """
    Basic script that shows the training of the model, the accuracy of the
    test and valid datasets, and the prediction of one specific image.
    """
    (train_dataset, train_labels,
     valid_dataset, valid_labels,
     test_dataset, test_labels) = get_data_4d()
    my_config = Config(learning_rate=0.52660658241,
                       dropout=0.75,
                       batch_size=230,
                       steps_for_decay=800,
                       num_filters_1=12,
                       hidden_nodes_3=300,
                       hidden_nodes_1=900,
                       decay_rate=0.349144998004,
                       num_filters_2=24,
                       patch_size=11,
                       image_size=28,
                       hidden_nodes_2=600)
    my_dataholder = DataHolder(train_dataset, train_labels,
                               valid_dataset, valid_labels,
                               test_dataset, test_labels)
    my_model = CNNModel(my_config, my_dataholder)
    train_model(my_model, my_dataholder, 4 * 10001, 1000)
    print("check_valid = ", check_valid(my_model))
    print("check_test = ", check_test(my_model))
    sample = test_dataset[0]
    sample = sample.reshape((1,) + sample.shape)  # add a batch dimension
    prediction = chr(one_prediction(my_model, sample) + ord('A'))
    real = chr(np.argmax(test_labels[0]) + ord('A'))
    print("Prediction = {}".format(prediction))
    print("Real label = {}".format(real))
def __init__(self, parent=None):
    """Set up the main window and show the raw Iris data in the table view."""
    QtWidgets.QMainWindow.__init__(self, parent)
    self.setupUi(self)
    iris_data = DataHolder(_filename='./IrisDataTrain.xls',
                           _number_of_fetures=4)
    self._model = MyTableModel(self)
    self._model.update(iris_data.rawdata.values)
    self.data_table.setModel(self._model)
def setUpClass(cls):
    """Load the 4-D dataset once and build a CNN model with default config."""
    (cls.train_dataset, cls.train_labels,
     cls.valid_dataset, cls.valid_labels,
     cls.test_dataset, cls.test_labels) = get_data_4d()
    holder = DataHolder(cls.train_dataset, cls.train_labels,
                        cls.valid_dataset, cls.valid_labels,
                        cls.test_dataset, cls.test_labels)
    cls.model = CNNModel(Config(), holder)
def setUp(self):
    """Load Iris data, build a SOM+MLP network and a 5-fold cross-validation."""
    self.filename = './IrisDataTrain.xls'
    self.data = DataHolder(self.filename,
                           _number_of_fetures=4,
                           _class_column=4,
                           _rows_to_skip=0)
    # Alternative fixtures (breast_cancer_data.xls, WineData.xls,
    # NeuronNetwork, MLPNetwork) were commented out and have been removed;
    # see version control history if they are needed again.
    #
    # BUG FIX: the assignment below was commented out while
    # self.model.create(...) was still executed, so self.model was
    # undefined (AttributeError).
    self.model = SomMlpNetwork()
    self.model.create(no_of_layers=4,
                      size_of_each_layer=[4, 8, 8, 3],
                      som_size=[3, 3],
                      som_filename='trained_som.pkl')
    self.crossvali = Crossvalidation()
    self.crossvali.set_data(self.data)
    self.crossvali.set_parameters(5)
    self.crossvali.generate_validation_training_sets()
    self.crossvali.model = self.model
    self.data.encode_labels()
    self.data.normalize_features()
def getDisplayName(self):
    """
    Returns the name of the pokemon according to language.
    return type: string.
    """
    species_data = DataHolder.get(self.pokemonData["species"]["url"])
    # first localization entry matching the system language (None if absent,
    # mirroring the original implicit-None fall-through)
    return next((entry["name"] for entry in species_data["names"]
                 if entry["language"]["name"] == Env().loc), None)
def __init__(self):
    """Index every generation-1 species by pokedex id, initially unseen."""
    gen1_data = DataHolder.get("https://pokeapi.co/api/v2/generation/1/")
    for species in gen1_data["pokemon_species"]:
        info = PokemonInfo(species["url"])
        self.pokemons[str(info.getPokedexId())] = {
            "discover": False,
            "caught": False,
            "pokemon": info,
        }
def __init__(self, url):
    """
    initializes the constructor

    :param url: url of the pokemon species resource
    """
    # Docstring fixed (documented a nonexistent `data` param) and dead
    # commented-out fetch variants removed.
    self.pokemonData = DataHolder.get(url)
class HTTPManager():
    """Fetches station and route data and feeds it into a DataHolder."""

    def __init__(self, address):
        # print() call form replaces the Python-2-only print statement
        print("In HTTPManager")
        self.address = address
        self.data_holder = DataHolder()

    def load_data(self):
        """Fetch the station list and upload it into the data holder."""
        print("In HTTPManager\\load_data")
        station_list = self._get_stations()
        # TODO: release class Station; should return list of Station object
        self.data_holder.upload_stations(station_list)

    # TODO: release main quest logic
    def get_routes(self, point_from, point_to):
        """Stub: route lookup between two points is not implemented yet."""
        print("In HTTPManager\\get_routes")

    def _get_stations(self):
        """Return hard-coded [(station_site_internal_id, station_name), ...]."""
        print("In HTTPManager\\_get_stations")
        return [(31, "Berlin Central Coach Station, Berlin (Germany)"),
                (285, "Minsk Centralnyj, Minsk (Belarus)"),
                (319, "Moscow, Bus station \"Stantsiya Tushinskaya\" (Russian Federation)"),
                (10, "Riga Coach Station, Riga (Latvia)")]
def getDisplayAbilities(self):
    """
    returns A list of abilities this Pokémon could potentially have
    depending on the language of the System.
    return type : list.
    """
    for ability in self.pokemonData["abilities"]:
        self.abilitiesData.append(
            DataHolder.get(ability["ability"]["url"])["names"])
    for names in self.abilitiesData:
        # Iterate the actual localization entries instead of assuming
        # exactly 10 exist (the original `range(0, 10)` raised IndexError
        # whenever the API returned fewer entries).
        for entry in names:
            if entry["language"]["name"] == Env().loc:
                self.abilities.append(entry["name"])
    return self.abilities
def setUpClass(cls):
    """Create DataHolders for every record variant and generate tfrecords."""
    config3 = Config()
    config_gray = Config()
    config_green = Config()
    config_bin = Config()
    data_name = "toy_160_90_3_data.npy"
    label_name = "toy_160_90_3_labels.npy"
    cls.original_dh = DataHolder(config3, data_name, label_name,
                                 record_path="toy")
    cls.original_flip = DataHolder(config3, data_name, label_name,
                                   record_path="toy_flip", flip=True)
    cls.original_aug = DataHolder(config3, data_name, label_name,
                                  record_path="toy_aug", augmentation=True)
    cls.original_gray = DataHolder(config_gray, data_name, label_name,
                                   record_path="toy_gray", gray=True)
    cls.original_green = DataHolder(config_green, data_name, label_name,
                                    record_path="toy_green", green=True)
    cls.original_binary = DataHolder(config_bin, data_name, label_name,
                                     record_path="toy_bin", binary=True)
    cls.all_dataholders_no_new = [cls.original_dh, cls.original_gray,
                                  cls.original_green, cls.original_binary]
    cls.all_paths = ["toy", "toy_flip", "toy_aug",
                     "toy_gray", "toy_green", "toy_bin"]
    for holder in (cls.original_dh, cls.original_flip, cls.original_aug,
                   cls.original_gray, cls.original_green,
                   cls.original_binary):
        holder.create_records()
def loadDataSet(useBrainData):
    """Build a SmallDataset from local brain data or a random MNIST subset."""
    if useBrainData:
        # NOTE: forward slashes must be used in this path
        # (translated from the original Swedish comment)
        holder = DataHolder("C:/Users/Pontus/Desktop/Dippa/dataset")
        subject_keys = list(holder.subjects.keys())
        data, info = holder.subjects.get(
            subject_keys[3]).getDataAndInfoForSubject()
        return smallDataset.SmallDataset(data, info)
    print("downloading data...")
    X_d, y_d = fetch_openml('mnist_784', version=1, return_X_y=True)
    sample_idx = np.random.choice(np.arange(0, 69000, 1), (1000))
    X = X_d[sample_idx, :] / 255
    y = y_d[sample_idx]
    X = np.reshape(X, (1000, 28, 28, 1))
    return smallDataset.SmallDataset(X, y)
class TestSelfOrganizingMap(TestCase):
    """Tests for SelfOrganizingMap initialization and training on Iris data."""

    def setUp(self):
        """Build a 4x4 SOM and prepare normalized, label-encoded Iris data."""
        self.tested_object = SelfOrganizingMap([4, 4])
        self.data = DataHolder(_filename='./IrisDataTrain.xls',
                               _number_of_fetures=4)
        self.data.normalize_features()
        self.data.encode_labels()
        self.tested_object.set_input_len(4)

    def tearDown(self):
        pass

    def test__initialize(self):
        # renamed local from `map`, which shadowed the builtin
        som_map = self.tested_object._initialize([5, 5])
        print('test__initialize zakonczony')

    def test_run(self):
        self.tested_object.run(self.data.get_features(),
                               self.data.get_labels())
        print('sialalalala')
# Script: detect rectangular regions in each input image, save every crop
# under resources\images\cropped_images, wrap each crop in a DataHolder,
# classify it with the pickled Classificator, and save it again under
# resources\images\recognised_images named after the predicted class.
# NOTE(review): this line was collapsed by extraction; the nesting of the
# trailing print statements (per-crop vs per-file) cannot be recovered from
# this view -- confirm against the original file before restructuring.
kls = Classificator.from_pickle() for file in files: dict_file_names = file.split('\\') file_name = dict_file_names[len(dict_file_names) - 1] img = cv.imread(file) rd = RectangleDetector(img) cropped_images = rd.crop() if cropped_images: entities = [] i = 0 for cropped_image in cropped_images: i += 1 loc = '{}\\{}-{}'.format("resources\\images\\cropped_images", i, file_name) cv.imwrite(loc, cropped_image) dh = DataHolder(file_name) dh.set_cropped_image(cropped_image) entities.append(dh) i = 0 for entity in entities: i += 1 cv_image = entity.cropped_image pil_image = cv.cvtColor(cv_image, cv.COLOR_BGR2RGB) pil_image = Image.fromarray(pil_image) closest = kls.classify(pil_image) entity.recognised_name = closest loc = '{}\\{}-{}'.format("resources\\images\\recognised_images", i, closest) cv.imwrite(loc, cv_image) print("-------------") print(file_name, closest)
class TestDataHolder(TestCase):
    """Unit tests for DataHolder loading and feature/label handling."""

    def setUp(self):
        self.filename = './IrisDataTrain.xls'
        self.data_holder = DataHolder(_filename=self.filename,
                                      _number_of_fetures=4)

    def tearDown(self):
        pass

    def test_load_data(self):
        # BUG FIX: assertRaises called without a callable only returns a
        # context manager, so the original assertions checked nothing.
        # NOTE(review): assumes load_data propagates these exceptions
        # rather than swallowing them -- confirm against DataHolder.
        with self.assertRaises(FileNotFoundError):
            self.data_holder.load_data('lalala.xls', 4)
        with self.assertRaises(Exception):
            self.data_holder.load_data('lalala.xxx', 4)
        self.assertIsNotNone(self.data_holder.load_data(self.filename, 4))

    def test_set_features(self):
        self.data_holder.set_features()
        self.assertEqual(len(self.data_holder.features[1, :]),
                         self.data_holder.number_of_features)

    def test_set_labels(self):
        self.data_holder.set_labels()
        self.assertEqual(len(self.data_holder.labels[1, ]), 1,
                         msg='Labels length should be equal 1')

    def test_encode_labels(self):
        self.data_holder.encode_labels()
        self.assertEqual(len(self.data_holder.encoded_labels[1, :]), 3,
                         msg='Should equal 3 for Iris Data')
def __init__(self, address):
    """Store the endpoint address and create an empty DataHolder.

    :param address: base address of the HTTP endpoint
    """
    # print() call form replaces the Python-2-only print statement
    print("In HTTPManager")
    self.address = address
    self.data_holder = DataHolder()
def parseParameters(self):
    """Parse the raw entries in self.paramsData into self.params.

    Each entry either opens a new parameter (first table match wins) or
    continues the data of the most recently opened one.  Fragments are
    then joined per parameter and interpretParameters() finishes the job.
    """
    # (keyword, separator-pattern) pairs replacing twenty copy-pasted
    # regex branches.  Order matters: 'TRCL' must be tried before 'TR',
    # otherwise the 'TR' pattern would swallow 'TRCL' entries.  Geometry
    # keywords take an optional '=', particle keywords an optional ':'.
    param_specs = [
        ('FILL', '[=]?'), ('*FILL', '[=]?'), ('AREA', '[=]?'),
        ('VOL', '[=]?'), ('U', '[=]?'), ('TRCL', '[=]?'),
        ('*TRCL', '[=]?'), ('LAT', '[=]?'), ('TR', '[=]?'),
        ('IMP', '[:]?'), ('MAT', '[:]?'), ('RHO', '[:]?'),
        ('PWT', '[:]?'), ('EXT', '[:]?'), ('FCL', '[:]?'),
        ('WWN', '[:]?'), ('DXC', '[:]?'), ('NONU', '[:]?'),
        ('PD', '[:]?'), ('TMP', '[:]?'),
    ]
    currentParam = ' '
    # NOTE(review): this flag is never set to True in this method, so the
    # ':'/'=' continuation branch below is currently unreachable; it is
    # preserved verbatim from the original.
    foundParamWithoutEqualSign = False
    dh = DataHolder()
    for entry in self.paramsData:
        for keyword, separator in param_specs:
            pattern = ('^[\\s]*' + re.escape(keyword) + '[\\s]*'
                       + separator + '(?P<data>[\\S]*)')
            if dh.set(re.match(pattern, entry, flags=re.IGNORECASE)):
                currentParam = keyword
                self.params[currentParam] = [dh.value.groupdict()['data']]
                break
        else:
            if currentParam == ' ':
                pass
            elif (len(entry) > 0
                  and (entry[0] == ':' or entry[0] == '=')
                  and foundParamWithoutEqualSign):
                # found a '=' of the parameter just found; the '=' is a
                # part of the parameter list
                self.params[currentParam].append(entry[1:])
            else:
                self.params[currentParam].append(entry)
            foundParamWithoutEqualSign = False
    # combine all the parameter data per parameter
    for param in self.params:
        if len(self.params[param]) == 0:
            continue
        container.Container.remove_values_from_list(self.params[param], "")
        paramString = ' '.join(self.params[param])
        self.params[param] = paramString
    self.interpretParameters()
def setUp(self):
    """Load the Iris training spreadsheet into a DataHolder fixture."""
    self.filename = './IrisDataTrain.xls'
    self.data_holder = DataHolder(_number_of_fetures=4,
                                  _filename=self.filename)
# Experiment script: sample `number_of_exp` random decay-rate values (plus
# the reference value 0.96), sort them, and iterate over them -- presumably
# training a model per decay value.  matplotlib.use('Agg') is called before
# importing pyplot so plots can be written headlessly.
# NOTE(review): the body of the final for-loop appears truncated at this
# chunk boundary; the code is kept byte-identical.
matplotlib.use('Agg') import matplotlib.pyplot as plt currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) from util import run_test, get_data_4d, get_time from CNN import CNNModel, train_model, check_valid from DataHolder import DataHolder from Config import Config train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d() my_dataholder = DataHolder(train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels) number_of_exp = 10 DECAY = np.random.random_sample([number_of_exp]) DECAY = np.append(DECAY, 0.96) number_of_exp += 1 DECAY.sort() results = [] duration = [] info = [] for i, de in enumerate(DECAY): print("\n ({0} of {1})".format(i + 1, number_of_exp))
def parseParameters(self):
    """Parse the raw entries in self.paramsData into self.params.

    Each entry either opens a new parameter (first table match wins) or
    continues the data of the most recently opened one.  Fragments are
    then joined per parameter and interpretParameters() finishes the job.
    """
    # (keyword, separator-pattern) pairs replacing twenty copy-pasted
    # regex branches.  Order matters: 'TRCL' must be tried before 'TR',
    # otherwise the 'TR' pattern would swallow 'TRCL' entries.  Geometry
    # keywords take an optional '=', particle keywords an optional ':'.
    spec_table = [
        ('FILL', '[=]?'), ('*FILL', '[=]?'), ('AREA', '[=]?'),
        ('VOL', '[=]?'), ('U', '[=]?'), ('TRCL', '[=]?'),
        ('*TRCL', '[=]?'), ('LAT', '[=]?'), ('TR', '[=]?'),
        ('IMP', '[:]?'), ('MAT', '[:]?'), ('RHO', '[:]?'),
        ('PWT', '[:]?'), ('EXT', '[:]?'), ('FCL', '[:]?'),
        ('WWN', '[:]?'), ('DXC', '[:]?'), ('NONU', '[:]?'),
        ('PD', '[:]?'), ('TMP', '[:]?'),
    ]
    currentParam = ' '
    # NOTE(review): this flag is never set to True in this method, so the
    # ':'/'=' continuation branch below is currently unreachable; it is
    # preserved verbatim from the original.
    foundParamWithoutEqualSign = False
    dh = DataHolder()
    for entry in self.paramsData:
        for keyword, separator in spec_table:
            pattern = ('^[\\s]*' + re.escape(keyword) + '[\\s]*'
                       + separator + '(?P<data>[\\S]*)')
            if dh.set(re.match(pattern, entry, flags=re.IGNORECASE)):
                currentParam = keyword
                self.params[currentParam] = [dh.value.groupdict()['data']]
                break
        else:
            if currentParam == ' ':
                pass
            elif (len(entry) > 0
                  and (entry[0] == ':' or entry[0] == '=')
                  and foundParamWithoutEqualSign):
                # found a '=' of the parameter just found; the '=' is a
                # part of the parameter list
                self.params[currentParam].append(entry[1:])
            else:
                self.params[currentParam].append(entry)
            foundParamWithoutEqualSign = False
    # combine all the parameter data per parameter
    for param in self.params:
        if len(self.params[param]) == 0:
            continue
        container.Container.remove_values_from_list(self.params[param], "")
        paramString = ' '.join(self.params[param])
        self.params[param] = paramString
    self.interpretParameters()
def get_data(self):
    """Populate the zscore, beta and currency fields from DataHolder."""
    coins = self.coins
    self.Zscore = DataHolder.get_zscore(coins)
    self.beta = DataHolder.get_beta(coins)
    self.currencies, self.name1, self.name2 = DataHolder.get_currencies(coins)
def optmizers_search(name_tfrecords, records, height, width, channels,
                     architecture, activations, conv_architecture,
                     kernel_sizes, pool_kernel, batch_size, epochs,
                     num_steps, save_step, learning_rate, conv):
    """
    Script to run optmizers search, the result is saved on the file
    optmizers_results.txt

    :param name_tfrecords: name of the used tfrecords
    :type name_tfrecords: str
    :param records: list of paths to train, test, and valid tfrecords
    :type records: list of str
    :param height: image height
    :type height: int
    :param width: image width
    :type width: int
    :param channels: image channels
    :type channels: int
    :param architecture: network architecture
    :type architecture: list of int
    :param activations: list of different tf functions
    :type activations: list of tf.nn.sigmoid, tf.nn.relu, tf.nn.tanh
    :param conv_architecture: convolutional architecture
    :type conv_architecture: list of int
    :param kernel_sizes: filter sizes
    :type kernel_sizes: list of int
    :param pool_kernel: pooling filter sizes
    :type pool_kernel: list of int
    :param batch_size: batch size for training
    :type batch_size: int
    :param epochs: number of epochs
    :type epochs: int
    :param num_steps: number of iterations for each epoch
    :type num_steps: int
    :param save_step: when step % save_step == 0, the model parameters
                      are saved.
    :type save_step: int
    :param learning_rate: learning rate for the optimizer
    :type learning_rate: float
    :param conv: param to control if the model will be a CNN or DFN
    :type conv: bool
    """
    OT = [tf.train.GradientDescentOptimizer,
          tf.train.AdadeltaOptimizer,
          tf.train.AdagradOptimizer,
          tf.train.AdamOptimizer,
          tf.train.FtrlOptimizer,
          tf.train.ProximalGradientDescentOptimizer,
          tf.train.ProximalAdagradOptimizer,
          tf.train.RMSPropOptimizer]
    OT_name = ["GradientDescentOptimizer",
               "AdadeltaOptimizer",
               "AdagradOptimizer",
               "AdamOptimizer",
               "FtrlOptimizer",
               "ProximalGradientDescentOptimizer",
               "ProximalAdagradOptimizer",
               "RMSPropOptimizer"]
    numeric_result = []
    results = []
    info = []
    net_name = "CNN" if conv else "DFN"
    header = "\nSearching optimizer for the {} model in the {} data\n".format(net_name,  # noqa
                                                                              name_tfrecords)  # noqa
    print(header)
    for name, opt in zip(OT_name, OT):
        config = Config(height=height,
                        width=width,
                        channels=channels,
                        architecture=architecture,
                        activations=activations,
                        conv_architecture=conv_architecture,
                        kernel_sizes=kernel_sizes,
                        pool_kernel=pool_kernel,
                        batch_size=batch_size,
                        epochs=epochs,
                        num_steps=num_steps,
                        save_step=save_step,
                        learning_rate=learning_rate,
                        optimizer=opt)
        data = DataHolder(config, records=records)
        print(name + ":\n")
        graph = tf.Graph()
        if conv:
            network = CNN(graph, config)
        else:
            network = DFN(graph, config)
        trainer = Trainer(graph, config, network, data)
        trainer.fit(verbose=True)
        valid_acc = trainer.get_valid_accuracy()
        numeric_result.append(valid_acc)
        name += ': valid_acc = {0:.6f} | '.format(valid_acc)
        test_images, test_labels, _ = reconstruct_from_record(data.get_test_tfrecord())  # noqa
        test_images = test_images.astype(np.float32) / 255
        test_pred = trainer.predict(test_images)
        acc_cat = accuracy_per_category(test_pred, test_labels, categories=3)
        for i, cat_result in enumerate(acc_cat):
            name += int2command[i] + ": = {0:.6f}, ".format(cat_result)
        results.append(name)
        if os.path.exists("checkpoints"):
            shutil.rmtree("checkpoints")
        info.append(str(config))
    best_result = max(list(zip(numeric_result, OT_name, info)))
    result_string = """In an experiment with different optmizers
    the best one is {0} with valid accuracy of {1}.
    \nThe training uses the following params:
    \n{2}\n""".format(best_result[1],
                      best_result[0],
                      best_result[2])
    # BUG FIX: the report file was opened without a context manager
    # (and bound to the shadowed name `file`), so it stayed open if any
    # write failed.
    with open("optmizers_results.txt", "w") as report:
        report.write(header)
        report.write("Results for different optmizers\n")
        for result in results:
            report.write(result + "\n")
        report.write("\n")
        report.write(result_string)
def __init__(self, arg):
    """Build a pokemon from its numeric id or its full API url.

    :param arg: pokemon id (int) or pokemon resource url (str)
    """
    if isinstance(arg, int):  # isinstance replaces `type(arg) is ...`
        self.pokemonData = DataHolder.get(
            "https://pokeapi.co/api/v2/pokemon/%d/" % arg)
        self.speciesData = DataHolder.get(
            "https://pokeapi.co/api/v2/pokemon-species/%d/" % arg)
    elif isinstance(arg, str):
        self.pokemonData = DataHolder.get(arg)
        self.speciesData = DataHolder.get(self.pokemonData["species"]["url"])
    # BUG FIX: the original assigned self.level twice, silently re-rolling
    # and discarding the first random level; a single roll is kept.
    self.level = GameCtx.getRandomPokemonLevel()
    self.captureRate = self.speciesData["capture_rate"]
    if self.speciesData["gender_rate"] == -1:
        self.gender = Pokemon.NOSEX
    elif random.randint(0, 8) < self.speciesData["gender_rate"]:
        self.gender = Pokemon.FEMALE
    else:
        self.gender = Pokemon.MALE
    self.iv = GameCtx.genRadomIV()
    self.moves = []
    moveCursor = 0
    for move in self.pokemonData["moves"]:
        gen = next((g for g in move["version_group_details"]
                    if g["version_group"]["name"] == "red-blue"), False)
        if (gen and gen["move_learn_method"]["name"] == "level-up"
                and gen["level_learned_at"] <= self.level):
            if len(self.moves) < 4:
                self.moves.append(Move(move["move"]["url"]))
            else:
                # keep at most 4 moves, overwriting the oldest (ring buffer)
                self.moves[moveCursor] = Move(move["move"]["url"])
                moveCursor = (moveCursor + 1) % 4
    self.levels = DataHolder.get(
        self.speciesData["growth_rate"]["url"])["levels"]
    self.experience = next(level for level in self.levels
                           if level["level"] == self.level)["experience"]
    self.type1 = Type(next(t for t in self.pokemonData["types"]
                           if t["slot"] == 1)["type"]["url"])
    type2 = next((t for t in self.pokemonData["types"] if t["slot"] == 2),
                 None)
    self.type2 = Type(type2["type"]["url"]) if type2 is not None else None

    def _stat(stat_name):
        # fetch the raw stat entry with the given name
        return next(stat for stat in self.pokemonData["stats"]
                    if stat["stat"]["name"] == stat_name)

    self.stats = {name: _stat(name)
                  for name in ("speed", "special-defense", "special-attack",
                               "defense", "attack", "hp")}
    self.fightStats = {"speed": 6, "special-defense": 6,
                       "special-attack": 6, "defense": 6, "attack": 6,
                       "accuracy": 6, "dodge": 6}
    self.updateStats()
    self.hp = self.maxHp