def open_settings(self, action, *args):
    """Show a file-chooser dialog and load the selected settings file.

    On OK, loads the file via ``U.load_settings_from_file`` into
    ``self.settings`` and pushes the loaded objects into the window's
    edit area. On Cancel, just logs. The dialog is destroyed in every
    case.

    Parameters
    ----------
    action : the Gtk action that triggered this handler (unused here).
    *args : extra signal arguments (unused).
    """
    dialog = Gtk.FileChooserDialog(
        "Open a settings file",
        self.window,
        Gtk.FileChooserAction.OPEN,
        (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
         Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
    self.add_filters_to_chooserdialog(dialog)
    try:
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            # TODO: Check for read permissions
            # TODO: Check if it's a xjoy settings file
            # TODO: Check if it's a non corrupted xjoy settings file
            self.settings, objects = U.load_settings_from_file(
                dialog.get_filename())
            self.window.edit_area.set_objects(objects)
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")
    finally:
        # Destroy the dialog even if loading raises; previously an
        # exception in load_settings_from_file leaked the dialog.
        dialog.destroy()
# NOTE(review): sklearn.metrics.classification and sklearn.metrics.ranking are
# private modules that were removed in scikit-learn 0.24; the public
# sklearn.metrics package re-exports the same names (accuracy_score,
# roc_auc_score, ...), so this is the forward-compatible equivalent.
from sklearn.metrics import *
from time import time

begin = time()  # wall-clock start, presumably reported at the end of the run

"""
Here, only the discriminator was used to do the anomaly detection
"""

# --- get settings --- #
# parse command line arguments, or use defaults
# NOTE(review): `utils` and `data_utils` are used below but not imported in
# this chunk — assumed imported earlier in the file; confirm.
parser = utils.rgan_options_parser()
settings = vars(parser.parse_args())
# if a settings file is specified, it overrides command line arguments/defaults
if settings['settings_file']:
    settings = utils.load_settings_from_file(settings)

# --- get data, split --- #
data_path = './experiments/data/' + settings['data_load_from'] + '.data.npy'
print('Loading data from', data_path)
# Force per-window (not per-sample / per-anomaly) evaluation mode off before
# loading the data.
settings["eval_single"] = False
settings["eval_an"] = False
samples, labels, index = data_utils.get_data(
    settings["data"], settings["seq_length"], settings["seq_step"],
    settings["num_signals"], settings["sub_id"], settings["eval_single"],
    settings["eval_an"], data_path)

# --- save settings, data --- #
# no need
print('Ready to run with settings:')
# NOTE(review): prints value before key — kept as-is to preserve output.
for (k, v) in settings.items():
    print(v, '\t', k)
import json
from scipy.stats import mode

import data_utils
import plotting
import model
import utils
from time import time
from math import floor
from mmd import rbf_mmd2, median_pairwise_distance, mix_rbf_mmd2_and_ratio

# Silence TensorFlow's per-op logging.
# NOTE(review): `tf` and `np` are used below but not imported in this chunk —
# assumed imported earlier in the file; confirm.
tf.logging.set_verbosity(tf.logging.ERROR)

# Pin the whole experiment setup to the first GPU (TF1-style device scope).
with tf.device('/gpu:0'):
    # Experiment identifier: selects the settings file and names the data dump.
    identifier = 'mnistfull'
    settings = utils.load_settings_from_file(identifier)
    samples, pdf, labels = data_utils.get_samples_and_labels(settings)
    # Inject every settings key as a module-level variable (batch_size,
    # seq_length, latent_dim, num_signals, cond_dim, ... used below).
    # WARNING: this hides where those names come from.
    locals().update(settings)
    # json.dump(settings, open('./experiments/settings/' + identifier + '.txt', 'w'), indent=0)
    # Cache the training data so later runs / evaluation can reload it.
    # np.save pickles the dict payload (loaded back with allow_pickle).
    data_path = './experiments/data/' + identifier + '.data.npy'
    np.save(data_path, {'samples': samples, 'pdf': pdf, 'labels': labels})
    print('Saved training data to', data_path)

    # --- build model --- #
    # Placeholders: Z = latent noise, X = real sequences, CG/CD/CS =
    # condition inputs for generator / discriminator / sampling.
    Z, X, CG, CD, CS = model.create_placeholders(batch_size, seq_length,
                                                 latent_dim, num_signals,
                                                 cond_dim)
import model
import utils
import eval
from time import time
from math import floor
from mmd import rbf_mmd2, median_pairwise_distance, mix_rbf_mmd2_and_ratio

# Silence TensorFlow's per-op logging.
# NOTE(review): `tf`, `json` and `data_utils` are used below but not imported
# in this chunk — assumed imported earlier in the file; confirm.
tf.logging.set_verbosity(tf.logging.ERROR)

begin = time()  # wall-clock start, presumably reported at the end of the run

# --- get settings --- #
# parse command line arguments, or use defaults
parser = utils.rgan_options_parser()
settings = vars(parser.parse_args())
# if a settings file is specified, it overrides command line arguments/defaults
if settings['settings_file']:
    settings = utils.load_settings_from_file(settings)

# --- get data, split --- #
# samples, pdf, labels = data_utils.get_samples_and_labels(settings)
samples, pdf, labels = data_utils.get_data(settings['data'],
                                           settings['seq_length'],
                                           settings['seq_step'],
                                           settings['num_signals'])

# --- training sample --- #

# --- save settings, data --- #
print('Ready to run with settings:')
# NOTE(review): prints value before key — kept as-is to preserve output.
for (k, v) in settings.items():
    print(v, '\t', k)
# add the settings to local environment
# WARNING: at this point a lot of variables appear
# (including `identifier`, used below)
locals().update(settings)
# Persist the effective settings next to the experiment. Use a context
# manager so the file handle is closed (previously the bare open() leaked it).
with open('./experiments/settings/' + identifier + '.txt', 'w') as f:
    json.dump(settings, f, indent=0)