Example No. 1
	def __open_selected(self, widget, path):
		"""Open selected item in either active, or new tab"""
		# unquote path before giving it to handler
		if path is not None and '://' in path:
			data = path.split('://', 1)
			data[1] = urllib.unquote(data[1])
			path = '://'.join(data)

		# open selected item
		if self._open_in_new_tab:
			# create new tab
			options = Parameters()
			options.set('path', path)

			self._application.create_tab(
							self._object._notebook,
							self._object.__class__,
							options
						)

		elif hasattr(self._object, 'change_path'):
			self._object.change_path(path)

		# reset values
		self._open_in_new_tab = False

		return True
Example No. 2
	def _change_path(self, widget=None, new_tab=False):
		"""Change to selected path"""
		selection = self._history_list.get_selection()
		item_list, selected_iter = selection.get_selected()

		# if selection is valid, change to selected path
		if selected_iter is not None:
			path = item_list.get_value(selected_iter, Column.PATH)

			if not new_tab:
				# change path
				self._parent._handle_history_click(path=path)

			else:
				# create a new tab
				options = Parameters()
				options.set('path', path)

				self._application.create_tab(
								self._parent._notebook,
								self._parent.__class__,
								options
							)

			# close dialog
			self._close()
Example No. 3
def countRegions():
    # Requires: -the first command line argument is the name of the
    #            image to be segmented
    #           -the second command line argument is the color space being
    #            used, either RGB, HSV, or HLS
    # Effects: -calls closure with count foreground argument on, returns
    #           count of distinct foreground objects
    
    colorSpace = argv[2].lower()
    
    if colorSpace not in ["rgb", "hsv", "hls"]:
        print "Second argument not one of RGB, HSV, or HLS"
        print "The first argument should be the name of the image to be segmented"
        print "Followed by the desire color space representation"
        exit(1)
    
    try:
        image = Image.open(argv[1])
        imageData = colorSpaceConvert(list(image.getdata()), argv[2].lower())
    except:
        print "Invalid or no image name given"
        print "The first argument should be the name of the image to be segmented"
        print "Followed by the desire color space representation"
        exit(1)
        
    if colorSpace == "rgb":
        redMinMax = raw_input("Red min-max, between 0 and 255: ")
        greenMinMax = raw_input("Green min-max, between 0 and 255: ")
        blueMinMax = raw_input("Blue min-max, between 0 and 255: ")
        redMinMax = [float(x) / 255.0 for x in redMinMax.split()]
        greenMinMax = [float(x) / 255.0 for x in greenMinMax.split()]
        blueMinMax = [float(x) / 255.0 for x in blueMinMax.split()]
        colorRanges = [redMinMax, greenMinMax, blueMinMax]
    elif colorSpace == "hsv":
        hueMinMax = raw_input("Hue min-max, between 0 and 360: ")
        satMinMax = raw_input("Saturation min-max, between 0 and 100: ")
        valMinMax = raw_input("Value min-max, between 0 and 100: ")
        hueMinMax = [float(x) / 360.0 for x in hueMinMax.split()]
        satMinMax = [float(x) / 100.0 for x in satMinMax.split()]
        valMinMax = [float(x) / 100.0 for x in valMinMax.split()]
        colorRanges = [hueMinMax, satMinMax, valMinMax]
    else:
        hueMinMax = raw_input("Hue min-max, between 0 and 360: ")
        lightMinMax = raw_input("Lightness min-max, between 0 and 100: ")
        satMinMax = raw_input("Saturation min-max, between 0 and 100: ")
        hueMinMax = [float(x) / 360.0 for x in hueMinMax.split()]
        lightMinMax = [float(x) / 100.0 for x in lightMinMax.split()]
        satMinMax = [float(x) / 100.0 for x in satMinMax.split()]
        colorRanges = [hueMinMax, lightMinMax, satMinMax]
    
    param = Parameters()
    param.setImageSize(image.size)
    param.setColorRanges(colorRanges)
    
    seg = segmentation.colorSegmenter()
    mask = seg.segmentImage(imageData, param, True)
    
    close = closure.closure()
    close.segmentRegions(mask, param, 0, True, False)
Example No. 4
	def _create_file_list(self, widget=None, data=None):
		"""Create file list in parent notebook"""
		self.__update_path_from_pid()
		DefaultList = self._parent.plugin_classes['file_list']
		options = Parameters()
		options.set('path', self.path)
		self._parent.create_tab(self._notebook, DefaultList, options)
		return True
Example No. 5
	def _close_tab(self, widget=None, data=None):
		"""Provide additional functionality"""
		if self._notebook.get_n_pages() == 1:
			DefaultList = self._parent.plugin_classes['file_list']
			options = Parameters()
			options.set('path', self.path)

			self._parent.create_tab(self._notebook, DefaultList, options)

		return Terminal._close_tab(self, widget, data)
Example No. 6
	def open_file(self, selection, application_info=None, exec_command=None):
		"""Open filename using config file or specified execute command"""
		if application_info is not None:
			# get command from config file
			command = application_info.command_line
			
		elif exec_command is not None:
			# use specified command
			command = exec_command
		
		else:
			# raise exception, we need at least one argument
			raise AttributeError('Error opening file. We need command or application to be specified.')
		
		exec_string = self.__format_command_string(selection, command)

		# open selected file(s)
		split_command = shlex.split(exec_string)
		test_command = split_command[0] if len(split_command) > 1 else exec_string

		if is_x_app(test_command):
			os.system('{0} &'.format(exec_string))

		else:
			active_object = self._application.get_active_object()

			options = Parameters()
			options.set('close_with_child', True)
			options.set('shell_command', split_command[0])
			options.set('arguments', split_command)
			options.set('path', os.path.dirname(selection[0]))

			self._application.create_terminal_tab(active_object._notebook, options)
Example No. 7
	def edit_file(self, selection):
		"""Edit selected filename"""
		section = self._application.options.section('editor')
		command = section.get('default_editor')

		exec_string = self.__format_command_string(selection, command)

		# open selected file(s)
		split_command = shlex.split(exec_string)
		test_command = split_command[0] if len(split_command) > 1 else exec_string

		if (section.get('terminal_command') and section.get('type') == 1) \
		or not is_x_app(test_command):
			active_object = self._application.get_active_object()

			options = Parameters()
			options.set('close_with_child', True)
			options.set('shell_command', split_command[0])
			options.set('arguments', split_command)
			options.set('path', os.path.dirname(selection[0]))

			self._application.create_terminal_tab(active_object._notebook, options)

		else:
			os.system('{0} &'.format(exec_string))
Example No. 8
 def load_parameters(json_parameters):
   parameters = Parameters()
   parameters.clustering_threshold = json_parameters['clustering_threshold']
   parameters.size_threshold = json_parameters['size_threshold']
   parameters.distance_threshold = json_parameters['distance_threshold']
   parameters.hashing_depth = json_parameters['hashing_depth']
   parameters.clusterize_using_dcup = json_parameters['clusterize_using_dcup']
   parameters.clusterize_using_hash = json_parameters['clusterize_using_hash']
   parameters.report_unifiers = json_parameters['report_unifiers']
   parameters.force = json_parameters['force']
   parameters.use_diff = json_parameters['use_diff']
   return parameters
Example No. 9
	def create_terminal(self, path, position=None):
		options = Parameters()
		options.set('path', path)

		if position == 'left':
			notebook = self._application.left_notebook
		elif position == 'right':
			notebook = self._application.right_notebook
		else:
			notebook = self._application.get_active_notebook()

		self._application.create_tab(notebook, self._application.plugin_classes['system_terminal'], options)
Example No. 10
def scan(language, file_manifest, source_file_names):

  # Determine the files to scan. If no files are given, use a default manifest.
  if len(source_file_names) == 0 and file_manifest is None:
    file_manifest = manifest.default_manifest(language)

  source_file_names = set(source_file_names)
  if file_manifest is not None:
    source_file_names.update(set(manifest.contents(file_manifest)))

  supplier = ast_suppliers.abstract_syntax_tree_suppliers[language]

  # TODO: Configuration files!
  parameters = Parameters()
  parameters.distance_threshold = supplier.distance_threshold
  parameters.size_threshold = supplier.size_threshold

  source_files = []

  report = Report(parameters)

  def parse_file(file_name):
    try:
      logging.info('Parsing ' + file_name + '...')
      source_file = supplier(file_name, parameters)
      source_file.getTree().propagateCoveredLineNumbers()
      source_file.getTree().propagateHeight()
      source_files.append(source_file)
      report.addFileName(file_name)
      logging.info('done')
    except:
      logging.warn('Can\'t parse "%s" \n: ' % (file_name,) + traceback.format_exc())

  for file_name in source_file_names:
    parse_file(file_name)

  duplicates = clone_detection_algorithm.findDuplicateCode(source_files, report)
  n = 1
  for duplicate in duplicates:
    distance = duplicate.calcDistance()
    summary = CloneSummary(
      "Clone #"+str(n),
      [  # TODO: This is a mess! Most of this info should be assembled on the fly and in member functions.
       Snippet(
        duplicate[i].getSourceFile()._file_name,
        duplicate[i].getCoveredLineNumbers(),
        '\n'.join([line for line in duplicate[i].getSourceLines()])
        ) for i in [0, 1]], distance)
    report.addClone(summary)
    n += 1
  report.sortByCloneSize()

  save_report(".orphanblack", report)
Example No. 11
    def create_terminal(self, path, position=None):
        """Expose method for creating terminal tab."""
        options = Parameters()
        options.set("path", path)

        if position == "left":
            notebook = self._application.left_notebook

        elif position == "right":
            notebook = self._application.right_notebook

        else:
            notebook = self._application.get_active_notebook()

        self._application.create_tab(notebook, self._application.plugin_classes["system_terminal"], options)
Example No. 12
	def create_tab(self, path, position=None):
		"""Expose method for creating standard tab."""
		options = Parameters()
		options.set('path', path)

		if position == 'left':
			notebook = self._application.left_notebook

		elif position == 'right':
			notebook = self._application.right_notebook

		else:
			notebook = self._application.get_active_notebook()

		self._application.create_tab(notebook, self._application.plugin_classes['file_list'], options)
Example No. 13
 def __init__(self):
     self.words = list()
     self.list_number = 2
     self.min = dict()
     self.max = dict()
     self.first_list = list()
     self.second_list = list()
     self.first_list_output = list()
     self.second_list_output = list()
     self.minimum = None
     self.length = 0
     self.number_of_same = 0
     self.allow = True
     self.same = list()
     self.statistics = None
     self.key_for_differ_feature = ""
     self.which_higher = None
     self.p_values = list()
     self.time_begin = None
     self.success = True
     self.first_list_equality_counter = dict()
     self.second_list_equality_counter = dict()
     self.should_append_first = dict()
     self.should_append_second = dict()
     self.numeric_features = list()
     self.categorical_features = dict()
     self.categorical_features_list = list()
     self.len_of_numeric = 0
     self.len_of_categorical = 0
     self.parameters = Parameters()
Example No. 14
    def __init__(self):
        print 'call me'
        self.parameters = Parameters()
        if LBL:
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases
            graph.score_biases = self.parameters.score_biases
        else:
            graph.hidden_weights = self.parameters.hidden_weights
            graph.hidden_biases = self.parameters.hidden_biases
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases

#        (self.graph_train, self.graph_predict, self.graph_verbose_predict) = graph.functions(self.parameters)
        import sets
        self.train_loss = MovingAverage()
        self.train_err = MovingAverage()
        self.train_lossnonzero = MovingAverage()
        self.train_squashloss = MovingAverage()
        self.train_unpenalized_loss = MovingAverage()
        self.train_l1penalty = MovingAverage()
        self.train_unpenalized_lossnonzero = MovingAverage()
        self.train_correct_score = MovingAverage()
        self.train_noise_score = MovingAverage()
        self.train_cnt = 0
Example No. 15
    def __init__(self, modelname="", window_size=HYPERPARAMETERS["WINDOW_SIZE"], vocab_size=vocabulary.wordmap().len, embedding_size=HYPERPARAMETERS["EMBEDDING_SIZE"], hidden_size=HYPERPARAMETERS["HIDDEN_SIZE"], seed=miscglobals.RANDOMSEED, initial_embeddings=None, two_hidden_layers=HYPERPARAMETERS["TWO_HIDDEN_LAYERS"]):
        self.modelname = modelname
        self.parameters = Parameters(window_size, vocab_size, embedding_size, hidden_size, seed, initial_embeddings, two_hidden_layers)
        if LBL:
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases
            graph.score_biases = self.parameters.score_biases
        else:
            graph.hidden_weights = self.parameters.hidden_weights
            graph.hidden_biases = self.parameters.hidden_biases
            if self.parameters.two_hidden_layers:
                graph.hidden2_weights = self.parameters.hidden2_weights
                graph.hidden2_biases = self.parameters.hidden2_biases
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases

#        (self.graph_train, self.graph_predict, self.graph_verbose_predict) = graph.functions(self.parameters)
        import sets
        self.train_loss = MovingAverage()
        self.train_err = MovingAverage()
        self.train_lossnonzero = MovingAverage()
        self.train_squashloss = MovingAverage()
        self.train_unpenalized_loss = MovingAverage()
        self.train_l1penalty = MovingAverage()
        self.train_unpenalized_lossnonzero = MovingAverage()
        self.train_correct_score = MovingAverage()
        self.train_noise_score = MovingAverage()
        self.train_cnt = 0
Example No. 16
def main():
    application = QApplication(sys.argv)
    application.quitOnLastWindowClosed = True

    params = Parameters()
    if os.path.isfile("default.cfg"):
        try:
            params.load_from_file("default.cfg")
        except:
            # Even if the operation fails the object should still be usable.
            pass

    window = ApplicationWidget(params)
    window.show()

    sys.exit(application.exec_())
Example No. 17
def main(argv):

    print(argv)

    # Create and parse parameters.
    parameters = Parameters()
    parameters.parse(argv)

    # Do we need to show help?
    if parameters.help or len(parameters.directories) == 0:
        show_help(True)
        return

    # Handle each directory.
    for directory in parameters.directories:
        process_directory(directory, parameters.recurse, parameters.preview, 
            parameters.fileType)
Example No. 18
def run_cli():
    """run_cli

    runs the pipeline from the command line interface.
    see -h flag for arguments
    """
    params = Parameters.from_command_line()
    run(params)
Example No. 19
	def __init__(self, parameters=None, **kwargs):

		self.__H = None
		self.__derivative_ops = {}
		self.__derivative_ops_default = ['evolution']

		self.__named_states = {}  # A dictionary of named states for easy recollection
		self.__named_ensembles = {}  # A dictionary of named ensembles for density states
		self.__named_subspaces = {}  # A dictionary of named subspaces for easy identification
		self.__named_bases = {}  # A dictionary of named bases for state representation
		self.__basis_default = None

		self.init(**kwargs)

		# Initialise parameters instance
		if isinstance(parameters, str):
			self.p = Parameters.load(parameters, constants=True)
		elif isinstance(parameters, Parameters):
			self.p = parameters
		else:
			self.p = Parameters(constants=True)
		if isinstance(parameters, dict):
			self.p << parameters
		params = self.init_parameters()
		if isinstance(params, dict):
			self.p << params

		# Initialise Hamiltonian
		H = self.init_hamiltonian()
		if isinstance(H, (Operator, OperatorSet)):
			self.hamiltonian = H

		# Initialise Bases
		bases = self.init_bases()
		if isinstance(bases, dict):
			for name, basis in bases.items():
				self.add_basis(name, basis)

		# Initialise named states
		states = self.init_states()
		if isinstance(states, dict):
			for name, state in states.items():
				self.add_state(name, state)

		# Initialise derivative operators
		derivatives = self.init_derivative_ops()
		if isinstance(derivatives, dict):
			for name, derivative in derivatives.items():
				self.add_derivative_op(name, derivative)

		# Initialise measurements
		self.measure = Measurements(self)
		measurements = self.init_measurements()
		if isinstance(measurements, dict):
			for name, measurement in measurements.items():
				self.add_measurement(name, measurement)
Example No. 20
	def _open_selected(self, widget, in_new_tab=False):
		"""Open selected mount"""
		selection = self._list.get_selection()
		item_list, selected_iter = selection.get_selected()

		if selected_iter is not None:
			uri = item_list.get_value(selected_iter, MountsColumn.URI)
			active_object = self._application.get_active_object()
			
			if not in_new_tab and hasattr(active_object, 'change_path'):
				active_object.change_path(uri)

			else:
				# create new tab
				options = Parameters()
				options.set('path', uri)

				self._application.create_tab(
								active_object._notebook,
								active_object.__class__,
								options
							)
		return True
Example No. 21
def spike_detection_job(DatFileNames, ProbeFileName, output_dir, output_name):
    """
    Top level function that starts a data processing job.
    """
    for DatFileName in DatFileNames:
        if not os.path.exists(DatFileName):
            raise Exception("Dat file %s does not exist" % DatFileName)
    DatFileNames = [os.path.abspath(DatFileName)
                    for DatFileName in DatFileNames]

    probe = probes.Probe(ProbeFileName)

    n_ch_dat = Parameters['NCHANNELS']
    sample_rate = Parameters['SAMPLERATE']
    high_frequency_factor = Parameters['F_HIGH_FACTOR']
    set_globals_samples(sample_rate, high_frequency_factor)
    Parameters['CHUNK_OVERLAP'] = int(
        sample_rate * Parameters['CHUNK_OVERLAP_SECONDS'])

    Parameters['N_CH'] = probe.num_channels

    max_spikes = Parameters['MAX_SPIKES']

    basename = basenamefolder = output_name

   # OutDir = join(output_dir, basenamefolder)
    OutDir = output_dir
    with indir(OutDir):
        # Create a log file
        GlobalVariables['log_fd'] = open(basename + '.log', 'w')

        if Parameters['DEBUG']:
            GlobalVariables['debug_fd'] = open(basename + '.debug', 'w')

        Channels_dat = np.arange(probe.num_channels)
        # Print Parameters dictionary to .log file
        log_message("\n".join(["{0:s} = {1:s}".format(key, str(value))
                    for key, value in sorted(Parameters.iteritems()) if not key.startswith('_')]))
        spike_detection_from_raw_data(basename, DatFileNames, n_ch_dat,
                                      Channels_dat, probe.channel_graph,
                                      probe, max_spikes)

        numwarn = GlobalVariables['warnings']
        if numwarn:
            log_message(
                'WARNINGS ENCOUNTERED: ' + str(numwarn) + ', check log file.')

        # Close the log file at the end.
        if 'log_fd' in GlobalVariables:
            GlobalVariables['log_fd'].close()
Example No. 22
	def execute_file(self, path, provider=None):
		"""Execute specified item properly."""
		mime_type = self.get_mime_type(path)
		terminal_type = self._application.options.section('terminal').get('type')
		should_execute = False

		if provider is not None and provider.is_local:
			# only allow local files which have execute
			# bit set to be executed locally
			should_execute = os.access(path, os.X_OK)

			# if we still don't know content type, try to guess
			if self.is_mime_type_unknown(mime_type):
				data = self.get_sample_data(path, provider)
				mime_type = self.get_mime_type(data=data)

		if gio.content_type_can_be_executable(mime_type) and should_execute:
			# file type is executable
			if is_x_app(path):
				subprocess.Popen(
							(path, '&'),
							cwd=os.path.dirname(path)
						)

			else:
				# command is console based, create terminal tab and fork it
				if terminal_type != TerminalType.EXTERNAL:
					active_object = self._application.get_active_object()

					options = Parameters()
					options.set('close_with_child', False)
					options.set('shell_command', path)
					options.set('path', os.path.dirname(path))

					tab = self._application.create_terminal_tab(active_object._notebook, options)

		else:
			# file type is not executable, try to open with default associated application
			default_application = self.get_default_application_for_type(mime_type)

			if default_application is not None:
				self.open_file((path,), default_application)

			else:
				# no default application selected, show application selection dialog
				dialog = ApplicationSelectDialog(self._application, path)
				result = dialog.get_response()

				if result[0] == gtk.RESPONSE_OK:
					self.open_file(selection=(path,), exec_command=result[2])
Example No. 23
	def open_file(self, selection, application_info=None, exec_command=None):
		"""Open filename using config file or specified execute command"""
		if application_info is not None:
			# launch application using GIO API
			application = self.get_gio_application_by_id(application_info.id)

			if application is not None:
				if application.supports_uris():
					application.launch_uris(selection)
				else:
					application.launch([gio.File(path=path) for path in selection])
			
		elif exec_command is not None:
			# use specified command
			command = exec_command
		
			selection = map(lambda item: item.replace('"', '\\"'), selection)
			exec_string = self.__format_command_string(selection, command)

			# open selected file(s)
			split_command = shlex.split(exec_string, posix=False)
			test_command = split_command[0] if len(split_command) > 1 else exec_string

			if is_x_app(test_command):
				os.system('{0} &'.format(exec_string))

			else:
				active_object = self._application.get_active_object()

				options = Parameters()
				options.set('close_with_child', True)
				options.set('shell_command', split_command[0])
				options.set('arguments', split_command)
				options.set('path', os.path.dirname(selection[0]))

				self._application.create_terminal_tab(active_object._notebook, options)
Example No. 24
folder = 'quadratic'
files = os.listdir(path=f'./data/{folder}/')

for i, name in enumerate(files):

    print(i)
    f = gzip.open(f'./data/{folder}/{name}', 'rb')
    net = pickle.load(f)
    f.close()

    params = Parameters(net.layers,
                        -1,
                        -1,
                        -1,
                        -1,
                        -1,
                        parameters=(net.weights, net.biases),
                        training_info="migrated from old class")

    os.remove(f'./data/{folder}/{name}')

    f = gzip.open(f'./data/{folder}/{name}', 'wb')

    pickle.dump(params, f)

    f.close()

Example No. 25
# include script as parameter so different scripts can be executed

#scripts = ["optimize_top_1.py"]
#length = len(scripts)
#list_of_parameter_dictionaries
# #TODO add script
#and regularization selection parameters to parameters class, optimize script needs to be adjusted to react

all_parameters = []

parameters = Parameters(
    starname="Wolf294",
    defaultQ=False,
    start=11,
    end=53,
    chunk_size=5,
    niter=160,
    reg_file_star=
    '../wobble/regularization/Wolf294_star_K0_orders[11,54)_regtest1406.hdf5',
    reg_file_t=
    '../wobble/regularization/Wolf294_t_K3_orders[11,54)_regtest1406.hdf5',
    output_suffix='regtest1406_opt4')
parameters.dictionary.update({"script": "optimize_top_4.py"})
all_parameters.append(parameters.dictionary)

#test whether new reg will run on old files
#parameters = Parameters(starname = "Wolf294", defaultQ = False,
#start = 11,
#end = 54,
#chunk_size = 16,
#reg_file_star =  '../wobble/regularization/Wolf294_star_K0_orders[11,54)_regtest1406.hdf5',
#reg_file_t = '../wobble/regularization/Wolf294_t_K3_orders[11,54)_regtest1406.hdf5',
Example No. 26
import os
from parameters import Parameters
from scipy.io.matlab.mio import savemat

params = Parameters()


def saveTensorToMat(x, varName='x', fileName='', save_dir=params.net_save_dir):
    '''
    x: the variable to be saved
    varName: name of the variable in the mat file (also the default file name)
    '''
    if fileName == '':
        fileName = varName

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    savemat(save_dir + fileName + '.mat', {varName: x.cpu().data.numpy()})


def saveArrayToMat(x, varName='x', fileName='', save_dir=params.net_save_dir):
    '''
    x: the variable to be saved
    varName: name of the variable in the mat file (also the default file name)
    '''
    if fileName == '':
        fileName = varName

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
Example No. 27
 def __init__(self):
     self.parameters = Parameters()
     self.parameter_store = ParameterStore(self.parameters)
     self._uuid_mapping = {}
Example No. 28
 def json_to_obj(combination_json: dict):
     return Combination(combination_id=combination_json['_id'],
                        compiler_name=combination_json['compiler_name'],
                        parameters=Parameters.json_to_obj(
                            combination_json['parameters']))
Example No. 29
import combine_results as cr
from parameters import Parameters

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import wobble
from time import time
import h5py
import os
import sys
import ast
import yaml

#load parameters
parameters = Parameters(filename="yaml_temp/optimize_parameters_chunk.yaml")
parameter_dict = parameters.dictionary

start_order = parameter_dict["start_chunk"]  #TODO fix this name mismatch
end_order = parameter_dict["end_chunk"]
starname = parameter_dict["starname"]
K_star = parameter_dict["K_star"]
K_t = parameter_dict["K_t"]
epochs_list = parameter_dict["epochs_list"]
niter = parameter_dict["niter"]
defaultQ = parameter_dict["defaultQ"]

if True:
    plots = True
    epochs = [0, 50]  # to plot
    movies = False
Example No. 30
def plot2():
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.set_xlim(50, 250)
    ax.set_ylim(50, 250)

    # Initialize classes
    params = Parameters()
    target = Target(25, 25, 0)
    agents = []
    for i in range(params.nveh):
        agents.append(Agent(0, 25 * i, 0, params.monSpeed, 0, params, ax=ax))

    # Give the target a commanded speed
    target.send_cmd(3, 0)

    # Get first plan
    for i, agent in enumerate(agents):
        agent.detect_target(target.get_state())
        agent.compute_flight_traj(tf=params.tflight + i * params.tmon)
        agent.plot_arrow()

    # Plot initial states
    pts = target.get_state()
    trgtPlot = ax.plot(pts[0], pts[1], 'r*', markersize=10, label='Target')
    for i, agent in enumerate(agents):
        agent.plot_arrow()

    # Run the simulation
    for t in np.arange(0, params.tflight + params.nveh * params.tmon + 0.1,
                       0.1):
        # Update states
        target.update(t)
        for agent in agents:
            agent.update(t)

        # Detect target
        if t % params.detPer < 1e-6:
            for agent in agents:
                agent.detect_target(target.get_state())

        # Update plots
        pts = target.get_state()
        trgtPlot[0].set_data(pts[0], pts[1])
        for i, agent in enumerate(agents):
            agent.plot_arrow()

        if t >= 1:
            target.send_cmd(3, np.pi / 2)
        if t >= 1.5:
            target.send_cmd(3, 0)

        plt.pause(0.01)

    # Plot the inner and outer radii
    cir1 = Circle(target.get_state()[:2],
                  ls=':',
                  fill=False,
                  ec='r',
                  label='Outer Radius',
                  radius=params.outerR)
    cir2 = Circle(target.get_state()[:2],
                  ls=':',
                  fill=False,
                  ec='r',
                  label='Inner Radius',
                  radius=params.innerR)
    cir3 = Circle(target.get_state()[:2],
                  lw=None,
                  fc='gray',
                  alpha=0.5,
                  label='No Fly Zone',
                  radius=params.noflyR)
    ax.add_artist(cir1)
    ax.add_artist(cir2)
    ax.add_artist(cir3)

    # Draw legend and clean up Agent class
    ax.legend([trgtPlot[0]] + [agent._arrow for agent in agents],
              ['Target', 'Agent 1', 'Agent 2', 'Agent 3'])
    Agent.agentIdx = 0
    Agent.trajList = []
    Agent.timeList = []

    plt.title(f'$t = {t}$')
    return
Example No. 31
def set_parameters(**kwargs):
    """ Set the values of parameters input from the command line.
    """
    logger.info("Parse arguments ...")

    ## Create a Parameters object to store parameters.
    params = Parameters()

    ## Process arguments.
    #
    if kwargs.get(Args.COMPARE_MODEL_NAME):
        params.compare_model_name = kwargs.get(Args.COMPARE_MODEL_NAME)
        logger.info("Compare model name: %s" % params.compare_model_name)

    if kwargs.get(Args.COMPARE_OUTPUT_DIRECTORY):
        params.compare_output_directory = kwargs.get(
            Args.COMPARE_OUTPUT_DIRECTORY)
        if not os.path.exists(params.compare_output_directory):
            logger.error("The compare output directory '%s' was not found." %
                         params.compare_output_directory)
            return None
        logger.info("Compare output directory: '%s'." %
                    params.compare_output_directory)

    params.data_name = kwargs.get(Args.DATA_NAME)
    logger.info("Data name: %s" % params.data_name)

    params.group = kwargs.get(Args.GROUP)
    logger.info("Group number: %s" % params.group)

    params.model_name = kwargs.get(Args.MODEL_NAME)
    logger.info("Model name: %s" % params.model_name)

    params.output_directory = kwargs.get(Args.OUTPUT_DIRECTORY)
    if not os.path.exists(params.output_directory):
        logger.error("The output directory '%s' was not found." %
                     params.output_directory)
        return None
    logger.info("Output directory: '%s'." % params.output_directory)

    params.segment = kwargs.get(Args.SEGMENT)
    logger.info("Segment number: %s" % params.segment)

    if kwargs.get(Args.TIME_STEP):
        params.time_step = float(kwargs.get(Args.TIME_STEP))
    logger.info("Time step: %g" % params.time_step)

    return params
Example No. 32
import os
from time import time
import wobble
import h5py
import numpy as np
import shutil
import yaml
from parameters import Parameters  # used below; module name assumed from the sibling wobble scripts
#NOTE WORKS only in Wob_env_2 (wobble_19_03_2019, deprecated in wobble_14_06_2019)
#TODO pass results_file_base to chunk
if __name__ == "__main__":  #NOTE If called via os.system this will trigger even if in queue
    queue = True
    #################### parameters

    if queue:
        parameter_filename = "yaml_temp/optimize_parameters.yaml"
        parameters = Parameters(filename=parameter_filename)

        starname = parameters.dictionary["starname"]
        K_star = parameters.dictionary["K_star"]
        K_t = parameters.dictionary["K_t"]
        niter = parameters.dictionary["niter"]
        start = parameters.dictionary["start"]
        end = parameters.dictionary["end"]
        chunk_size = parameters.dictionary["chunk_size"]
        defaultQ = parameters.dictionary["defaultQ"]
        if defaultQ:
            default_str = "_def"
        else:
            default_str = ""

        parameter_dict = parameters.dictionary
Example No. 33
    def __init__(self, nbr_of_agents):
        self.co = ConsoleOutput()
        self.co.t_print("INITIALISING CHARGING MODEL")
        self.parameters = Parameters()
        self.num_agents = nbr_of_agents
        # time step and time step limit in minutes
        self.clock = Clock(self.parameters)

        self.cmm = CarModelManager()
        self.chm = ChargerManager()
        self.epm = ElectricityPlanManager(self.parameters, self.clock)
        self.cpm = CompanyManager(self.parameters, self.clock, self.chm,
                                  self.epm)
        self.lrm = LocationRoadManager(self.cpm)
        self.wm = WhereaboutsManager(self.lrm, self.clock)
        self.cp = CalendarPlanner(self.parameters, self.clock, self.lrm)
        self.hcm = HouseConsumptionManager(self.clock)
        self.hgm = HouseGenerationManager(self.parameters, self.clock,
                                          self.epm, self.cmm)
        self.schedule_cars = RandomActivation(self)
        self.schedule_houses = RandomActivation(self)
        # extra per mille is needed as east_west_spread and north_south_spread
        # are OPEN interval limits and agents cannot be placed on this border
        # point
        self.space = ContinuousSpace(self.lrm.east_west_spread * 1.001,
                                     self.lrm.north_south_spread * 1.001,
                                     False)

        self.extracted_lon = []
        self.extracted_lat = []
        self.extracted_soc = []

        # create agents
        self.co.t_print("Start to create agents")
        for agent_uid in range(self.num_agents):
            residency_location = self.lrm.draw_location_of_residency()
            house_agent = HouseAgent(agent_uid, self, self.clock,
                                     residency_location, self.chm, self.epm,
                                     self.hcm, self.hgm)
            employment_location = residency_location
            while employment_location == residency_location:
                employment_location = \
                    self.lrm.draw_location_of_employment(residency_location)
            company = self.cpm.add_employee_to_location(employment_location)

            #TODO reconsider if agents should all start at home
            cur_location = residency_location
            pos = self.lrm.relative_location_position(cur_location)

            car_agent = CarAgent(agent_uid, self, self.clock, cur_location,
                                 house_agent, company, self.lrm, self.cmm,
                                 self.wm, self.cp, self.parameters)
            self.schedule_houses.add(house_agent)
            self.schedule_cars.add(car_agent)
            self.space.place_agent(car_agent, pos)
            self.extracted_lon.append([])
            self.extracted_lat.append([])
            self.extracted_soc.append([])

        self.co.t_print("Agent creation complete")
        self.co.t_print("INITIALISATION COMPLETE")
        self.co.t_print("")
        self.co.t_print("COMMENCING STEP CALCULATION")
Example No. 34
                
            params_save_path = '/net/data1/ml2017/gpyparams/E_curvelhs_' + kernel + str(index) + times + '.pickle'
            params_load_path = '/net/data1/ml2017/gpyparams/E_curvelhs_' + kernel + str(index) + times + '.pickle'
            #------------------------------------------------------------------------------------------
            # Actual code

            # Training dimension, do we want to train on energy or only lecs?
            # This is for generation
            parameter_dim = LEC_LENGTH
            energy_as_param = False
            if energy[1] != energy[0]:
                parameter_dim += 1
                energy_as_param = True

            # Set up necessary classes
            param = Parameters(interval, samples, center_lecs=lec_center)
            nsopt = NsoptCaller()
            gauss = Gaussfit()
            dm = Datamanager(echo=False)
            gauss.save_fig = save_fig
            gauss.save_path = save_fig_path

            @profile
            def get_observable(energies, lecs):
                """Wrapper function to measure time with memory profiler."""
                return nsopt.get_nsopt_observable(energies,LECM=lecs)

            @profile
            def train_gp_model(train_obs, train_lecs):
                """Populate and optimize GP model."""
                gauss.populate_gp_model(train_obs, train_lecs, rescale=rescale_data)
Example No. 35
def get_file_params(params_file='sailnet_parameters.txt'):
    return Parameters(params_file)
Example No. 36
plus45 = 'C-Bracket_0.50_plus45WithPlate.STL'
paramFile = 'Print1.json'

fnames = (zero, minus45, plus45)
colors = 'rgb'
angles = (0, -45, 45)

SecAng = namedtuple('SecAng', 'section angle')

secAngs = []
meshes = []

with open(path + paramFile, 'r') as fp:
    data = json.load(fp)

params = Parameters(data[0], data[1])

for fname, color, angle in zip(fnames, colors, angles):
    mesh = trimesh.load_mesh(path + fname)
    meshes.append(mesh)
    section = mesh.section(plane_origin=[0, 0, 0.01], plane_normal=[0, 0, 1])
    loops = section.discrete
    for loop in loops:
        outline = Outline()
        outline._name = fname
        outline.addCoordLoop(loop * 1000)  # 1000 to convert from meters to mm
        outline = outline.translate(-50, -12)
        secAngs.append(SecAng(Section(outline), angle))
        plt.plot(loop[:, 0] - 0.050, loop[:, 1] - 0.012, color)

params.outline = [tuple(secAngs)]
Example No. 37
#!/usr/bin/env python
import numpy
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import config
from parameters import Parameters

if __name__ == '__main__':
    conf = config.get_config()
    output_folder = os.path.expanduser(conf.get('Output', 'OutputFolder'))
    n_folds = int(conf.get('CrossValidation', 'NumberOfFolds'))

    if not os.path.isdir(output_folder):
        raise StandardError("Experiment folder %s does not exist." % output_folder)

    print("Evaluating Cross-validation Scores for experiment %s..." % output_folder)
    parameters_files = ["parameters%i.pkl" % (x + 1) for x in xrange(n_folds)]
    parameters = [Parameters.load_from_pickle_file(os.path.join(output_folder, x)) for x in parameters_files]
    test_accuracies = numpy.array([1.0 - x.test_error for x in parameters])
    print("Accuracy: %0.4f (+/- %0.4f)" % (test_accuracies.mean(), test_accuracies.std() * 2))
Example No. 38
DATASET = 'epsilon.pickle'
NUM_EPOCH = 10

print('load dataset')
dataset = os.path.join(DATA_DIR, DATASET)
with open(dataset, 'rb') as f:
    X, y = pickle.load(f)

n, d = X.shape

params = []

params.append(
    Parameters(name="full-sgd",
               num_epoch=NUM_EPOCH,
               lr_type='bottou',
               initial_lr=10.,
               regularizer=1 / n,
               estimate='mean'))
params.append(
    Parameters(name="top1",
               num_epoch=NUM_EPOCH,
               lr_type='bottou',
               initial_lr=10.,
               regularizer=1 / n,
               estimate='mean',
               take_k=1,
               take_top=True,
               with_memory=True))
params.append(
    Parameters(name="rand1",
               num_epoch=NUM_EPOCH,
Example No. 39
 print('Starting experiment n.', str(exp))
 run_folder = exp_folder + '/run_' + str(run) + '/'
 print('Current folder: ', run_folder)
 if not os.path.exists(run_folder):
     os.makedirs(run_folder)
 os.chdir(run_folder)
 directory_results = run_folder
 directory_models = run_folder + 'models/'
 directory_plots = run_folder + 'plots/'
 directory_plots_gif = run_folder + 'plots/gif/'
 #os.makedirs(directory_results)
 os.makedirs(directory_models)
 os.makedirs(directory_plots)
 os.makedirs(directory_plots_gif)
 # create parameters object
 param = Parameters()
 param.set('directory_results', run_folder)
 param.set('directory_models', run_folder + 'models/')
 param.set('directory_plots', run_folder + 'plots/')
 param.set('directory_plots_gif', run_folder + 'plots/gif/')
 param.set('directory_datasets', datasets_folder)
 dataset_type = doe.loc[exp, 'dataset_train_type']
 if dataset_type == 0:
     param.set('dataset_train_type', 'icub_alone')
     param.set('directory_datasets_train',
               datasets_folder + 'icub_alone/')
 elif dataset_type == 1:
     param.set('dataset_train_type', 'icub_and_ball')
     param.set('directory_datasets_train',
               datasets_folder + 'icub_and_ball/')
 elif dataset_type == 2:
Example No. 40
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    param.preparedata()

    param.creatScheduler()
    param.createOptimizer()
    param.createLoss()

    for epoch in range(param.numepoch):
        #Train
        loss, acc = param.train(epoch)
        train_accs.append(acc)
        train_losses.append(loss)
        #val
        loss, acc = param.val(epoch)
        val_accs.append(acc)
        val_losses.append(loss)
        param.scheduler.step()
        if acc>best_acc:
            best_acc = acc
            print("Best model so far")
            torch.save(param.Net, "model.pth")
            f=open("log.txt", "a")
            f.write("RUN: BatchSize"+str(cfg.BatchSizes.BatchSize)+ " BertLayer"+ str(cfg.db.Bertlayer)+str(acc)+"\n")
            f.close()



if __name__ == "__main__":
    param=Parameters()
    BertMainFunction()
Example No. 41
from glob import glob
from parameters import Parameters
from os.path import splitext, basename

input_dir = '/home/sauvan/Documents/HEP/Projects/CMS/L1CalorimeterTrigger_Phase2HGCal/Misc/FastShower/output/'

parameters = Parameters()
parameters.input_files = sorted([(splitext(basename(f))[0], f)
                                 for f in glob(input_dir + '*large*.root')])
parameters.tree = 'tree'
parameters.log_weights = True
parameters.output_file = 'output/output_largecells_logweights.root'
Example No. 42
def set_parameters(**kwargs):
    """ Set the values of parameters input from the command line.
    """
    logger.info("Parse arguments ...")

    ## Create a Parameters object to store parameters.
    params = Parameters()

    ## Process arguments.
    #
    if kwargs.get(Args.OUTPUT_DIRECTORY):
        params.output_directory = kwargs.get(Args.OUTPUT_DIRECTORY)
        if not os.path.exists(params.output_directory):
            logger.error("The output directory '%s' was not found." %
                         params.output_directory)
            return None
        logger.info("Output directory: '%s'." % params.output_directory)

    if kwargs.get(Args.RESULTS_DIRECTORY):
        params.results_directory = kwargs.get(Args.RESULTS_DIRECTORY)
        if not os.path.exists(params.results_directory):
            logger.error("The results directory '%s' was not found." %
                         params.results_directory)
            return None
        logger.info("Results directory: '%s'." % params.results_directory)

    params.output_file_name = kwargs.get(Args.OUTPUT_FILE)
    logger.info("Output file name: %s" % params.output_file_name)

    params.output_format = kwargs.get(Args.OUTPUT_FORMAT)
    logger.info("Output format: %s" % params.output_format)

    params.solver_file_name = kwargs.get(Args.SOLVER_FILE)
    logger.info("Solver file name: %s" % params.solver_file_name)

    params.data_names = kwargs.get(Args.DATA_NAMES).split(",")
    logger.info("Data names: %s" % ','.join(params.data_names))

    if kwargs.get(Args.OUTLET_SEGMENTS):
        params.outlet_segments = True
        logger.info("Outlet segments: %s" % params.outlet_segments)

    if kwargs.get(Args.ALL_SEGMENTS):
        params.all_segments = True
        logger.info("All segments: %s" % params.all_segments)

    if kwargs.get(Args.SELECT_SEGMENTS):
        params.select_segment_names = True
        logger.info("Select segments: %s" % params.select_segment_names)

    if kwargs.get(Args.DISPLAY_GEOMETRY):
        params.display_geometry = (kwargs.get(Args.DISPLAY_GEOMETRY)
                                   in ["on", "true"])
        logger.info("Display geometry: %s" % params.display_geometry)

    if kwargs.get(Args.NODE_SPHERE_RADIUS):
        params.node_sphere_radius = float(kwargs.get(Args.NODE_SPHERE_RADIUS))

    if kwargs.get(Args.PLOT):
        params.plot_results = (kwargs.get(Args.PLOT) in ["on", "true"])
        logger.info("Plot results: %s" % params.plot_results)

    if kwargs.get(Args.SEGMENTS):
        params.segment_names = kwargs.get(Args.SEGMENTS).split(",")
        logger.info("Segments: %s" % ','.join(params.segment_names))

    if kwargs.get(Args.TIME_RANGE):
        params.time_range = [
            float(s) for s in kwargs.get(Args.TIME_RANGE).split(",")
        ]
        logger.info("Time range: %s" % ','.join(map(str, params.time_range)))

    return params
Example No. 43
#########################################################################
##
## Some utility for training, data processing, and network.
##
#########################################################################
import torch
import torch.nn as nn
from parameters import Parameters
from UNet_2Plus import UNet_2Plus
p = Parameters()


######################################################################
##
## Convolution layer modules
##
######################################################################
class Conv2D_BatchNorm_Relu(nn.Module):
    def __init__(self,
                 in_channels,
                 n_filters,
                 k_size,
                 padding,
                 stride,
                 bias=True,
                 acti=True,
                 dilation=1):
        super(Conv2D_BatchNorm_Relu, self).__init__()

        if acti:
            self.cbr_unit = nn.Sequential(
Example No. 44
"""

# Set environment and parameters
if args.mux:
    env = environment.Multiplexer(args)
elif args.chk and args.chk_div:
    env = environment.Checkerboard(args)
elif args.mario:
    env = environment.MarioClassification(args)
elif args.csv:
    env = environment.Dataset(args)
else:
    print('Choose the environment, please.')
    sys.exit(1)

parameters = Parameters(env.num_actions, args)
helper = Helper(args, env)

print("[ Settings ]")
print("    Environment =", env)
print("  NumIterations =", env.iteration)
if env.condense_iter != 0:
    print("NumCondenseIter =", env.condense_iter)
if env.repr == 'cs':
    print("  ConditionRepr = CSR")
elif env.repr == 'lu':
    print("  ConditionRepr = OBR")
elif env.repr == 'ub':
    print("  ConditionRepr = UBR")

print("\n[ XCSR General Parameters ]")
Example No. 45
    def optimize_bollinger(self):

        ##values to test out

        ##14400
        if "14400" in self.test_table_array[0]:
            self.bb_factor = [2.5]
            self.stddev_adjust = [True, False]
            self.avg_period = [40]
            self.num_past_buy = [0, 1, 2]
            self.num_past_sell = [2, 3, 4]
            self.one_op = [False]
            self.to_carry = [True, False]

        ##7200
        elif "7200" in self.test_table_array[0]:
            self.bb_factor = [2, 2.5, 3]
            self.stddev_adjust = [False]
            self.avg_period = [60, 80, 100, 120]
            self.num_past_buy = [0, 1]
            self.num_past_sell = [2, 4, 6, 8]
            self.one_op = [False]
            self.to_carry = [False]

        ##1800
        ##self.bb_factor = [2, 2.5, 3]
        ##self.stddev_adjust = [True, False]
        ##self.avg_period = [240, 320, 400, 480]
        ##self.num_past_buy = [0, 8]
        ##self.num_past_sell = [0 , 8, 16, 24]

        self.parameters_array = []

        for bb in self.bb_factor:
            for std in self.stddev_adjust:
                for period in self.avg_period:
                    for num_buy in self.num_past_buy:
                        for num_sell in self.num_past_sell:
                            for oo in self.one_op:
                                for tc in self.to_carry:
                                    p = Parameters(bb, std, period, num_buy,
                                                   num_sell, oo, tc)
                                    strat_array = []
                                    for tn in self.test_table_array:
                                        strat = BollingerStrategy(
                                            tn,
                                            bb_factor=bb,
                                            stddev_adjust=std,
                                            avg_period=period,
                                            num_past_buy=num_buy,
                                            num_past_sell=num_sell,
                                            one_op=oo,
                                            to_carry=tc)
                                        strat_array.append(strat)
                                    trade_sim = TradeSimulator(
                                        self.test_table_array,
                                        strat_array,
                                        to_log=False)
                                    print(
                                        "*************************************************************************************************************************"
                                    )
                                    print(("bb: ", bb, " std: ", std,
                                           " period: ", period, " num_buy: ",
                                           num_buy, " num_sell: ", num_sell,
                                           " one_op: ", oo, " to_carry: ", tc))
                                    trade_sim.run()
                                    print(
                                        "*************************************************************************************************************************"
                                    )

                                    p.set_balance(trade_sim.balance)
                                    p.set_percent_profit(
                                        trade_sim.profit_percent)
                                    self.parameters_array.append(p)

        self.print_summary([
            "bb_factor", "stddev_adjust", "avg_period", "num_past_buy",
            "num_past_sell", "to_carry", "one_op"
        ])
Example No. 46
#import numpy as np
import dataset_readers as data
import image_encoders as encoder
from parameters import Parameters
from nupic.research.spatial_pooler import SpatialPooler
#from nupic.encoders import ScalarEncoder
from vision_testbench import VisionTestBench


# Get training images and convert them to vectors.
trainingImages, trainingTags = data.getImagesAndTags(trainingDataset)
trainingVectors = encoder.imagesToVectors(trainingImages)


# Specify parameter values to search
parameters = Parameters()
parameters.define("dataSet", ['20.xml'])
#parameters.define("dataSet",[
  #'1.xml','2.xml', '3.xml', '4.xml', '5.xml', '6.xml', '7.xml', '8.xml',
  #'9.xml', '10.xml', '11.xml', '12.xml', '13.xml', '14.xml', '15.xml',
  #'16.xml', '17.xml', '18.xml', '19.xml', '20.xml', '21.xml', '22.xml',
  #'23.xml', '24.xml', '25.xml', '26.xml', '27.xml', '28.xml', '29.xml',
  #'30.xml', '31.xml', '32.xml', '33.xml', '34.xml', '35.xml', '36.xml',
  #'37.xml', '38.xml', '39.xml', '40.xml', '41.xml', '42.xml', '43.xml',
  #'44.xml', '45.xml', '46.xml', '47.xml', '48.xml', '49.xml', '50.xml',
  #'51.xml', '52.xml', '53.xml', '54.xml', '55.xml', '56.xml', '57.xml',
  #'58.xml', '59.xml', '60.xml', '61.xml', '62.xml'])
parameters.define("numCols", [16])
#parameters.define("numCols", [256,512,1024,2048])
parameters.define("synPermConn", [0.3])
#parameters.define("synPermConn", [0.9, 0.7, 0.5, 0.3, 0.1])
Example No. 47
import dataset_readers as data
import image_encoders as encoder
from parameters import Parameters
from nupic.research.spatial_pooler import SpatialPooler
from vision_testbench import VisionTestBench
from classifiers import KNNClassifier

minAccuracy = 200.0
maxTrainingCycles = 5



if __name__ == "__main__":
  # Specify parameter values to search
  parameters = Parameters()
  parameters.define("dataSet",[
    '1.xml','2.xml', '3.xml', '4.xml', '5.xml', '6.xml', '7.xml', '8.xml',
    '9.xml', '10.xml', '11.xml', '12.xml', '13.xml', '14.xml', '15.xml',
    '16.xml', '17.xml', '18.xml', '19.xml', '20.xml', '21.xml', '22.xml',
    '23.xml', '24.xml', '25.xml', '26.xml', '27.xml', '28.xml', '29.xml',
    '30.xml', '31.xml', '32.xml', '33.xml', '34.xml', '35.xml', '36.xml',
    '37.xml', '38.xml', '39.xml', '40.xml', '41.xml', '42.xml', '43.xml',
    '44.xml', '45.xml', '46.xml', '47.xml', '48.xml', '49.xml', '50.xml',
    '51.xml', '52.xml', '53.xml', '54.xml', '55.xml', '56.xml', '57.xml',
    '58.xml', '59.xml', '60.xml', '61.xml', '62.xml'])


  # Run the model until all combinations have been tried
  while parameters.getNumResults() < parameters.numCombinations:
Example No. 48
from classifiers import KNNClassifier

trainingDataset = 'DataSets/OCR/characters/cmr_hex.xml'
minAccuracy = 100.0
maxTrainingCycles = 5
testingDataset = 'DataSets/OCR/characters/cmr_hex.xml'



if __name__ == "__main__":
  # Get training images and convert them to vectors.
  trainingImages, trainingTags = data.getImagesAndTags(trainingDataset)
  trainingVectors = encoder.imagesToVectors(trainingImages)

  # Specify parameter values to search
  parameters = Parameters()
  parameters.define("synPermConn", [0.5])
  parameters.define("synPermDecFrac", [1.0, 0.5, 0.1])
  parameters.define("synPermIncFrac", [1.0, 0.5, 0.1])


  # Run the model until all combinations have been tried
  while parameters.getNumResults() < parameters.numCombinations:

    # Pick a combination of parameter values
    parameters.nextCombination()
    #parameters.nextRandomCombination()
    synPermConn = parameters.getValue("synPermConn")
    synPermDec = synPermConn*parameters.getValue("synPermDecFrac")
    synPermInc = synPermConn*parameters.getValue("synPermIncFrac")
Example No. 49
def plot_multiple_runs(main_path,
                       multiple_experiments_folder,
                       num_experiments,
                       num_runs,
                       save=True):

    #data_mse_inv = []
    #data_mse_fwd = []
    #data_slope = []
    #data_intercept = []
    mean_mse_fwd = []
    mean_mse_inv = []
    mean_slope = []
    mean_intercept = []
    stddev_mse_fwd = []
    stddev_mse_inv = []
    stddev_slope = []
    stddev_intercept = []

    full_path = main_path + '/' + multiple_experiments_folder + '/'
    for exp in range(num_experiments):

        full_path_exp = full_path + 'exp' + str(exp) + '/'

        data_mse_inv = []
        data_mse_fwd = []
        data_slope = []
        data_intercept = []

        for run in range(num_runs):

            directory = full_path_exp + 'run_' + str(run) + '/'

            parameters = Parameters()
            # parameters.set('goal_selection_mode', 'som')
            parameters.set('run_id', run)
            parameters.set('directory_main', directory)
            parameters.set('directory_results', directory + 'results/')
            parameters.set('directory_plots', directory + 'plots/')

            data_mse_fwd.append(
                np.load(parameters.get('directory_results') + 'mse_fwd.npy'))
            data_mse_inv.append(
                np.load(parameters.get('directory_results') + 'mse_inv.npy'))
            regr = np.load(
                parameters.get('directory_results') +
                'im_linregr_mse_vs_raw_mov.npy')
            data_slope.append(regr[2])
            data_intercept.append(regr[4])

        mean_mse_fwd.append(np.mean(np.asarray(data_mse_fwd), axis=0))
        mean_mse_inv.append(np.mean(np.asarray(data_mse_inv), axis=0))
        mean_slope.append(np.mean(np.asarray(data_slope), axis=0))
        mean_intercept.append(np.mean(np.asarray(data_intercept), axis=0))

        stddev_mse_fwd.append(np.std(np.asarray(data_mse_fwd), axis=0))
        stddev_mse_inv.append(np.std(np.asarray(data_mse_inv), axis=0))
        stddev_slope.append(np.std(np.asarray(data_slope), axis=0))
        stddev_intercept.append(np.std(np.asarray(data_intercept), axis=0))

        fig1 = plt.figure(figsize=(10, 10))
        plt.title('MSE forward model')
        plt.ylabel('MSE')
        plt.xlabel('Time')
        #plot1, = plt.plot(mean_mse_fwd, color='#CC4F1B', label='db')
        #plt.fill_between(np.arange(len(mean_mse_fwd)), mean_mse_fwd-stddev_mse_fwd, mean_mse_fwd+stddev_mse_fwd, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
        plt.errorbar(range(len(mean_mse_fwd[-1])),
                     mean_mse_fwd[-1],
                     stddev_mse_fwd[-1],
                     capsize=5,
                     errorevery=499)
        if save:
            filename = full_path_exp + 'exp_' + str(exp) + '_mse_fwd.jpg'
            plt.savefig(filename)
            plt.close()

        fig1 = plt.figure(figsize=(5, 5))
        plt.title('MSE inverse model')
        plt.ylabel('MSE')
        plt.xlabel('Time')
        #plot1, = plt.plot(mean_mse_inv, color='#CC4F1B', label='db')
        #plt.fill_between(np.arange(len(mean_mse_inv)), mean_mse_inv-stddev_mse_inv, mean_mse_inv+stddev_mse_inv, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
        plt.errorbar(range(len(mean_mse_inv[-1])),
                     mean_mse_inv[-1],
                     stddev_mse_inv[-1],
                     capsize=5,
                     errorevery=499)

        if save:
            filename = full_path_exp + 'exp_' + str(exp) + '_mse_inv.jpg'
            plt.savefig(filename)
            plt.close()

        fig1 = plt.figure(figsize=(5, 5))
        plt.title('MSE slopes VS movements')
        plt.ylabel('Movement amplitude')
        plt.xlabel('Slope of MSE')
        for run in range(num_runs):
            x_vals = np.array(plt.xlim())
            y_vals = data_intercept[run] + data_slope[run] * x_vals
            plt.plot(x_vals, y_vals, '-', color='b', alpha=0.5)

        x_vals = np.array(plt.xlim())
        y_vals = mean_intercept[-1] + mean_slope[-1] * x_vals
        plt.plot(x_vals, y_vals, '--', color='r')

        if save:
            filename = full_path_exp + 'exp_' + str(exp) + '_correlation.jpg'
            plt.savefig(filename)
            plt.close()

    fig1 = plt.figure(figsize=(8, 6))
    plt.ylabel('MSE forward model')
    #plt.ylim(0, 2)
    plt.xlabel('Time')
    for i in range(len(mean_mse_fwd)):
        lab = 'exp_' + str(i)
        plt.errorbar(range(len(mean_mse_fwd[i])),
                     mean_mse_fwd[i],
                     stddev_mse_fwd[i],
                     label=lab,
                     capsize=5,
                     errorevery=(500 - 20 * i))
    plt.legend()
    if save:
        filename = full_path + 'multiexp_mse_fwd.jpg'
        plt.savefig(filename)
        plt.close()

    fig1 = plt.figure(figsize=(8, 6))
    plt.ylabel('MSE inverse models')
    #plt.ylim(0, 2)
    plt.xlabel('Time')
    for i in range(len(mean_mse_inv)):
        lab = 'exp_' + str(i)
        plt.errorbar(range(len(mean_mse_inv[i])),
                     mean_mse_inv[i],
                     stddev_mse_inv[i],
                     label=lab,
                     capsize=5,
                     errorevery=(500 - 25 * i))
    plt.legend()
    if save:
        filename = full_path + 'multiexp_mse_inv.jpg'
        plt.savefig(filename)
        plt.close()
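
# Hypothetical usage of plot_multiple_runs above; the path and counts are
# illustrative, not from the original project. The function expects
# <main_path>/<multiple_experiments_folder>/exp<i>/run_<j>/results/ to hold
# mse_fwd.npy, mse_inv.npy and im_linregr_mse_vs_raw_mov.npy for each run.
if __name__ == '__main__':
    plot_multiple_runs(main_path='/tmp/experiments',
                       multiple_experiments_folder='multi_exp',
                       num_experiments=3,
                       num_runs=5,
                       save=True)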
Ejemplo n.º 50
0
def __init__(self):
    self.params = Parameters()
    #parameters = Parameters(starname = "GJ436", defaultQ = False,
    #data_suffix = "_vis_drift_shift",
    #start = 11,
    #end = 53,
    #chunk_size = 5,
    #niter = 160,
    #reg_file_star =  starting_star_reg,
    #reg_file_t = starting_t_reg,
    #output_suffix = 'reg_search_test')

    parameters = Parameters(starname="GJ436",
                            defaultQ=False,
                            data_suffix="_nir_drift_shift_split",
                            start=0,
                            end=56,
                            chunk_size=5,
                            niter=160,
                            reg_file_star=starting_star_reg,
                            reg_file_t=starting_t_reg,
                            output_suffix='reg_search_test')

    parameter_filename = "yaml_temp/optimize_parameters.yaml"
    parameters.write(parameter_filename)
    parameters = Parameters(filename=parameter_filename)

    starname = parameters.dictionary["starname"]
    K_star = parameters.dictionary["K_star"]
    K_t = parameters.dictionary["K_t"]
    niter = parameters.dictionary["niter"]
    output_suffix = parameters.dictionary["output_suffix"]
    start = parameters.dictionary["start"]
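
# A minimal, hypothetical sketch of the YAML round-trip used above (the real
# Parameters class is project-specific and carries defaults such as K_star
# and K_t): keyword arguments land in .dictionary, write() dumps it to YAML,
# and passing filename= loads it back.
import yaml

class YamlParametersSketch(object):
    def __init__(self, filename=None, **kwargs):
        if filename is not None:
            # Load a previously written parameter file.
            with open(filename) as f:
                self.dictionary = yaml.safe_load(f)
        else:
            self.dictionary = dict(kwargs)

    def write(self, filename):
        with open(filename, 'w') as f:
            yaml.safe_dump(self.dictionary, f)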
Ejemplo n.º 52
0
class Model:
    """
    A Model can:

    @type parameters: L{Parameters}
    @todo: Document
    """

    def __init__(self):
        print 'call me'
        self.parameters = Parameters()
        if LBL:
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases
            graph.score_biases = self.parameters.score_biases
        else:
            graph.hidden_weights = self.parameters.hidden_weights
            graph.hidden_biases = self.parameters.hidden_biases
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases

#        (self.graph_train, self.graph_predict, self.graph_verbose_predict) = graph.functions(self.parameters)
        import sets
        self.train_loss = MovingAverage()
        self.train_err = MovingAverage()
        self.train_lossnonzero = MovingAverage()
        self.train_squashloss = MovingAverage()
        self.train_unpenalized_loss = MovingAverage()
        self.train_l1penalty = MovingAverage()
        self.train_unpenalized_lossnonzero = MovingAverage()
        self.train_correct_score = MovingAverage()
        self.train_noise_score = MovingAverage()
        self.train_cnt = 0

    def __getstate__(self):
        return (self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt)

    def __setstate__(self, state):
        (self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt) = state
        if LBL:
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases
            graph.score_biases = self.parameters.score_biases
        else:
            graph.hidden_weights = self.parameters.hidden_weights
            graph.hidden_biases = self.parameters.hidden_biases
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases

#    def load(self, filename):
#        sys.stderr.write("Loading model from: %s\n" % filename)
#        f = myopen(filename, "rb")
#        (self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt) = pickle.load(f)
#        if LBL:
#            graph.output_weights = self.parameters.output_weights
#            graph.output_biases = self.parameters.output_biases
#            graph.score_biases = self.parameters.score_biases
#        else:
#            graph.hidden_weights = self.parameters.hidden_weights
#            graph.hidden_biases = self.parameters.hidden_biases
#            graph.output_weights = self.parameters.output_weights
#            graph.output_biases = self.parameters.output_biases
#
#    def save(self, filename):
#        sys.stderr.write("Saving model to: %s\n" % filename)
#        f = myopen(filename, "wb")
#        pickle.dump((self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt), f)

    def embed(self, sequence):
        """
        Embed a sequence of vocabulary IDs
        """
        seq = [self.parameters.embeddings[s] for s in sequence]
        import numpy
        return [numpy.resize(s, (1, s.size)) for s in seq]
#        return [self.parameters.embeddings[s] for s in sequence]

    def embeds(self, sequences):
        """
        Embed sequences of vocabulary IDs.
        If we are given a list of MINIBATCH lists of SEQLEN items, return a list of SEQLEN matrices of shape (MINIBATCH, EMBSIZE)
        """
        embs = []
        for sequence in sequences:
            embs.append(self.embed(sequence))

        for emb in embs: assert len(emb) == len(embs[0])

        new_embs = []
        for i in range(len(embs[0])):
            colembs = [embs[j][i] for j in range(len(embs))]
            import numpy
            new_embs.append(numpy.vstack(colembs))
            assert new_embs[-1].shape == (len(sequences), self.parameters.embedding_size)
        assert len(new_embs) == len(sequences[0])
        return new_embs

    def corrupt_example(self, e):
        """
        Return a corrupted version of example e, plus the weight of this example.
        """
        from hyperparameters import HYPERPARAMETERS
        import random
        import copy
        e = copy.copy(e)
        last = e[-1]
        cnt = 0
        while e[-1] == last:
            if HYPERPARAMETERS["NGRAM_FOR_TRAINING_NOISE"] == 0:
                e[-1] = random.randint(0, self.parameters.vocab_size-1)
                pr = 1./self.parameters.vocab_size
            elif HYPERPARAMETERS["NGRAM_FOR_TRAINING_NOISE"] == 1:
                import noise
                from common.myrandom import weighted_sample
                e[-1], pr = weighted_sample(noise.indexed_weights())
#                from vocabulary import wordmap
#                print wordmap.str(e[-1]), pr
            else:
                assert 0
            cnt += 1
            # Back off to 0-gram smoothing if we fail 10 times to draw noise.
            if cnt > 10: e[-1] = random.randint(0, self.parameters.vocab_size-1)
        weight = 1./pr
        return e, weight

    def corrupt_examples(self, correct_sequences):
        noise_sequences = []
        weights = []
        for e in correct_sequences:
            noise_sequence, weight = self.corrupt_example(e)
            noise_sequences.append(noise_sequence)
            weights.append(weight)
        return noise_sequences, weights

    def train(self, correct_sequences):
        from hyperparameters import HYPERPARAMETERS
        learning_rate = HYPERPARAMETERS["LEARNING_RATE"]
        if LBL:
            noise_sequences, weights = self.corrupt_examples(correct_sequences)
            # All weights must be the same, since we fold them into a single scalar learning rate
            for w in weights: assert w == weights[0]

            # REWRITE FOR MINIBATCH
            assert 0

#            noise_repr = noise_sequence[-1]
#            correct_repr = correct_sequence[-1]
            noise_repr = noise_sequence[-1:]
            correct_repr = correct_sequence[-1:]
            assert noise_repr != correct_repr
            assert noise_sequence[:-1] == correct_sequence[:-1]
            sequence = correct_sequence[:-1]
#            r = graph.train(self.embed(sequence), self.embed([correct_repr])[0], self.embed([noise_repr])[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr])
            r = graph.train(self.embed(sequence), self.embed(correct_repr)[0], self.embed(noise_repr)[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr], learning_rate * weight)
            assert len(noise_repr) == 1
            assert len(correct_repr) == 1
            noise_repr = noise_repr[0]
            correct_repr = correct_repr[0]
            (loss, predictrepr, correct_score, noise_score, dsequence, dcorrect_repr, dnoise_repr, dcorrect_scorebias, dnoise_scorebias) = r
#            print
#            print "loss = ", loss
#            print "predictrepr = ", predictrepr
#            print "correct_repr = ", correct_repr, self.embed(correct_repr)[0]
#            print "noise_repr = ", noise_repr, self.embed(noise_repr)[0]
#            print "correct_score = ", correct_score
#            print "noise_score = ", noise_score
        else:
            noise_sequences, weights = self.corrupt_examples(correct_sequences)
            # All weights must be the same, since we fold them into a single scalar learning rate
            for w in weights: assert w == weights[0]
            #print self.embeds(correct_sequences)
            #print self.embeds(noise_sequences)
            #print learning_rate * weights[0]
            r = graph.train(self.embeds(correct_sequences), self.embeds(noise_sequences), learning_rate * weights[0])
            (dcorrect_inputss, dnoise_inputss, losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
#            print [d.shape for d in dcorrect_inputss]
#            print [d.shape for d in dnoise_inputss]
#            print "losss", losss.shape, losss
#            print "unpenalized_losss", unpenalized_losss.shape, unpenalized_losss
#            print "l1penaltys", l1penaltys.shape, l1penaltys
#            print "correct_scores", correct_scores.shape, correct_scores
#            print "noise_scores", noise_scores.shape, noise_scores

        import sets
        to_normalize = sets.Set()
        for ecnt in range(len(correct_sequences)):
            (loss, unpenalized_loss, correct_score, noise_score) = \
                (losss[ecnt], unpenalized_losss[ecnt], correct_scores[ecnt], noise_scores[ecnt])
            if l1penaltys.shape == ():
                assert l1penaltys == 0
                l1penalty = 0
            else:
                l1penalty = l1penaltys[ecnt]
            correct_sequence = correct_sequences[ecnt]
            noise_sequence = noise_sequences[ecnt]

            dcorrect_inputs = [d[ecnt] for d in dcorrect_inputss]
            dnoise_inputs = [d[ecnt] for d in dnoise_inputss]

#            print [d.shape for d in dcorrect_inputs]
#            print [d.shape for d in dnoise_inputs]
#            print "loss", loss.shape, loss
#            print "unpenalized_loss", unpenalized_loss.shape, unpenalized_loss
#            print "l1penalty", l1penalty.shape, l1penalty
#            print "correct_score", correct_score.shape, correct_score
#            print "noise_score", noise_score.shape, noise_score


            self.train_loss.add(loss)
            self.train_err.add(correct_score <= noise_score)
            self.train_lossnonzero.add(loss > 0)
            squashloss = 1./(1.+math.exp(-loss))
            self.train_squashloss.add(squashloss)
            if not LBL:
                self.train_unpenalized_loss.add(unpenalized_loss)
                self.train_l1penalty.add(l1penalty)
                self.train_unpenalized_lossnonzero.add(unpenalized_loss > 0)
            self.train_correct_score.add(correct_score)
            self.train_noise_score.add(noise_score)
    
            self.train_cnt += 1
            if self.train_cnt % 10000 == 0:
    #        if self.train_cnt % 1000 == 0:
    #            print self.train_cnt
#                graph.COMPILE_MODE.print_summary()
                logging.info(("After %d updates, pre-update train loss %s" % (self.train_cnt, self.train_loss)))
                logging.info(("After %d updates, pre-update train error %s" % (self.train_cnt, self.train_err)))
                logging.info(("After %d updates, pre-update train Pr(loss != 0) %s" % (self.train_cnt, self.train_lossnonzero)))
                logging.info(("After %d updates, pre-update train squash(loss) %s" % (self.train_cnt, self.train_squashloss)))
                if not LBL:
                    logging.info(("After %d updates, pre-update train unpenalized loss %s" % (self.train_cnt, self.train_unpenalized_loss)))
                    logging.info(("After %d updates, pre-update train l1penalty %s" % (self.train_cnt, self.train_l1penalty)))
                    logging.info(("After %d updates, pre-update train Pr(unpenalized loss != 0) %s" % (self.train_cnt, self.train_unpenalized_lossnonzero)))
                logging.info(("After %d updates, pre-update train correct score %s" % (self.train_cnt, self.train_correct_score)))
                logging.info(("After %d updates, pre-update train noise score %s" % (self.train_cnt, self.train_noise_score)))
    
                if LBL:
                    i = 1.
                    while i < wordmap.len:
                        inti = int(i)
                        str = "word %s, rank %d, score %f" % (wordmap.str(inti), inti, self.parameters.score_biases[inti])
                        logging.info("After %d updates, score biases: %s" % (self.train_cnt, str))
                        i *= 3.2
    
    #            print(("After %d updates, pre-update train loss %s" % (self.train_cnt, self.train_loss.verbose_string())))
    #            print(("After %d updates, pre-update train error %s" % (self.train_cnt, self.train_err.verbose_string())))
    
            for w in weights: assert w == weights[0]
            embedding_learning_rate = HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] * weights[0]
            if loss == 0:
                if LBL:
                    for di in dsequence + [dcorrect_repr, dnoise_repr]:
                        # This tends to trigger if training diverges (NaN)
                        assert (di == 0).all()

                else:
                    for di in dcorrect_inputs + dnoise_inputs:
                        assert (di == 0).all()
    
            if loss != 0:
                if LBL:
                    val = sequence + [correct_repr, noise_repr]
                    dval = dsequence + [dcorrect_repr, dnoise_repr]
    #                print val
                    for (i, di) in zip(val, dval):
    #                for (i, di) in zip(tuple(sequence + [correct_repr, noise_repr]), tuple(dsequence + [dcorrect_repr, dnoise_repr])):
                        assert di.shape[0] == 1
                        di.resize(di.size)
    #                    print i, di
                        self.parameters.embeddings[i] -= 1.0 * embedding_learning_rate * di
                        if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
                            to_normalize.add(i)
    
                    for (i, di) in zip([correct_repr, noise_repr], [dcorrect_scorebias, dnoise_scorebias]):
                        self.parameters.score_biases[i] -= 1.0 * embedding_learning_rate * di
    #                    print "REMOVEME", i, self.parameters.score_biases[i]
                else:
                    for (i, di) in zip(correct_sequence, dcorrect_inputs):
#                        assert di.shape[0] == 1
#                        di.resize(di.size)
    #                    print i, di
                        assert di.shape == (self.parameters.embedding_size,)
                        self.parameters.embeddings[i] -= 1.0 * embedding_learning_rate * di
                        if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
                            to_normalize.add(i)
                    for (i, di) in zip(noise_sequence, dnoise_inputs):
#                        assert di.shape[0] == 1
#                        di.resize(di.size)
    #                    print i, di
                        assert di.shape == (self.parameters.embedding_size,)
                        self.parameters.embeddings[i] -= 1.0 * embedding_learning_rate * di
                        if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
                            to_normalize.add(i)
    #                print to_normalize
    
        if len(to_normalize) > 0:
            to_normalize = [i for i in to_normalize]
#            print "NORMALIZING", to_normalize
            self.parameters.normalize(to_normalize)



    def predict(self, sequence):
        if LBL:
            targetrepr = sequence[-1:]
            sequence = sequence[:-1]
            (predictrepr, score) = graph.predict(self.embed(sequence), self.embed(targetrepr)[0], self.parameters.score_biases[targetrepr], self.parameters)
            return score
        else:
            (score) = graph.predict(self.embed(sequence), self.parameters)
            return score

    def verbose_predict(self, sequence):
        if LBL:
            assert 0
        else:
            (score, prehidden) = graph.verbose_predict(self.embed(sequence), self.parameters)
            return score, prehidden

    def validate(self, sequence):
        """
        Get the rank of the final word's score among all other words in the vocabulary.
        """
        import random
        r = random.Random()
        r.seed(0)
        from hyperparameters import HYPERPARAMETERS

        import copy
        corrupt_sequence = copy.copy(sequence)
        rank = 1
        correct_score = self.predict(sequence)
#        print "CORRECT", correct_score, [wordmap.str(id) for id in sequence]
        for i in range(self.parameters.vocab_size):
            if r.random() > HYPERPARAMETERS["PERCENT OF NOISE EXAMPLES FOR VALIDATION LOGRANK"]: continue
            if i == sequence[-1]: continue
            corrupt_sequence[-1] = i
            corrupt_score = self.predict(corrupt_sequence)
            if correct_score <= corrupt_score:
#                print " CORRUPT", corrupt_score, [wordmap.str(id) for id in corrupt_sequence]
                rank += 1
        return rank
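
# The Model above reports every statistic through MovingAverage objects. A
# minimal, hypothetical sketch of that helper (the original class is defined
# elsewhere in the project) is an exponentially decayed mean whose str()
# prints the current bias-corrected estimate:
class MovingAverageSketch(object):
    def __init__(self, decay=0.999):
        self.decay = decay
        self.mean = 0.0
        self.cnt = 0

    def add(self, value):
        # Booleans are averaged as 0/1, so error rates work too.
        self.cnt += 1
        self.mean = self.decay * self.mean + (1.0 - self.decay) * float(value)

    def __str__(self):
        correction = 1.0 - self.decay ** self.cnt if self.cnt else 1.0
        return "%g" % (self.mean / correction)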
Ejemplo n.º 53
0
class Model:
    """
    A Model can:

    @type parameters: L{Parameters}
    @todo: Document
    """

    import hyperparameters
    import miscglobals
    import vocabulary
    def __init__(self, modelname="", window_size=HYPERPARAMETERS["WINDOW_SIZE"], vocab_size=vocabulary.wordmap().len, embedding_size=HYPERPARAMETERS["EMBEDDING_SIZE"], hidden_size=HYPERPARAMETERS["HIDDEN_SIZE"], seed=miscglobals.RANDOMSEED, initial_embeddings=None, two_hidden_layers=HYPERPARAMETERS["TWO_HIDDEN_LAYERS"]):
        self.modelname = modelname
        self.parameters = Parameters(window_size, vocab_size, embedding_size, hidden_size, seed, initial_embeddings, two_hidden_layers)
        if LBL:
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases
            graph.score_biases = self.parameters.score_biases
        else:
            graph.hidden_weights = self.parameters.hidden_weights
            graph.hidden_biases = self.parameters.hidden_biases
            if self.parameters.two_hidden_layers:
                graph.hidden2_weights = self.parameters.hidden2_weights
                graph.hidden2_biases = self.parameters.hidden2_biases
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases

#        (self.graph_train, self.graph_predict, self.graph_verbose_predict) = graph.functions(self.parameters)
        import sets
        self.train_loss = MovingAverage()
        self.train_err = MovingAverage()
        self.train_lossnonzero = MovingAverage()
        self.train_squashloss = MovingAverage()
        self.train_unpenalized_loss = MovingAverage()
        self.train_l1penalty = MovingAverage()
        self.train_unpenalized_lossnonzero = MovingAverage()
        self.train_correct_score = MovingAverage()
        self.train_noise_score = MovingAverage()
        self.train_cnt = 0

    def __getstate__(self):
        return (self.modelname, self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt)

    def __setstate__(self, state):
        (self.modelname, self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt) = state
        if LBL:
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases
            graph.score_biases = self.parameters.score_biases
        else:
            graph.hidden_weights = self.parameters.hidden_weights
            graph.hidden_biases = self.parameters.hidden_biases
            if self.parameters.two_hidden_layers:
                graph.hidden2_weights = self.parameters.hidden2_weights
                graph.hidden2_biases = self.parameters.hidden2_biases
            graph.output_weights = self.parameters.output_weights
            graph.output_biases = self.parameters.output_biases

#    def load(self, filename):
#        sys.stderr.write("Loading model from: %s\n" % filename)
#        f = myopen(filename, "rb")
#        (self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt) = pickle.load(f)
#        if LBL:
#            graph.output_weights = self.parameters.output_weights
#            graph.output_biases = self.parameters.output_biases
#            graph.score_biases = self.parameters.score_biases
#        else:
#            graph.hidden_weights = self.parameters.hidden_weights
#            graph.hidden_biases = self.parameters.hidden_biases
#            graph.output_weights = self.parameters.output_weights
#            graph.output_biases = self.parameters.output_biases
#
#    def save(self, filename):
#        sys.stderr.write("Saving model to: %s\n" % filename)
#        f = myopen(filename, "wb")
#        pickle.dump((self.parameters, self.train_loss, self.train_err, self.train_lossnonzero, self.train_squashloss, self.train_unpenalized_loss, self.train_l1penalty, self.train_unpenalized_lossnonzero, self.train_correct_score, self.train_noise_score, self.train_cnt), f)

    def embed(self, sequence):
        """
        Embed a sequence of vocabulary IDs
        """
        seq = [self.parameters.embeddings[s] for s in sequence]
        import numpy
        return [numpy.resize(s, (1, s.size)) for s in seq]
#        return [self.parameters.embeddings[s] for s in sequence]

    def embeds(self, sequences):
        """
        Embed sequences of vocabulary IDs.
        If we are given a list of MINIBATCH lists of SEQLEN items, return a list of SEQLEN matrices of shape (MINIBATCH, EMBSIZE)
        """
        embs = []
        for sequence in sequences:
            embs.append(self.embed(sequence))

        for emb in embs: assert len(emb) == len(embs[0])

        new_embs = []
        for i in range(len(embs[0])):
            colembs = [embs[j][i] for j in range(len(embs))]
            import numpy
            new_embs.append(numpy.vstack(colembs))
            assert new_embs[-1].shape == (len(sequences), self.parameters.embedding_size)
        assert len(new_embs) == len(sequences[0])
        return new_embs

    def train(self, correct_sequences, noise_sequences, weights):
        from hyperparameters import HYPERPARAMETERS
        learning_rate = HYPERPARAMETERS["LEARNING_RATE"]

        # All weights must be the same, because of how we use a scalar learning rate
        assert HYPERPARAMETERS["UNIFORM EXAMPLE WEIGHTS"]
        if HYPERPARAMETERS["UNIFORM EXAMPLE WEIGHTS"]:
            for w in weights: assert w == weights[0]

        if LBL:
            # REWRITE FOR MINIBATCH
            assert 0

#            noise_repr = noise_sequence[-1]
#            correct_repr = correct_sequence[-1]
            noise_repr = noise_sequence[-1:]
            correct_repr = correct_sequence[-1:]
            assert noise_repr != correct_repr
            assert noise_sequence[:-1] == correct_sequence[:-1]
            sequence = correct_sequence[:-1]
#            r = graph.train(self.embed(sequence), self.embed([correct_repr])[0], self.embed([noise_repr])[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr])
            r = graph.train(self.embed(sequence), self.embed(correct_repr)[0], self.embed(noise_repr)[0], self.parameters.score_biases[correct_repr], self.parameters.score_biases[noise_repr], learning_rate * weight)
            assert len(noise_repr) == 1
            assert len(correct_repr) == 1
            noise_repr = noise_repr[0]
            correct_repr = correct_repr[0]
            (loss, predictrepr, correct_score, noise_score, dsequence, dcorrect_repr, dnoise_repr, dcorrect_scorebias, dnoise_scorebias) = r
#            print
#            print "loss = ", loss
#            print "predictrepr = ", predictrepr
#            print "correct_repr = ", correct_repr, self.embed(correct_repr)[0]
#            print "noise_repr = ", noise_repr, self.embed(noise_repr)[0]
#            print "correct_score = ", correct_score
#            print "noise_score = ", noise_score
        else:
            r = graph.train(self.embeds(correct_sequences), self.embeds(noise_sequences), learning_rate * weights[0])
            if HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] != 0:
                (dcorrect_inputss, dnoise_inputss, losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
            else:
                (losss, unpenalized_losss, l1penaltys, correct_scores, noise_scores) = r
#            print [d.shape for d in dcorrect_inputss]
#            print [d.shape for d in dnoise_inputss]
#            print "losss", losss.shape, losss
#            print "unpenalized_losss", unpenalized_losss.shape, unpenalized_losss
#            print "l1penaltys", l1penaltys.shape, l1penaltys
#            print "correct_scores", correct_scores.shape, correct_scores
#            print "noise_scores", noise_scores.shape, noise_scores

        import sets
        to_normalize = sets.Set()
        for ecnt in range(len(correct_sequences)):
            (loss, unpenalized_loss, correct_score, noise_score) = \
                (losss[ecnt], unpenalized_losss[ecnt], correct_scores[ecnt], noise_scores[ecnt])
            if l1penaltys.shape == ():
                assert l1penaltys == 0
                l1penalty = 0
            else:
                l1penalty = l1penaltys[ecnt]
            correct_sequence = correct_sequences[ecnt]
            noise_sequence = noise_sequences[ecnt]

            if HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] != 0:
                dcorrect_inputs = [d[ecnt] for d in dcorrect_inputss]
                dnoise_inputs = [d[ecnt] for d in dnoise_inputss]

#            print [d.shape for d in dcorrect_inputs]
#            print [d.shape for d in dnoise_inputs]
#            print "loss", loss.shape, loss
#            print "unpenalized_loss", unpenalized_loss.shape, unpenalized_loss
#            print "l1penalty", l1penalty.shape, l1penalty
#            print "correct_score", correct_score.shape, correct_score
#            print "noise_score", noise_score.shape, noise_score


            self.train_loss.add(loss)
            self.train_err.add(correct_score <= noise_score)
            self.train_lossnonzero.add(loss > 0)
            squashloss = 1./(1.+math.exp(-loss))
            self.train_squashloss.add(squashloss)
            if not LBL:
                self.train_unpenalized_loss.add(unpenalized_loss)
                self.train_l1penalty.add(l1penalty)
                self.train_unpenalized_lossnonzero.add(unpenalized_loss > 0)
            self.train_correct_score.add(correct_score)
            self.train_noise_score.add(noise_score)
    
            self.train_cnt += 1
            if self.train_cnt % 10000 == 0:
    #        if self.train_cnt % 1000 == 0:
    #            print self.train_cnt
#                graph.COMPILE_MODE.print_summary()
                logging.info(("After %d updates, pre-update train loss %s" % (self.train_cnt, self.train_loss.verbose_string())))
                logging.info(("After %d updates, pre-update train error %s" % (self.train_cnt, self.train_err.verbose_string())))
                logging.info(("After %d updates, pre-update train Pr(loss != 0) %s" % (self.train_cnt, self.train_lossnonzero.verbose_string())))
                logging.info(("After %d updates, pre-update train squash(loss) %s" % (self.train_cnt, self.train_squashloss.verbose_string())))
                if not LBL:
                    logging.info(("After %d updates, pre-update train unpenalized loss %s" % (self.train_cnt, self.train_unpenalized_loss.verbose_string())))
                    logging.info(("After %d updates, pre-update train l1penalty %s" % (self.train_cnt, self.train_l1penalty.verbose_string())))
                    logging.info(("After %d updates, pre-update train Pr(unpenalized loss != 0) %s" % (self.train_cnt, self.train_unpenalized_lossnonzero.verbose_string())))
                logging.info(("After %d updates, pre-update train correct score %s" % (self.train_cnt, self.train_correct_score.verbose_string())))
                logging.info(("After %d updates, pre-update train noise score %s" % (self.train_cnt, self.train_noise_score.verbose_string())))

                self.debug_prehidden_values(correct_sequences)
    
                if LBL:
                    i = 1.
                    while i < wordmap.len:
                        inti = int(i)
                        str = "word %s, rank %d, score %f" % (wordmap.str(inti), inti, self.parameters.score_biases[inti])
                        logging.info("After %d updates, score biases: %s" % (self.train_cnt, str))
                        i *= 3.2
    
    #            print(("After %d updates, pre-update train loss %s" % (self.train_cnt, self.train_loss.verbose_string())))
    #            print(("After %d updates, pre-update train error %s" % (self.train_cnt, self.train_err.verbose_string())))
    

            # All weights must be the same, because of how we use a scalar learning rate
            assert HYPERPARAMETERS["UNIFORM EXAMPLE WEIGHTS"]
            if HYPERPARAMETERS["UNIFORM EXAMPLE WEIGHTS"]:
                for w in weights: assert w == weights[0]
            embedding_learning_rate = HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] * weights[0]
            if loss == 0:
                if LBL:
                    for di in dsequence + [dcorrect_repr, dnoise_repr]:
                        # This tends to trigger if training diverges (NaN)
                        assert (di == 0).all()
    #                if not (di == 0).all():
    #                    print "WARNING:", di
    #                    print "WARNING in ", dsequence + [dcorrect_repr, dnoise_repr]
    #                    print "loss = ", loss
    #                    print "predictrepr = ", predictrepr
    #                    print "correct_repr = ", correct_repr, self.embed(correct_repr)[0]
    #                    print "noise_repr = ", noise_repr, self.embed(noise_repr)[0]
    #                    print "correct_score = ", correct_score
    #                    print "noise_score = ", noise_score
                else:
                    if HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] != 0:
                        for di in dcorrect_inputs + dnoise_inputs:
                            assert (di == 0).all()
    
            if loss != 0:
                if LBL:
                    val = sequence + [correct_repr, noise_repr]
                    dval = dsequence + [dcorrect_repr, dnoise_repr]
    #                print val
                    for (i, di) in zip(val, dval):
    #                for (i, di) in zip(tuple(sequence + [correct_repr, noise_repr]), tuple(dsequence + [dcorrect_repr, dnoise_repr])):
                        assert di.shape[0] == 1
                        di.resize(di.size)
    #                    print i, di
                        self.parameters.embeddings[i] -= 1.0 * embedding_learning_rate * di
                        if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
                            to_normalize.add(i)
    
                    for (i, di) in zip([correct_repr, noise_repr], [dcorrect_scorebias, dnoise_scorebias]):
                        self.parameters.score_biases[i] -= 1.0 * embedding_learning_rate * di
    #                    print "REMOVEME", i, self.parameters.score_biases[i]
                else:
                    if HYPERPARAMETERS["EMBEDDING_LEARNING_RATE"] != 0:
                        for (i, di) in zip(correct_sequence, dcorrect_inputs):
    #                        assert di.shape[0] == 1
    #                        di.resize(di.size)
        #                    print i, di
                            assert di.shape == (self.parameters.embedding_size,)
                            self.parameters.embeddings[i] -= 1.0 * embedding_learning_rate * di
                            if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
                                to_normalize.add(i)
                        for (i, di) in zip(noise_sequence, dnoise_inputs):
    #                        assert di.shape[0] == 1
    #                        di.resize(di.size)
        #                    print i, di
                            assert di.shape == (self.parameters.embedding_size,)
                            self.parameters.embeddings[i] -= 1.0 * embedding_learning_rate * di
                            if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
                                to_normalize.add(i)
        #                print to_normalize
    
        if len(to_normalize) > 0:
            to_normalize = [i for i in to_normalize]
#            print "NORMALIZING", to_normalize
            self.parameters.normalize(to_normalize)



    def predict(self, sequence):
        if LBL:
            targetrepr = sequence[-1:]
            sequence = sequence[:-1]
            (predictrepr, score) = graph.predict(self.embed(sequence), self.embed(targetrepr)[0], self.parameters.score_biases[targetrepr], self.parameters)
            return score
        else:
            (score) = graph.predict(self.embed(sequence), self.parameters)
            return score

    def verbose_predict(self, sequence):
        if LBL:
            assert 0
        else:
            (score, prehidden) = graph.verbose_predict(self.embed(sequence))
            return score, prehidden
    
    def debug_prehidden_values(self, sequences):
        """
        Give debug output on pre-squash hidden values.
        """
        import numpy
        for (i, ve) in enumerate(sequences):
            (score, prehidden) = self.verbose_predict(ve)
            abs_prehidden = numpy.abs(prehidden)
            med = numpy.median(abs_prehidden)
            abs_prehidden = abs_prehidden.tolist()
            assert len(abs_prehidden) == 1
            abs_prehidden = abs_prehidden[0]
            abs_prehidden.sort()
            abs_prehidden.reverse()

            logging.info("model %s, %s %s %s %s %s" % (self.modelname, self.train_cnt, "abs(pre-squash hidden) median =", med, "max =", abs_prehidden[:3]))
            if i+1 >= 3: break

    def validate(self, sequence):
        """
        Get the rank of the final word's score among all other words in the vocabulary.
        """
        import random
        r = random.Random()
        r.seed(0)
        from hyperparameters import HYPERPARAMETERS

        import copy
        corrupt_sequence = copy.copy(sequence)
        rank = 1
        correct_score = self.predict(sequence)
#        print "CORRECT", correct_score, [wordmap.str(id) for id in sequence]
        for i in range(self.parameters.vocab_size):
            if r.random() > HYPERPARAMETERS["PERCENT OF NOISE EXAMPLES FOR VALIDATION LOGRANK"]: continue
            if i == sequence[-1]: continue
            corrupt_sequence[-1] = i
            corrupt_score = self.predict(corrupt_sequence)
            if correct_score <= corrupt_score:
#                print " CORRUPT", corrupt_score, [wordmap.str(id) for id in corrupt_sequence]
                rank += 1
        return rank

    def validate_errors(self, correct_sequences, noise_sequences):
        """
        Count the errors in this validation batch.
        """

#            r = graph.train(self.embeds(correct_sequences), self.embeds(noise_sequences), learning_rate * weights[0])
        correct_scores = graph.predict(self.embeds(correct_sequences))
        noise_scores = graph.predict(self.embeds(noise_sequences))

#        print correct_scores
#        print noise_scores
        return correct_scores > noise_scores
Ejemplo n.º 54
0

def output_query_result(res):
    if res is None:
        print('None')
    else:
        for r in res:
            print(r)
        print('---')


factory = Factory()
factory.register('FsWrapper', FsWrapper())

try:
    p = Parameters(factory, ['?'], ['db', 'q', 'Q', 'x', 'f'], [],
                   sys.argv[1:])
except ParamError as e:
    Usage('Param error: ' + e.value)

if p.get_switch('?'):
    print('select * from Collections')
    print("update Collections set name='Davids' where name = 'pop'")
    sys.exit(0)

if p.get_option('db') is None:
    Usage('db missing')
if not os.path.isfile(p.get_option('db')):
    Usage('db not found')

try:
    database = SQLiteDatabase(
Ejemplo n.º 55
0
def main():
    """ """
    sensor = options['sensor']

    mapsets = options['mapsets']
    prefix = options['input_prefix']
    suffix = options['output_suffix']

    metafile = grass.basename(options['metafile'])

    # 6S parameter names shortened following i.atcorr's manual
    atm = int(options['atmospheric_model'])  # Atmospheric model [index]
    aer = int(options['aerosols_model'])  # Aerosols model [index]

    vis = options['visibility_range']  # Visibility [km]
    aod = options['aerosol_optical_depth']  # Aerosol Optical Depth at 550nm

    xps = options['altitude']  # Mean Target Altitude [negative km]
    if not xps:
        msg = "Note, this value will be overwritten if a DEM raster has been "\
              "defined as an input!"
        g.message(msg)

    elevation_map = options['elevation']
    visibility_map = options['visibility']

    radiance = flags['r']
    if radiance:
        radiance_flag = 'r'
    else:
        radiance_flag = ''

    # If the scene to be processed was imported via the (custom) python
    # Landsat import script, then, Mapset name == Scene identifier

    mapset = grass.gisenv()['MAPSET']
    if mapset == 'PERMANENT':
        grass.fatal(_('Please change to another mapset than the PERMANENT'))

#    elif 'L' not in mapset:
#        msg = "Assuming the Landsat scene(s) ha-s/ve been imported using the "\
#              "custom python import script, the Mapset's name *should* begin "\
#              "with the letter L!"
#        grass.fatal(_(msg))

    else:
        result = grass.find_file(element='cell_misc',
                                 name=metafile,
                                 mapset='.')
        if not result['file']:
            grass.fatal("The metadata file <%s> is not in GRASS' data base!"
                        % metafile)
        else:
            metafile = result['file']

    #
    # Acquisition's metadata
    #

    msg = "Acquisition metadata for 6S code (line 2 in Parameters file)\n"

    # Month, day
    date = grass.parse_command('i.landsat.toar', flags='p',
                               input='dummy', output='dummy',
                               metfile=metafile, lsatmet='date')
    mon = int(date['date'][5:7])  # Month of acquisition
    day = int(date['date'][8:10])  # Day of acquisition

    # GMT in decimal hours
    gmt = grass.read_command('i.landsat.toar', flags='p',
                             input='dummy', output='dummy',
                             metfile=metafile, lsatmet='time')
    gmt = float(gmt.rstrip('\n'))

    # Scene's center coordinates
    cll = grass.parse_command('g.region', flags='clg')
    lon = float(cll['center_long'])  # Center Longitude [decimal degrees]
    lat = float(cll['center_lat'])  # Center Latitude [decimal degrees]

    msg += str(mon) + ' ' + str(day) + ' ' + str(gmt) + ' ' + \
        str(lon) + ' ' + str(lat)
    g.message(msg)
   
    # 
    # AOD
    #
    if aod:
        aod = float(options['aerosol_optical_depth'])

    else:
        # sane defaults
        if 4 < mon < 10:
            aod = float(0.222)  # summer
        else:
            aod = float(0.111)  # winter

    #
    # Mapsets are Scenes. Read'em all!
    #

    if mapsets == 'all':
        scenes = grass.mapsets()

    elif mapsets == 'current':
        scenes = [mapset]

    else:
        scenes = mapsets.split(',')

    if 'PERMANENT' in scenes:
        scenes.remove('PERMANENT')

    # access only to specific mapsets!
    msg = "\n|* Performing atmospheric correction for scenes:  %s" % scenes
    g.message(msg)

    for scene in scenes:

        # ensure access only to *current* mapset
        run('g.mapsets', mapset='.', operation='set')

        # scene's basename as in GRASS' db
        basename = grass.read_command('g.mapset', flags='p')
        msg = "   | Processing scene:  %s" % basename
        g.message(msg)

        # loop over Landsat bands in question
        for band in sensors[sensor].keys():

            inputband = prefix + str(band)
            msg = '\n>>> Processing band: {band}'.format(band=inputband)
            g.message(msg)


            # Generate 6S parameterization file
            p6s = Parameters(geo=geo[sensor],
                             mon=mon, day=day, gmt=gmt, lon=lon, lat=lat,
                             atm=atm,
                             aer=aer,
                             vis=vis,
                             aod=aod,
                             xps=xps, xpp=xpp,
                             bnd=sensors[sensor][band])
            
            #
            # Temporary files
            #
            tmpfile = grass.tempfile()
            tmp = "tmp." + grass.basename(tmpfile)  # use its basename

            tmp_p6s = grass.tempfile()  # 6S Parameters ASCII file
            tmp_atm_cor = "%s_cor_out" % tmp  # Atmospherically Corrected Img

            p6s.export_ascii(tmp_p6s)

            # Process band-wise atmospheric correction with 6s
            msg = "6S parameters:\n\n"
            msg += p6s.parameters
            g.message(msg)

            # inform about input's range?
            input_range = grass.parse_command('r.info', flags='r', map=inputband)
            input_range['min'] = float(input_range['min'])
            input_range['max'] = float(input_range['max'])
            msg = "Input range: %.2f ~ %.2f" % (input_range['min'], input_range['max'])
            g.message(msg)

            #
            # Applying 6S Atmospheric Correction algorithm
            #
            run_i_atcorr(radiance_flag,
                         inputband,
                         input_range,
                         elevation_map,
                         visibility_map,
                         tmp_p6s,
                         tmp_atm_cor,
                         (0,1))
        
            # inform about output's range?
            output_range = grass.parse_command('r.info', flags='r', map=tmp_atm_cor)
            output_range['min'] = float(output_range['min'])
            output_range['max'] = float(output_range['max'])
            msg = "Output range: %.2f ~ %.2f" \
                % (output_range['min'], output_range['max'])
            g.message(msg)

            # add suffix to basename & rename end product
            atm_cor_nam = ("%s%s.%s" % (prefix, suffix, band))
            run('g.rename', rast=(tmp_atm_cor, atm_cor_nam))
Ejemplo n.º 56
0
def test_epoch(self):
    x = np.array([[1, i] for i in range(10)])
    y = np.array([2 * i + 10 for i in range(10)])
    gd = GradientDescent(Parameters(batch_size=4, learning_rate=1, decay=0.5),
                         linear_regression_hypothesis)
    gd.epoch((x, y), np.zeros(2))
    self.assertEqual(gd.parameters.learning_rate, 0.5)
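
# A minimal, hypothetical sketch consistent with the test above (not the
# project's actual implementation): Parameters is a plain attribute holder,
# the hypothesis maps (x, theta) to predictions, and epoch() runs one pass
# over the mini-batches before multiplying the learning rate by the decay
# factor -- which is exactly what assertEqual(..., 0.5) checks.
import numpy as np

def linear_regression_hypothesis_sketch(x, theta):
    # Linear hypothesis h(x) = x . theta
    return x.dot(theta)

class ParametersSketch(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

class GradientDescentSketch(object):
    def __init__(self, parameters, hypothesis):
        self.parameters = parameters
        self.hypothesis = hypothesis

    def epoch(self, data, theta):
        x, y = data
        bs = self.parameters.batch_size
        for start in range(0, len(y), bs):
            xb, yb = x[start:start + bs], y[start:start + bs]
            # Mean-squared-error gradient for the given hypothesis.
            grad = 2.0 * xb.T.dot(self.hypothesis(xb, theta) - yb) / len(yb)
            theta = theta - self.parameters.learning_rate * grad
        # Decay the learning rate once per epoch.
        self.parameters.learning_rate *= self.parameters.decay
        return theta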
Ejemplo n.º 57
0
                   index=True,
                   header=True)

        # for each row in the design of the experiment table
        for exp in range(doe.shape[0]):
            # repeat it for number_of_runs times
            for run in range(number_of_runs):

                print('Starting experiment n.', str(exp))

                directory = main_path + '/' + multiple_experiments_folder + '/exp' + str(
                    exp) + '/run_' + str(run) + '/'
                if not os.path.exists(directory):
                    os.makedirs(directory)

                    parameters = Parameters()
                    #parameters.set('goal_selection_mode', 'som')
                    parameters.set('doe_experiment_id', exp)
                    parameters.set('run_id', run)
                    romi_dataset_folder = main_path + '/romi_data/'
                    parameters.set('directory_romi_dataset',
                                   romi_dataset_folder)

                    parameters.set('directory_main', directory)
                    parameters.set('directory_models', directory + 'models/')
                    parameters.set('directory_pretrained_models',
                                   main_path + '/pretrained_models/')
                    parameters.set('directory_results', directory + 'results/')
                    parameters.set('directory_plots', directory + 'plots/')

                    # design of experiments
Ejemplo n.º 58
0
MAX_DOCUMENT_LENGTH = 300
EMBEDDING_SIZE = 5
HIDDEN_SIZE1 = 4
HIDDEN_SIZE2 = 4
ATTENTION_SIZE = 2
lr = 1e-4
BATCH_SIZE = 256
KEEP_PROB = 0.5
LAMBDA = 0.0001

MAX_LABEL = 2
epochs = 200

#dbpedia = tf.contrib.learn.datasets.load_dataset('dbpedia')
parameters = Parameters()
parameters.add_parameter("METHOD", "O-SVM")
parameters.add_parameter("MAX_DOCUMENT_LENGTH", MAX_DOCUMENT_LENGTH)
parameters.add_parameter("EMBEDDING_SIZE", EMBEDDING_SIZE)
parameters.add_parameter("HIDDEN_SIZE1", HIDDEN_SIZE1)
parameters.add_parameter("HIDDEN_SIZE2", HIDDEN_SIZE2)
parameters.add_parameter("lr", lr)
parameters.add_parameter("BATCH_SIZE", BATCH_SIZE)
parameters.add_parameter("KEEP_PROB", KEEP_PROB)
parameters.add_parameter("LAMBDA", LAMBDA)
parameters.add_parameter("MAX_LABEL", MAX_LABEL)
parameters.add_parameter("epochs", epochs)

# load data
x_train, y_train = (
    [], []
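
# A minimal, hypothetical sketch of the recording-style Parameters object
# used above (the real class is project-specific): add_parameter() stores
# name/value pairs so the hyperparameter set can be logged with the results.
class ParametersRecorderSketch(object):
    def __init__(self):
        self._params = {}

    def add_parameter(self, name, value):
        self._params[name] = value

    def __str__(self):
        return '\n'.join('%s = %r' % (k, v)
                         for k, v in sorted(self._params.items()))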
Ejemplo n.º 59
0
from nupic.research.spatial_pooler import SpatialPooler
from vision_testbench import VisionTestBench
from classifiers import KNNClassifier

trainingDataset = 'DataSets/OCR/characters/cmr_hex.xml'
minAccuracy = 100.0
maxTrainingCycles = 5
testingDataset = 'DataSets/OCR/characters/cmr_hex.xml'

if __name__ == "__main__":
    # Get training images and convert them to vectors.
    trainingImages, trainingTags = data.getImagesAndTags(trainingDataset)
    trainingVectors = encoder.imagesToVectors(trainingImages)

    # Specify parameter values to search
    parameters = Parameters()
    parameters.define("synPermConn", [0.5])
    parameters.define("synPermDecFrac", [1.0, 0.5, 0.1])
    parameters.define("synPermIncFrac", [1.0, 0.5, 0.1])

    # Run the model until all combinations have been tried
    while parameters.getNumResults() < parameters.numCombinations:

        # Pick a combination of parameter values
        parameters.nextCombination()
        #parameters.nextRandomCombination()
        synPermConn = parameters.getValue("synPermConn")
        synPermDec = synPermConn * parameters.getValue("synPermDecFrac")
        synPermInc = synPermConn * parameters.getValue("synPermIncFrac")

        # Instantiate our spatial pooler
Ejemplo n.º 60
0
NUM_EPOCH = 3

print('load dataset')
dataset = os.path.join(DATA_DIR, DATASET)
with open(dataset, 'rb') as f:
    X, y = pickle.load(f)

n, d = X.shape

params = []

params.append(
    Parameters(name="full-sgd",
               num_epoch=NUM_EPOCH,
               lr_type='decay',
               initial_lr=2,
               tau=d,
               regularizer=1 / n,
               estimate='(t+tau)^2'))
params.append(
    Parameters(name="full-sgd-no-shift",
               num_epoch=NUM_EPOCH,
               lr_type='decay',
               initial_lr=2,
               tau=1,
               regularizer=1 / n,
               estimate='(t+tau)^2'))
params.append(
    Parameters(name="top1-no-memory",
               num_epoch=NUM_EPOCH,
               lr_type='decay',