def backup_all(run_number, stations):
    """
    Backup all indicated stations.

    See `main` for detailed parameter documentation.
    """
    run = 'Run' + run_number
    for s in stations:
        station = 'station_' + s
        src_path = osjoin(DEFAULT_STATION_PATH, station)
        run_path = osjoin(DEFAULT_BACKUP_PATH, run)
        backup_path = osjoin(run_path, station)
        backup(src_path, run_path, backup_path)
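
A minimal usage sketch, assuming DEFAULT_STATION_PATH, DEFAULT_BACKUP_PATH, and backup() are defined in the surrounding module; the run number and station suffixes here are hypothetical:

# Hypothetical call: backs up station_01 and station_02 into
# <DEFAULT_BACKUP_PATH>/Run7/station_XX via backup().
backup_all('7', ['01', '02'])
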
Example #2
    def interactive_figure(self):
        """Add interactivity, ie. the option to show/hide lines to the figure."""

        lines = self.plot_figure()  # Returns a dict mapping labels to line renderers
        labels = list(lines.keys())  # Labels for the checkbox entries
        lineNames = ['l'+str(x) for x in range(len(lines))]  # Prepare a list of names for the lines
        lines = {k: v for k, v in zip(lineNames, lines.values())}  # Create a dictionary {name: line}
        activeL = list(range(len(lines)))  # List of all line index to mark them as active in CheckboxGroup

        JScode = [self._visible_line_JS(k) for k in lines]  # Generate JavaScript for each line
        JScode = '\n'.join(JScode)  # From a list to a single string

        with open(osjoin(getcwd(), 'mLearning', 'JScodeAllLines.js'), 'r') as fileJS:
            buttonJS = fileJS.read()  # Read JavaScript code from a file to toggle the visibility of all lines
        # with open(osjoin(getcwd(), 'mLearning', 'JScode.js'), 'w+') as codeFile:
        #     codeFile.write(JScode)  # Write whole CustomJS to a file for debugging purposes

        callback = CustomJS(code=JScode, args={})  # Args will be added once checkbox and button are added to lines
        checkbox = CheckboxGroup(labels=labels,
                                 active=activeL,  # Labels to be ticked from the beginning
                                 callback=callback,
                                 name='checkbox')  # JavaScript var name

        buttonCallback = CustomJS(code=buttonJS, args={})  # Same as for callback
        button = Button(label="Select/Unselect All",  # Button HTML text
                        button_type="default",
                        callback=buttonCallback,
                        name='button')  # JavaScript var name

        lines['checkbox'], lines['button'] = checkbox, button  # Adding widget to lines
        callback.args, buttonCallback.args = lines, lines  # And then lines to callback
        layout = row(self.fig, widgetbox(children=[button, checkbox], width=200))  # One row, two columns

        logging.debug('Interaction implemented')
        return layout
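
A usage sketch, assuming an older Bokeh release (one where widgets still accept the callback keyword used above) and an instance plotter of the surrounding class:

from bokeh.io import output_file, show

output_file('interactive.html')     # hypothetical output path
show(plotter.interactive_figure())  # figure plus checkbox/button column
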
Example #3
    def openRasterImage(self):
        self.task_is_done = False
        dialog = wx.FileDialog(
            self,
            message="Choose a raster core",
            wildcard="BMP, PNG, JPG|*.bmp;*.png;*.jpg|"\
                     "BMP files (*.bmp)|*.bmp|"\
                     "PNG files (*.png)|*.png|"\
                     "JPG files (*.jpg)|*.jpg",
            style=wx.OPEN
        )

        if dialog.ShowModal() == wx.ID_OK:
            raster_filename = dialog.GetFilename()
            raster_format = raster_filename.split('.')[-1]
            raster_directory = dialog.GetDirectory()
            raster_path = osjoin(
                raster_directory,
                raster_filename
            )

            if open_image(raster_path):
                self.__reset_everything()
                self.raster_filename = copy(raster_filename)
                self.raster_format = copy(raster_format)
                self.raster_directory = copy(raster_directory)
                self.raster_path = copy(raster_path)
                self.task_is_done = True
            else:
                message_template = "'%s' is an invalid %s image."
                message_data = (raster_path, raster_format)
                message = message_template % message_data
                self.showErrorMessage(message)

        dialog.Destroy()
Example #4
 def __call__(self,Ntrain):
   d = loadSimulation(self.source)
   H = eval(d[0])
   
   # Check if a kalman filter exists, if not create one
   kalFile = self.noExtSource+"_kal.py"
   if isfile(kalFile):
     with open(kalFile,'r') as f:
       kal = eval(f.read())
   else:
     kal=kalman(d[-2].T)
     with open(kalFile,'w') as f:
       f.write(repr(kal))
   
   # Load data generated from same system
   self.sims = list()
   for f in self.fles:
     if f[-7:]==".pkl.gz" and f[-13:]!='_cache.pkl.gz':
       try:
         s = loadSimulation(osjoin(self.sourceDir,f))
         if eval(s[0]).spec==H.spec:
           self.sims.append(s)
       except EOFError:
         print "Failed to open", f, "skipping."
   
   # Grab the trials from the loaded data
   x,y = zip(*[(s[2],s[-2]) for s in self.sims])
   trials = Trials(x,y,Ntrain)
   trials.initialise(kal,self.pca,self.fd)
   return(DataSet(H,kal,trials,self.source,self.psfidx))
def write_output(in_reader, out_location, out_name, out_type, debug_flag=False):

    # Set up file to write to
    feature_index = 0
    spatial_reference = SpatialReference()
    spatial_reference.ImportFromEPSG(4326)
    driver = GetDriverByName(out_type)
    if out_type == 'ESRI Shapefile':
        out_data = driver.CreateDataSource('{0}.shp'.format(splitext(osjoin(out_location, out_name))[0]))
    elif out_type == 'GeoJSON':
        out_data = driver.CreateDataSource('{0}.geojson'.format(splitext(osjoin(out_location, out_name))[0]))
    elif out_type == 'KML':
        out_data = driver.CreateDataSource('{0}.kml'.format(splitext(osjoin(out_location, out_name))[0]))
    out_layer = out_data.CreateLayer('layer', spatial_reference, wkbPoint)

    # Set up fields
    setup_fields(out_layer)

    # Set Layer definition
    layer_definition = out_layer.GetLayerDefn()

    # Add points as they are processed by the reader
    for entry in in_reader:

        point = Geometry(wkbPoint)
        point.SetPoint(0, entry[4], entry[5])
        feature = Feature(layer_definition)
        feature.SetGeometry(point)

        # Increment FID value if it is a shp output
        if out_type == 'ESRI Shapefile':
            feature_index += 1
            feature.SetFID(feature_index)

        # Fill out all other fields
        fill_fields(feature, entry)

        # Add the feature to the layer
        out_layer.CreateFeature(feature)

        # Cleanup
        feature.Destroy()
        point.Destroy()

    # Big cleanup
    out_data.Destroy()
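
A usage sketch with a hypothetical in-memory reader; write_output only assumes each entry exposes x at index 4 and y at index 5 in EPSG:4326 coordinates, with the remaining fields consumed by the module's fill_fields helper:

# Two fake rows standing in for the reader's entries; indices 4 and 5
# carry the coordinates consumed by SetPoint above.
rows = [(1, 'a', 'b', 'c', -122.42, 37.77),
        (2, 'd', 'e', 'f', -73.99, 40.73)]
write_output(rows, '/tmp', 'points', 'GeoJSON')
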
Example #6
def move_src_file(filename, new_name):
    '''Move file with data to directory with data files.'''
    new_filename = new_name + '.csv'

    curr_path = os.getcwd()
    new_path = osjoin( curr_path, 'site_media', 'csv', new_filename )

    print('Moving file %s to %s' % (new_filename, new_path))
    shutil.move( filename, new_path )
	def create_project_file(self, project_name):
		fname=osjoin(self.project_folder, "%s.sublime-project" % project_name);

		if not self.picked_project_structure.has_project_proto:
			f = open(fname, "w+")

			#create folder for any buildscripts
			#os.makedirs(os.path.join(self.project_folder,"buildscripts"));

			settings = ""

			for item in self.picked_project_structure.settings:
				value = self.picked_project_structure.settings[item]
				settings += "\"%s\": %s," % (item, "\"%s\"" % value if isinstance(value, str) else value)

			if settings.endswith(","):
				settings = settings[:-1]

			build_systems = ""

			for bsystem in self.picked_project_structure.build_systems:
				build_systems += "{"

				for item in bsystem:
					value = bsystem[item]
					print(value, type(value))
					build_systems += "\"%s\": %s," % (item, "\"%s\"" % value if isinstance(value, str) else value)

				if build_systems.endswith(","):
					build_systems = build_systems[:-1]

				build_systems += "},"

			if build_systems.endswith(","):
				build_systems = build_systems[:-1]

			f.write("{\"folders\":[{\"path\":\"%s\"}], \"settings\":{%s}, \"build_systems\":[%s]}" % (project_name, settings, build_systems))
			f.close()
		else:
			#load the prototype as a json object
			print(self.picked_project_structure.proto_file)
			json_object = json.load(open(self.picked_project_structure.proto_file,'r'));
			
			project_folder=self.project_folder.lower();

			# make sure all paths in the project file are nix-style paths
			if os.name=="nt":
				project_folder=project_folder.replace("\\", "/").replace("c:", "/c")

			json_object["folders"][0]["path"]=project_folder;

			#dump the fname
			with open(fname, 'w') as f:
				f.write(pretty.json_dumps(json_object))
Example #8
def mk_temp_dir(dir, prefix):
    def randstr():
        # five pseudo-random hex characters; abs() avoids hex()'s leading '-'
        return hex(abs(hash(random())))[2:7]

    base = osjoin(dir, prefix)
    while True:
        try:
            newdir = base + randstr()
            mkdir(newdir)
            return newdir
        except OSError:
            pass
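
A usage sketch; for new code, the standard library's tempfile.mkdtemp gives the same create-or-retry guarantee with stronger randomness:

scratch = mk_temp_dir('/tmp', 'job_')  # e.g. /tmp/job_3fa9c

# Standard-library equivalent, shown for comparison:
import tempfile
scratch2 = tempfile.mkdtemp(prefix='job_', dir='/tmp')
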
	def run(self):
		# load all settings
		settings = sublime.load_settings("FancyProjects.sublime-settings")
		self.template_path = settings.get("project_template_directory")
		self.user_project_path = settings.get("user_project_directory")
		self.use_sublime_project_format = settings.get("use_sublime_project_format")
		self.use_counter_if_project_folder_already_exists = settings.get("use_counter_if_project_folder_already_exists")
		self.has_project_proto=False

		# insert packages path
		self.template_path = os.path.join(sublime.packages_path(), "Fancy Projects", "templates");
		self.user_project_path=os.path.join(os.path.expanduser("~"),"Projects");


		# get the content of the template folder
		template_folder_content = os.listdir(self.template_path)

		template_folder_items = []

		# get .fancyproj files
		for item in template_folder_content:
			if item.endswith(".fancyproj"):
				template_folder_items.append(item)


		# create instances of ProjectStructure from the .fancyproj file list
		self.project_structures = []

		for template_item in template_folder_items:
			if not os.path.exists(os.path.join(self.template_path, template_item, "fancyproj.json")):
				sublime.error_message("ERROR: %s is invalid" % template_item)
				return

			json_object = json.load(open("%s/%s/fancyproj.json" % (self.template_path, template_item)))
			project_structure = ProjectStructure(osjoin(self.template_path, template_item), json_object)

			project_structure.check_for_protoproj();

			self.project_structures.append(project_structure)

		# create a list with all templates
		self.quickpanel_template_list = []

		for project_template in self.project_structures:
			self.quickpanel_template_list.append(project_template.to_quickpanel_item())

		self.window.show_quick_panel(self.quickpanel_template_list, self.on_picked_template)
	def create_project(self, project_structure, new_project_name):
		# if the user wants to use the sublime project format, the project file should be a directory below
		# the actual project
		project_subfolder = ""
		print "create_project" +"\n";
		if self.use_sublime_project_format:
			project_subfolder = new_project_name

		# define the paths, which are used for copy
		source = os.path.join(project_structure.path, "contents")

		self.project_folder = os.path.join(self.user_project_path, new_project_name)
		destination = os.path.join(self.user_project_path, new_project_name, project_subfolder)

		# decide how the files should be copied
		if os.path.exists(self.project_folder) and not self.use_counter_if_project_folder_already_exists:
			sublime.error_message("Project %s already exists!" % new_project_name)
		elif os.path.exists(self.project_folder) and self.use_counter_if_project_folder_already_exists:
			counter = 2

			solution_found = False

			while not solution_found:
				new_project_name_with_counter = "%s%s" % (new_project_name, counter)

				if self.use_sublime_project_format:
					project_subfolder = new_project_name_with_counter
					
				self.project_folder = os.path.join(self.user_project_path, new_project_name_with_counter)
				destination = os.path.join(self.user_project_path, new_project_name_with_counter, project_subfolder)

				if os.path.exists(self.project_folder):
					counter += 1
				else:
					self.copy_folder(source, destination, new_project_name_with_counter)
					solution_found = True
		else:
			self.copy_folder(source, destination, new_project_name)
		
		# if the project intends to use any custom scripts, check for a scripts folder
		script_path=os.path.join(project_structure.path, "scripts");

		if os.path.exists(script_path):
			copytree(script_path, osjoin(self.project_folder, "scripts"))
Example #11
  def _set_locators(s):
    s._attr_dir_path = osjoin(s._yamldb_dir, 'attr')
    s._recs_path = osjoin(s._yamldb_dir, 'recordings.yaml')
    s._event_dir = osjoin(s._yamldb_dir,'events')
    s._event_def_path = osjoin(s._event_dir, 'event_def.yaml')

    s._event_path_fn = (
      lambda experiment : 
        osjoin(s._event_dir, '_'.join([experiment, 'events.txt']))
    )

    # Environment
    s._env_path = osjoin(s._yamldb_dir, 'environment.yaml')
    s._env_def = yaml.load(open(s._env_path, 'r'))

    # Recording Meta 
    s._meta_path_fn = lambda exp, sub : osjoin(s._env_def['data_base'], exp, 
      'meta', sub, '%s_%s_recordingDefinition.json' % (sub, exp))
Example #12
 def addModules(self):
     """auto-add or re-auto-add modules in self.moduleDirs
         NOTE: this function should be called if the bot is to have any
               modules imported. This is not automatically called simply
               for flexibility to the programmer."""
     for mdir in self.moduleDirs:
         for dirpath, subdirs, files in walk(mdir):
             for f in files:
                 if not f.endswith('.py'):
                     continue
                 try:
                     module = load_source(f[:-3], osjoin(dirpath, f))
                     for attr in dir(module):
                         if isfunction(getattr(module, attr)):
                             print('trying to load module: {}'.format(attr))
                             eventHandler = getattr(module, attr)
                             KWARGS = {}
                             if hasattr(eventHandler, 'nick'):
                                 KWARGS['nick'] = eventHandler.nick
                             if hasattr(eventHandler, 'ident'):
                                 KWARGS['ident'] = eventHandler.ident
                             if hasattr(eventHandler, 'hostname'):
                                 KWARGS['hostname'] = self.hostname
                             if hasattr(eventHandler, 'command'):
                                 KWARGS['command'] = eventHandler.command
                             if hasattr(eventHandler, 'argument'):
                                 KWARGS['argument'] = eventHandler.argument
                             if hasattr(eventHandler, 'message'):
                                 KWARGS['message'] = eventHandler.message
                             self.eventHandlers.append(
                                 eventhandler.EventHandler(eventHandler,
                                     **KWARGS));
                             self.importedModules[eventHandler] =\
                                 self.eventHandlers[-1]
                             print('\x1b[32;1mmodule loaded: {}\x1b[0m'.\
                                 format(module))
                     ## END> for attr in dir(module)
                 except Exception as e:
                     print('\x1b[31;1mError loading file {}: {}\x1b[0m'.\
                         format(f, e))
Example #13
    def saveVectorImage(self):
        self.task_is_done = False
        new_name = self.raster_filename.replace(self.raster_format, 'svg')
        dialog = wx.FileDialog(
            self,
            message="Save file as ...",
            defaultDir=self.raster_directory,
            defaultFile=new_name,
            wildcard="SVG file (*.svg)|*.svg",
            style=wx.SAVE | wx.OVERWRITE_PROMPT
        )

        if dialog.ShowModal() == wx.ID_OK:
            self.svg_filename = dialog.GetFilename()
            self.svg_directory = dialog.GetDirectory()
            self.svg_path = osjoin(self.svg_directory, self.svg_filename)

            if not save(self.svg_path):
                message = "Cannot save the file with name\n'%s'." % self.svg_path
                self.showErrorMessage(message)
            else:
                self.task_is_done = True

        dialog.Destroy()
def calculate_focal_length(image_width, sensor_width, camera_params_path):
    focal_length = np.load(osjoin(camera_params_path, 'FocalLengthMm.npy'),
                           allow_pickle=True)
    focal_length_pixel = (focal_length / sensor_width) * image_width
    return focal_length_pixel
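
The conversion is focal_length_pixel = (focal_length_mm / sensor_width_mm) * image_width_px, with both widths measured along the same axis. A worked example with illustrative numbers, assuming FocalLengthMm.npy stores 4.3:

# A 4.3 mm lens on a 6.17 mm-wide sensor, images 4000 px wide:
# (4.3 / 6.17) * 4000 ≈ 2787.7 px
f_px = calculate_focal_length(image_width=4000, sensor_width=6.17,
                              camera_params_path=camera_params_path)
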
Example #15
for i,tm in enumerate(tau):
  t = tm %(2*pi) # Stride fractions must be between 0 and 2*pi
  
  # Calculate stability margins
  M.append([p0.margin(t),p1.margin(t)])

  # Plot the convex hull of this pattern of motion for the forward walk
  figure(1)
  clf()
  plot(*p0.support(t).boundary.coords.xy,color='b',lw=4)
  #cv = (t-p0.fon)/((p0.foff-p0.fon)%(2*pi))%1.0
  scatter(*array(p0.support(t).boundary.coords.xy)[:,:-1],s=900,c='m')
  axis([-1.5,1.5,-1.5,1.5])
  axis('off')
  tight_layout()
  savefig(osjoin(animDir,"forwardgait"+str(i).zfill(5)))
  
  # Plot the convex hull of this pattern of motion for the reverse walk
  figure(2)
  clf()
  plot(*p1.support(t).boundary.coords.xy,color='b',lw=4)
  scatter(*array(p1.support(t).boundary.coords.xy)[:,:-1],s=900,c='r')
  axis([-1.5,1.5,-1.5,1.5])
  axis('off')
  tight_layout()
  savefig(osjoin(animDir,"reversegait"+str(i).zfill(5)))

M = array(M)

# Plot the quasi-static stability margin for the forward and reverse walker 
# as a function of position in the cycle.
Example #16
path_dir = sys.argv[-2]
casa_path = sys.argv[-1]  # e.g. /usr/local/bin/CASA-4.6/casa-release-4.6.0-el6/

# flux model vs time
integ = 10.
total_time = 10. * 60.
num_points = total_time / integ
time_bins = np.arange(integ, total_time + integ, integ)  # end of bins
noise = np.random.randn(len(time_bins))
fluxarray = 5. * np.sin((2 * np.pi / 160) * time_bins) + 10 + noise * 0.5
plt.plot(time_bins, fluxarray, marker='o', ls='')

# array config
# MAC:configf='/Applications/CASA-4.6.app/Contents/data/alma/simmos/vla.c.cfg'
# configf = '/usr/local/bin/CASA-4.6/casa-release-4.6.0-el6/data/alma/simmos/vla.c.cfg'
configf = osjoin(casa_path, 'data/alma/simmos/vla.c.cfg')
data = ascii.read(configf, data_start=1,
                  names=['X', 'Y', 'Z', 'DIAM', 'ANT'], guess=True)
xx = data['X']
yy = data['Y']
zz = data['Z']
diam = data['DIAM']

# src info
loca = "J2000 10h00m00.00s -30d00m00.0s"
hrsref = loca.split(' ')[1].split('h')[0] + 'h'
minref = loca.split(' ')[2].split('d')[0] + 'deg'
freq = '5GHz'
mj = "0.5arcsec"
ma = '0.1arcsec'
pa = '45.0deg'
    from turbustat.statistics.apodizing_kernels import \
        (CosineBellWindow, TukeyWindow, HanningWindow, SplitCosineBellWindow)

    # Apodizing kernel

    shape = (101, 101)
    taper = HanningWindow()
    data = taper(shape)

    plt.subplot(121)
    plt.imshow(data, cmap='viridis', origin='lower')
    plt.colorbar()
    plt.subplot(122)
    plt.plot(data[shape[0] // 2])
    plt.savefig(osjoin(fig_path, 'hanning.png'))
    plt.close()

    taper2 = CosineBellWindow(alpha=0.98)
    data2 = taper2(shape)

    plt.subplot(121)
    plt.imshow(data2, cmap='viridis', origin='lower')
    plt.colorbar()
    plt.subplot(122)
    plt.plot(data2[shape[0] // 2])
    plt.savefig(osjoin(fig_path, 'cosine.png'))
    plt.close()

    taper3 = SplitCosineBellWindow(alpha=0.3, beta=0.8)
    data3 = taper3(shape)
from pc_path import definir_path
path_git, path_datos_global = definir_path()
from os.path import join as osjoin
import networkx as nx

from sklearn.metrics import (normalized_mutual_info_score,
                            mutual_info_score,
                            adjusted_mutual_info_score)

###############################################################################
##### Import graphs
###############################################################################

dates = [2015, 2016, 2017, 2018]
gs_hip = []
names_hip = ['MLyStats_{}-10-1.gexf'.format(i) for i in dates]
paths_hip = [osjoin(path_git, 'Grafos_guardados', name) for name in names_hip]
for path in paths_hip:
    gs_hip.append(nx.read_gexf(path))
gs_lsa = []
names_lsa = ['MLyStats_LSA_26dim_q0.005_{}-10-1.gexf'.format(i) for i in dates]
paths_lsa = [osjoin(path_git, 'Grafos_guardados', name) for name in names_lsa]
for path in paths_lsa:
    gs_lsa.append(nx.read_gexf(path))

###############################################################################
##### Compute mutual information between 'infomap' and 'category'
###############################################################################

def calcular_ims(grafos, mutual_info='normal'):
    mi_function = {'normal': mutual_info_score,
                   'normalized': normalized_mutual_info_score,
Example #19
    for dirpath, dirnames, filenames in os.walk(path):
        if filenames:
            data_files[dirpath] = [f for f in filenames]
    return data_files


setup(
    name='boilerplate',
    version=boilerplate.VERSION,
    description='Easy to use tool for painless project layout templating',
    author='Kuba Janoszek',
    author_email='*****@*****.**',
    include_package_data=True,
    url='https://github.com/jqb/boilerplate/tree/ver-%s' % boilerplate.VERSION,
    packages=find_packages(),
    package_data=package_data(osjoin("boilerplate", "tmpl")),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
    scripts=['bin/boil'],
    zip_safe=False,
)


# python setup.py build sdist bdist_wheel upload
import numpy as np
from align import AlignDlib

# In[2]:

# Load the pretrained model
from model import create_model
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')

# In[3]:

# Load data from the database
import os
import pickle
from os.path import join as osjoin

PATH_NEW_DATABASE = osjoin(os.getcwd(), 'newdatabase')
PATH_DATABASE = osjoin(os.getcwd(), 'database')
PATH_DATABASE_IMAGE = osjoin(PATH_DATABASE, 'image')

if not os.path.isfile(osjoin(
        PATH_DATABASE, 'x_vector.pkl')) or not os.path.isfile(
            osjoin(PATH_DATABASE, 'x_label.pkl')) or not os.path.isfile(
                osjoin(PATH_DATABASE, 'x_name.pkl')):
    x_vector = []
    x_label = []
    x_name = []
else:
    with open(osjoin(PATH_DATABASE, 'x_vector.pkl'), 'rb') as f:
        x_vector = pickle.load(f)
    with open(osjoin(PATH_DATABASE, 'x_label.pkl'), 'rb') as f:
        x_label = pickle.load(f)
def perform_plot_gridsearch(names, out_dirs):
    '''
    Function for grid scores plotting (working with scikit 0.20)
    '''
    logger = get_logger()

    for name, out_dir in zip(names, out_dirs):

        # Read written results
        gps = parse_yaml(osjoin(out_dir, "parameters.yaml"))
        score_obj = pickle.load(openfile(osjoin(out_dir, "results.pkl"), "rb"))

        param_keys = [f"param_{key}" for key in gps["params"].keys()]
        if not param_keys:
            logger.warning("Add at least 1 parameter (even just 1 value)")
            continue

        # Re-arrange scoring such that the refitted one is always on top
        score_names = gps["scoring"]
        refit_score = gps["refit"]
        del score_names[score_names.index(refit_score)]
        score_names.insert(0, refit_score)

        # Extract scores
        x_labels = []
        y_values = {}
        y_errors = {}

        for sn in score_names:
            y_values[sn] = {"train": [], "test": []}
            y_errors[sn] = {"train": [], "test": []}

        # Get indices of values to put on x-axis and identify parameter combination
        values_indices = [
            range(len(values)) for values in gps["params"].values()
        ]

        y_axis_mins = {sn: 9999 for sn in score_names}
        y_axis_maxs = {sn: -9999 for sn in score_names}
        for indices, case in zip(
                itertools.product(*values_indices),
                itertools.product(*list(gps["params"].values()))):
            df_case = score_obj.copy()
            for i_case, i_key in zip(case, param_keys):
                df_case = df_case.loc[df_case[i_key] ==
                                      df_case[i_key].dtype.type(i_case)]

            x_labels.append(",".join([str(i) for i in indices]))
            # As we just nailed it down to one value
            for sn in score_names:
                for tt in ("train", "test"):
                    y_values[sn][tt].append(
                        df_case[f"mean_{tt}_{sn}"].values[0])
                    y_errors[sn][tt].append(
                        df_case[f"std_{tt}_{sn}"].values[0])
                    y_axis_mins[sn] = min(y_axis_mins[sn],
                                          y_values[sn][tt][-1])
                    y_axis_maxs[sn] = max(y_axis_maxs[sn],
                                          y_values[sn][tt][-1])

        # Prepare text for parameters
        text_parameters = "\n".join(
            [f"{key}: {values}" for key, values in gps["params"].items()])

        # To determine fontsizes later
        figsize = (35, 18 * len(score_names))
        fig, axes = plt.subplots(len(score_names),
                                 1,
                                 sharex=True,
                                 gridspec_kw={"hspace": 0.05},
                                 figsize=figsize)
        ax_plot = dict(zip(score_names, axes))

        # The axes to put the parameter list
        ax_main = axes[-1]
        # The axes with the title being on top
        ax_top = axes[0]

        points_per_inch = 72
        markerstyles = ["o", "+"]
        markersize = 20

        for sn in score_names:
            ax = ax_plot[sn]
            ax_min = y_axis_mins[sn] - (y_axis_maxs[sn] -
                                        y_axis_mins[sn]) / 10.
            ax_max = y_axis_maxs[sn] + (y_axis_maxs[sn] -
                                        y_axis_mins[sn]) / 10.
            ax.set_ylim(ax_min, ax_max)
            ax.set_ylabel(f"mean {sn}", fontsize=20)
            ax.get_yaxis().set_tick_params(labelsize=20)

            for j, tt in enumerate(("train", "test")):
                markerstyle = markerstyles[j % len(markerstyles)]

                ax.errorbar(range(len(x_labels)),
                            y_values[sn][tt],
                            yerr=y_errors[sn][tt],
                            ls="",
                            marker=markerstyle,
                            markersize=markersize,
                            label=f"{sn} ({tt})")

                # Add values to points
                ylim = ax.get_ylim()
                plot_labels_offset = (ylim[1] - ylim[0]) / 40
                for x, y in enumerate(y_values[sn][tt]):
                    ax.text(x, y - plot_labels_offset, f"{y:.4f}", fontsize=20)

        ax_main.set_xlabel("parameter indices", fontsize=20)
        ax_top.set_title(f"Grid search {name}", fontsize=30)
        ax_main.get_xaxis().set_tick_params(labelsize=20)
        ax_main.set_xticks(range(len(x_labels)))
        ax_main.set_xticklabels(x_labels, rotation=45)

        text_point_size = int(4 * fig.dpi / points_per_inch * figsize[1] /
                              len(gps["params"]))
        xlim = ax_main.get_xlim()
        ylim = ax_main.get_ylim()

        xlow = xlim[0] + (xlim[1] - xlim[0]) / 100
        ylow = ylim[0] + (ylim[1] - ylim[0]) / 3
        ax_main.text(xlow, ylow, text_parameters, fontsize=text_point_size)

        for ax in ax_plot.values():
            ax.legend(loc="center right", fontsize=20)
        plotname = osjoin(out_dir, "GridSearchResults.png")
        plt.savefig(plotname)
        plt.close(fig)
Example #23
def save_stereo_calibration_params(D1, D2, E, F, K1, K2, P1, P2, Q, R, R1, R2,
                                   T, camera_params_path):
    np.save(osjoin(camera_params_path, "K1"), K1)
    np.save(osjoin(camera_params_path, "D1"), D1)
    np.save(osjoin(camera_params_path, "K2"), K2)
    np.save(osjoin(camera_params_path, "D2"), D2)
    np.save(osjoin(camera_params_path, "R"), R)
    np.save(osjoin(camera_params_path, "T"), T)
    np.save(osjoin(camera_params_path, "E"), E)
    np.save(osjoin(camera_params_path, "F"), F)
    np.save(osjoin(camera_params_path, "R1"), R1)
    np.save(osjoin(camera_params_path, "R2"), R2)
    np.save(osjoin(camera_params_path, "P1"), P1)
    np.save(osjoin(camera_params_path, "P2"), P2)
    np.save(osjoin(camera_params_path, "Q"), Q)
Example #24
    def do_GET(self):

        # Response status code

        print(self.path)

        request_gp, data_fields = self.path.split('?')

        if '.' in request_gp:
            self.send_response(403)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(bytes('invalid directory', 'utf8'))
            return

        else:
            print('got request')

            script_dir = request_gp.split('/')[1]
            script_basename = request_gp.split('/')[2]

            script_name = f'{script_basename}.py'

            print([script_dir, script_basename, script_name])

            script_path = ''

            for f in listdir(config.VIRTUAL_DIRECTORY):
                if f == script_dir:
                    print('got dir')
                    for j in listdir(
                            osjoin(config.VIRTUAL_DIRECTORY, script_dir)):
                        if j.lower() == script_name.lower():
                            script_path = osjoin(config.VIRTUAL_DIRECTORY,
                                                 script_dir, script_name)
                            break
                    else:
                        print('cannot find file')
                        self.send_response(403)
                        self.send_header('Content-type', 'text/html')
                        self.end_headers()
                        self.wfile.write(bytes('invalid request', 'utf8'))
                        return
                    break
            else:
                print('cannot find directory')
                self.send_response(403)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(bytes('invalid request', 'utf8'))
                return

            scr_spec = importlib.util.spec_from_file_location(
                script_basename, script_path)

            script_mod = importlib.util.module_from_spec(scr_spec)

            scr_spec.loader.exec_module(script_mod)

            print(script_mod.CONFIG_INFO)

            data_list = data_fields.split('&')
            if len(data_list) != len(script_mod.CONFIG_INFO['inputs']):
                print('invalid inputs')
                self.send_response(403)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(bytes('invalid number of inputs', 'utf8'))
                return

            params = []
            for i, data_item in enumerate(data_list):
                name, value = data_item.split('=')
                params.append(script_mod.CONFIG_INFO['inputs'][i](name, value))

            results = script_mod.CONFIG_INFO['main_function'](*params)

            result_data = {}
            for param in results:
                result_data[param.name] = param.value

            result_text = dumps(result_data)

            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(bytes(result_text, 'utf8'))
Example #25
import matplotlib.pyplot as plt
import pandas as pd
from os.path import join as osjoin

if __name__ == "__main__":
    # Since file paths differ between Windows and macOS/Linux, we must use
    # os.path.join (renamed osjoin here) to navigate between directories

    path_df = osjoin(osjoin("data", "process"), "toy_dataset.csv")
    df = pd.read_csv(path_df)

    plt.hist(df["monthly"])

    path_fig = osjoin(osjoin("rapport", "image"), "hist.png")
    plt.savefig(path_fig)
Example #26
    msg = "{0:<50}{1:<30}"
    hdr = msg.format("Warning","Difference (dev - ops)")
    border = "=" * len(hdr)

    # Check for basin name specifically by user
    if args.kw is not None:
        ops_paths = [p for p in ops_paths if args.kw in p]

        # Warn user if no matches are found
        if len(ops_paths) == 0:
            out.error('{} not found in any ops paths'.format(args.kw))

    for r in ops_paths:
        basin_name = path_split(r)[-2]
        ops_f  = osjoin(r,"topo.nc")
        dev_f = osjoin(dev_dir,basin_name, "model_setup", "basin_setup", "topo.nc")
        ops = Dataset(ops_f)
        dev = Dataset(dev_f)
        out.msg("Checking extents of {}...".format(basin_name))

        warnings = []
        dimensional_issue = False

        for v in ["x", "y"]:

            # Check extents
            for op in ["min", "max"]:

                # Check the coordinates
                mo = getattr(ops.variables[v][:], op)()
Example #27
 def extract_files(self, files, tempdir):
     call([
         self.tar_command, self.extract_option, self.file_option, self.file,
         self.directory_option, tempdir
     ] + files)
     return ((f, osjoin(tempdir, f)) for f in files)
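
A usage sketch with hypothetical instance attributes; the method shells out to tar and lazily yields (member, extracted_path) pairs:

# Assumed attributes, e.g. tar_command='tar', extract_option='-x',
# file_option='-f', directory_option='-C', file='archive.tar'.
for member, path in archive.extract_files(['a.txt', 'b/c.txt'], '/tmp/scratch'):
    print(member, '->', path)
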
            velocity = make_3dfield(cube_size, powerlaw=vel, amp=vel_amp,
                                    randomseed=rseed_vel) * u.km / u.s

            # Deal with negative density values.
            density = make_3dfield(cube_size, powerlaw=dens, amp=dens_amp,
                                   randomseed=rseed_dens) * u.cm**-3
            density += density.std()
            density[density.value < 0.] = 0. * u.cm**-3

            # Save the raw 3D fields

            filename = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}_size_{3}_3D_vel.npy"\
                .format(np.abs(dens), np.abs(vel), i, cube_size)

            np.save(osjoin(out_dir, filename), velocity.value)

            filename = "fBM_density_{0:.2f}_velocity_{1:.2f}_rep_{2}_size_{3}_3D_dens.npy"\
                .format(np.abs(dens), np.abs(vel), i, cube_size)

            np.save(osjoin(out_dir, filename), density.value)

            # Also fit and record the best-fit field index
            vel_spec = threeD_pspec(velocity.value)
            vel_slope = linregress(np.log10(vel_spec[0][:-1]),
                                   np.log10(vel_spec[1][:-1]))
            vel_slopes.append(vel_slope.slope)

            dens_spec = threeD_pspec(density.value)
            dens_slope = linregress(np.log10(dens_spec[0][:-1]),
                                    np.log10(dens_spec[1][:-1]))
def do_gridsearch(names,
                  classifiers,
                  grid_params,
                  x_train,
                  y_train,
                  nkfolds,
                  out_dirs,
                  ncores=-1):
    """Hyperparameter grid search for a list of classifiers

    Given a list of classifiers, do a hyperparameter grid search based on a corresponding
    set of parameters

    Args:
        names: iterable of classifier names
        classifiers: iterable of classifiers
        grid_params: iterable of parameters used to perform the grid search
        x_train: feature dataframe
        y_train: targets dataframe
        nkfolds: int, cross-validation generator or an iterable
        out_dirs: Write parameters and pickle of summary dataframe
        ncores: number of cores to distribute jobs to
    Returns:
        lists of grid search models, the best model and scoring dataframes
    """

    logger = get_logger()

    for clf_name, clf, gps, out_dir in zip(names, classifiers, grid_params,
                                           out_dirs):
        if not gps:
            logger.info("Nothing to be done for grid search of model %s",
                        clf_name)
            continue
        logger.info("Grid search for model %s with following parameters:",
                    clf_name)
        print_dict(gps)

        # To work for probabilities. This will call model.decision_function or
        # model.predict_proba as it is done for the nominal ROC curves as well to decide on the
        # performance
        scoring = get_scorers(gps["scoring"])

        grid_search = GridSearchCV(clf,
                                   gps["params"],
                                   cv=nkfolds,
                                   refit=gps["refit"],
                                   scoring=scoring,
                                   n_jobs=ncores,
                                   verbose=2,
                                   return_train_score=True)
        grid_search.fit(x_train, y_train)
        cvres = grid_search.cv_results_

        # Save the results as soon as we have them in case something goes wrong later
        # (would be quite unfortunate to lose grid search results...)
        out_file = osjoin(out_dir, "results.pkl")
        pickle.dump(pd.DataFrame(cvres), openfile(out_file, "wb"), protocol=4)
        # Parameters
        dump_yaml_from_dict(gps, osjoin(out_dir, "parameters.yaml"))
        savemodels((clf_name, ), (grid_search.best_estimator_, ), out_dir, "")
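
A call sketch with hypothetical inputs; each grid_params entry must carry the 'params', 'scoring', and 'refit' keys read above, and x_train/y_train are assumed prepared dataframes:

from sklearn.ensemble import RandomForestClassifier

do_gridsearch(names=["rf"],
              classifiers=[RandomForestClassifier()],
              grid_params=[{"params": {"n_estimators": [50, 100]},
                            "scoring": ["roc_auc"], "refit": "roc_auc"}],
              x_train=x_train, y_train=y_train,
              nkfolds=5, out_dirs=["./gridsearch_rf"], ncores=2)
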
Example #30
from staticstab import Polyped

from numpy import array,ones,pi,linspace
from pylab import plot,scatter,figure,show,cm,savefig,xlabel,ylabel,title

from os.path import join as osjoin

Nsubcyc = 500 # Number of subcycles to calculate
Nlatsep = 10 # Number of lateral separations to calculate
amax = 2.0
amin = 0.1
tau = linspace(0,2*pi,Nsubcyc)
# Define polyped parameters
g = 2.0*pi*ones((3))/4.0 # Relative phase
d = 2.0*pi*ones((4))*(3.0/4.0) # Duty cycle

for a in linspace(amin,amax,Nlatsep):
  x = array([[-0.4,a],[1.2,-a],[-0.8,-a],[1,a]]) # Touchdown positions
  lda = 0.3 # Stride length
  
  # Create a forward and reverse walker
  p0 = Polyped(g,d,x,lda)
  p1 = Polyped(-g,d,x,lda)
    
  plot(tau,array([p0.margin(t)-p1.margin(t) for t in tau]),c=cm.hot((a-amin)/(amax-amin)))

xlabel("fraction of stride")
ylabel("stability margin difference")
title("stability margin differences between forward and reverse walker\n(hotter colour is wider body)")
savefig(osjoin("figure","quadconfigs.png"))
Example #31
    def test_upload_and_retrieve(self):
        # Upload a random file
        _file = osjoin(self.testdir, 'test_file')
        test_md5 = write_random_file(_file)
        rv = self.app.post('/',
                           data={
                               'file':
                               (open(_file,
                                     'r'), 'test_pastefile_random.file'),
                           })
        self.assertEquals(rv.get_data(), "http://localhost/%s\n" % (test_md5))
        self.assertEquals(rv.status, '200 OK')

        # Get the file
        rv = self.app.get("/%s" % (test_md5), headers={'User-Agent': 'curl'})
        gotten_file = osjoin(self.testdir, 'gotten_test_file')
        gotten_test_md5 = write_file(filename=gotten_file,
                                     content=rv.get_data())

        self.assertEquals(test_md5, gotten_test_md5)
        self.assertEquals(rv.status, '200 OK')
        self.assertEquals(rv.headers['Content-Disposition'],
                          'attachment; filename=test_pastefile_random.file')

        # Try to re-upload the same file. Should return the same url
        rv = self.app.post('/',
                           data={
                               'file':
                               (open(_file,
                                     'r'), 'test_pastefile_random.file'),
                           })
        self.assertEquals(rv.get_data(), "http://localhost/%s\n" % (test_md5))

        # Try to upload a second file with the same filename. Both files should still be available
        _file_bis = osjoin(self.testdir, 'test_file')
        test_md5_bis = write_random_file(_file_bis)
        rv = self.app.post('/',
                           data={
                               'file':
                               (open(_file_bis,
                                     'r'), 'test_pastefile_random.file'),
                           })
        self.assertEquals(rv.get_data(),
                          "http://localhost/%s\n" % (test_md5_bis))

        db_content = json.load(open(flaskr.app.config['FILE_LIST']))
        md5s = sorted([md5 for md5 in db_content.keys()])
        self.assertEquals(sorted([test_md5, test_md5_bis]), md5s)

        # can't lock the database, post should work for an existing file (using last test file)
        with mock.patch('pastefile.controller.JsonDB._lock',
                        mock.Mock(return_value=False)):
            # Take file from last test
            rv = self.app.post('/',
                               data={
                                   'file':
                                   (open(_file_bis,
                                         'r'), 'test_pastefile_random.file'),
                               })
        self.assertEquals(rv.get_data(),
                          "http://localhost/%s\n" % (test_md5_bis))

        # can't lock the database, get should work (using last test file)
        with mock.patch('pastefile.controller.JsonDB._lock',
                        mock.Mock(return_value=False)):
            # Take file from last test
            rv = self.app.get("/%s" % (test_md5_bis),
                              headers={'User-Agent': 'curl'})
        gotten_file = osjoin(self.testdir, 'gotten_test_file')
        gotten_test_md5 = write_file(filename=gotten_file,
                                     content=rv.get_data())
        self.assertEquals(test_md5_bis, gotten_test_md5)
        self.assertEquals(rv.status, '200 OK')

        # can't lock the database, post should NOT work for new file
        with mock.patch('pastefile.controller.JsonDB._lock',
                        mock.Mock(return_value=False)):
            _file = osjoin(self.testdir, 'test_file')
            test_md5 = write_random_file(_file)
            rv = self.app.post('/',
                               data={
                                   'file':
                                   (open(_file,
                                         'r'), 'test_pastefile_random.file'),
                               })
            self.assertTrue('Unable to upload the file' in rv.get_data())
Example #32
 def leave_doc_trail(self, path):
     with open(osjoin(path, self.DOC_TRAIL), 'wb') as f:
         pass  # just touch an empty marker file
    '''
    from urllib import quote_plus
    generator = [(quote_plus(str(xx)) for xx in row) for row in x[fields]]
    return ["+".join(gen) for gen in generator]


# Get parameters and set up variables
input_origin       = arcpy.GetParameterAsText(0)
origin_fields      = arcpy.GetParameter(1)
input_destination  = arcpy.GetParameterAsText(2)
destination_fields = arcpy.GetParameter(3)
output_directory   = arcpy.GetParameterAsText(4)
output_filename    = arcpy.GetParameterAsText(5)

# Combine output location from parameters
output_location    = osjoin(output_directory, output_filename)

# Prefix ObjectID to location fields
origin_fields      = ["OID@"] + [str(field) for field in origin_fields]
destination_fields = ["OID@"] + [str(field) for field in destination_fields]
#arcpy.AddMessage(origin_fields)
#arcpy.AddMessage(destination_fields)


# Convert origins and destination layers with selected fields to NumPy arrays
origins      = FeatureClassToNumPyArray(input_origin, origin_fields)
destinations = FeatureClassToNumPyArray(input_destination, destination_fields)



# Create pairs (2-tuples) from feature IDs and API parameter strings
from turbustat.statistics import PowerSpectrum

from plotting_styles import twocolumn_figure

# Load model functions
repo_path = os.path.expanduser("~/ownCloud/project_code/DustyPowerSpectra/")
code_name = os.path.join(repo_path, "models.py")
exec(compile(open(code_name, "rb").read(), code_name, 'exec'))

# For M31, load in the HI and CO and make a gas surface density map with
# default-ish conversion factors.

data_path = os.path.expanduser("~/bigdata/ekoch/Utomo19_LGdust/")

hi_name = osjoin(data_path, "M31_HI",
                 "M31_14A_HI_contsub_width_04kms.image.pbcor.mom0.Kkms.fits")

co_name = osjoin(data_path, "M31_CO", "m31_iram_Kkms.fits")

dust_name = osjoin(
    data_path, "M31",
    r"m31_dust.surface.density_FB.beta=1.8_gauss46.3_regrid_bksub.fits")

co10_mass_conversion = 4.8 * (u.Msun / u.pc**2) / (u.K * u.km / u.s)

# Note that the top two conversions contain a 1.4x correction for He.
# So they will give the atomic mass, not the HI mass!
hi_mass_conversion = 0.0196 * (u.M_sun / u.pc**2) / (u.K * u.km / u.s)

hi_proj = Projection.from_hdu(fits.open(hi_name))
Example #35
run_bispec = True
run_pca = True
run_pdf = False
run_pspec = False
run_scf = False
run_moments = False
run_tsallis = False
run_vca = False
run_vcs = False
run_wavelet = False

# Bispectrum
if run_bispec:
    from turbustat.statistics import BiSpectrum

    moment0 = fits.open(
        osjoin(data_path, "Design4_flatrho_0021_00_radmc_moment0.fits"))[0]

    bispec = BiSpectrum(moment0)
    bispec.run(verbose=True,
               nsamples=10000,
               save_name=osjoin(fig_path, "bispectrum_design4.png"))

    # With mean subtraction
    bispec2 = BiSpectrum(moment0)
    bispec2.run(nsamples=10000, mean_subtract=True, seed=4242424)

    # Plot comparison w/ and w/o mean sub

    plt.subplot(121)
    plt.imshow(bispec.bicoherence, vmin=0, vmax=1, origin='lower')
    plt.title("Without mean subtraction")
Example #36
try:
    import pycountry
except ImportError:
    pycountry = None

if __name__ == '__main__':
    for j in ('', '.'):
        if j in sys.path:
            sys.stderr.write('Warning: deleting %r from sys.path\n' % j)
            del sys.path[sys.path.index(j)]
    #
    a = os.path.realpath(sys.argv[0])
    a = os.path.dirname(a)
    a = os.path.dirname(a)
    COLDOC_SRC_ROOT = a
    a = osjoin(a, 'ColDocDjango')
    assert os.path.isdir(a), a
    if a not in sys.path:
        sys.path.insert(0, a)
    del a
    #
    from ColDoc import loggin

import logging
logger = logging.getLogger(__name__)

############# django

import django

from django.db import transaction
    # Keep only the graphs of the visited categories
    graphs = {
        date: graphs[date].subgraph(data[date]['names'])
        for date in dates
    }

    ### Enrichment by category

    ## Creation of the category_mapping
    ## Since this is a manual process, it is done once and the result saved
    # category_mapping = get_descendantsdict(children, 1)
    # category_mapping = category_mapping_helper(category_mapping)
    # with open(osjoin(path_datos_global, 'category_mapping_MLyStats.json'), 'w') as fp:
    #     json.dump(cat_map_4, fp, indent=4)
    ## Import it
    with open(osjoin(path_datos_global, 'category_mapping_MLyStats.json'),
              'r') as fp:
        category_mapping = json.load(fp)

    ### Enrich the graphs
    category_info = (category_mapping, names_ml, names_st)
    enrich_history(graphs, data, category_info, method='mapping_MLyStats')

    ### Add infomap clustering
    for date, g in graphs.items():
        calculate_infomap(g, directed=True)

    # Save the graphs
    save_graphs(graphs, 'MLyStats', osjoin(path_git, 'Grafos_guardados'))

    ### This is for the case where calculate_infomap requires the giant component
Example #38
import xbmcaddon
from xbmcvfs import translatePath
from xbmcgui import Dialog, DialogProgress
from os.path import join as osjoin

addon = xbmcaddon.Addon(uservar.pluginid)
addoninfo = addon.getAddonInfo
addon_id = addoninfo('id')
addon_version = addoninfo('version')
addon_name = addoninfo('name')
addon_icon = addoninfo("icon")
addon_fanart = addoninfo("fanart")
addon_profile = translatePath(addoninfo('profile'))
addon_path = translatePath(addoninfo('path'))
local_string = addon.getLocalizedString
dialog = Dialog()
dp = DialogProgress()
resources = osjoin(addon_path, 'resources/')
data = osjoin(resources, 'data')
icons = osjoin(resources, 'icons')
search_json = osjoin(addon_profile, 'search.json')
customiser_json = osjoin(addon_profile, 'customiser.json')
fav_json = osjoin(addon_profile, 'user_fav.json')
m3udata_json = osjoin(addon_profile, 'm3udata.json')
recentplayed_json = osjoin(addon_profile, 'recent_played.json')
icon_search = osjoin(icons, 'search.png')
icon_fav = osjoin(icons, 'favorites.png')
icon_recent = osjoin(icons, 'recent.png')
icon_settings = osjoin(icons, 'settings.png')

iso_country_codes = 'http://geohack.net/gis/wikipedia-iso-country-codes.csv'
Example #39
    def test_get(self):
        cm = self.contents_manager

        untitled_nb = 'Untitled.ipynb'
        untitled_txt = 'untitled.txt'
        for prefix, real_dir in iteritems(self.temp_dir_names):
            # Create a notebook
            model = cm.new_untitled(path=prefix, type='notebook')
            name = model['name']
            path = model['path']

            self.assertEqual(name, untitled_nb)
            self.assertEqual(path, pjoin(prefix, untitled_nb))
            self.assertTrue(exists(osjoin(real_dir, untitled_nb)))

            # Check that we can 'get' on the notebook we just created
            model2 = cm.get(path)
            assert isinstance(model2, dict)
            self.assertDictContainsSubset(
                {
                    'name': name,
                    'path': path
                },
                model2,
            )

            nb_as_file = cm.get(path, content=True, type='file')
            self.assertDictContainsSubset(
                {
                    'name': name,
                    'path': path,
                    'format': 'text'
                },
                nb_as_file,
            )
            self.assertNotIsInstance(nb_as_file['content'], dict)

            nb_as_bin_file = cm.get(path=path,
                                    content=True,
                                    type='file',
                                    format='base64')
            self.assertDictContainsSubset(
                {
                    'name': name,
                    'path': path,
                    'format': 'base64'
                },
                nb_as_bin_file,
            )
            self.assertNotIsInstance(nb_as_bin_file['content'], dict)

            # Test notebook in sub-directory
            sub_dir = 'foo'
            mkdir(osjoin(real_dir, sub_dir))
            prefixed_sub_dir = pjoin(prefix, sub_dir)

            cm.new_untitled(path=prefixed_sub_dir, ext='.ipynb')
            self.assertTrue(exists(osjoin(real_dir, sub_dir, untitled_nb)))

            sub_dir_nbpath = pjoin(prefixed_sub_dir, untitled_nb)
            model2 = cm.get(sub_dir_nbpath)
            self.assertDictContainsSubset(
                {
                    'type': 'notebook',
                    'format': 'json',
                    'name': untitled_nb,
                    'path': sub_dir_nbpath,
                },
                model2,
            )
            self.assertIn('content', model2)

            # Test .txt in sub-directory.
            cm.new_untitled(path=prefixed_sub_dir, ext='.txt')
            self.assertTrue(exists(osjoin(real_dir, sub_dir, untitled_txt)))

            sub_dir_txtpath = pjoin(prefixed_sub_dir, untitled_txt)
            file_model = cm.get(path=sub_dir_txtpath)
            self.assertDictContainsSubset(
                {
                    'content': '',
                    'format': 'text',
                    'mimetype': 'text/plain',
                    'name': 'untitled.txt',
                    'path': sub_dir_txtpath,
                    'type': 'file',
                    'writable': True,
                },
                file_model,
            )
            self.assertIn('created', file_model)
            self.assertIn('last_modified', file_model)

            # Test directory in sub-directory.
            sub_sub_dirname = 'bar'
            sub_sub_dirpath = pjoin(prefixed_sub_dir, sub_sub_dirname)
            cm.save(
                {
                    'type': 'directory',
                    'path': sub_sub_dirpath
                },
                sub_sub_dirpath,
            )
            self.assertTrue(exists(osjoin(real_dir, sub_dir, sub_sub_dirname)))
            sub_sub_dir_model = cm.get(sub_sub_dirpath)
            self.assertDictContainsSubset(
                {
                    'type': 'directory',
                    'format': 'json',
                    'name': sub_sub_dirname,
                    'path': sub_sub_dirpath,
                    'content': [],
                },
                sub_sub_dir_model,
            )

            # Test list with content on prefix/foo.
            dirmodel = cm.get(prefixed_sub_dir)
            self.assertDictContainsSubset(
                {
                    'type': 'directory',
                    'path': prefixed_sub_dir,
                    'name': sub_dir,
                },
                dirmodel,
            )
            self.assertIsInstance(dirmodel['content'], list)
            self.assertEqual(len(dirmodel['content']), 3)

            # Request each item in the subdirectory with no content.
            nbmodel_no_content = cm.get(sub_dir_nbpath, content=False)
            file_model_no_content = cm.get(sub_dir_txtpath, content=False)
            sub_sub_dir_no_content = cm.get(sub_sub_dirpath, content=False)

            for entry in dirmodel['content']:
                # Order isn't guaranteed by the spec, so this is a hacky way of
                # verifying that all entries are matched.
                if entry['path'] == sub_sub_dir_no_content['path']:
                    self.assertEqual(entry, sub_sub_dir_no_content)
                elif entry['path'] == nbmodel_no_content['path']:
                    self.assertEqual(entry, nbmodel_no_content)
                elif entry['path'] == file_model_no_content['path']:
                    self.assertEqual(entry, file_model_no_content)
                else:
                    self.fail("Unexpected directory entry: %s" % entry)
exec(compile(open(code_name, "rb").read(), code_name, 'exec'))

twocolumn_figure()

gals = {
    'LMC': 50.1 * u.kpc,
    'SMC': 62.1 * u.kpc,
    'M31': 744 * u.kpc,
    'M33': 840 * u.kpc
}

# Running on SegFault w/ data on bigdata
data_path = os.path.expanduser("~/bigdata/ekoch/Utomo19_LGdust/")

# Make a plot output folder
plot_folder = osjoin(data_path, "summary_plots")
if not os.path.exists(plot_folder):
    os.mkdir(plot_folder)

fitinfo_dict = dict()

fitinfo_dict["LMC"] = {
    'filename':
    r"lmc_dust.surface.density_FB.beta=1.8_gauss53.4_regrid_mwsub.fits",
    'beam': Beam(53.4 * u.arcsec),
    'apod_kern': None,
    'low_int_cut': None,
    'high_int_cut': None,
    'low_cut': None,
    'high_cut': None,
    'distance': 50.1 * u.kpc,
            self.assertEqual(len(dirmodel['content']), 3)

            # Request each item in the subdirectory with no content.
            nbmodel_no_content = cm.get(sub_dir_nbpath, content=False)
            file_model_no_content = cm.get(sub_dir_txtpath, content=False)
            sub_sub_dir_no_content = cm.get(sub_sub_dirpath, content=False)

            for entry in dirmodel['content']:
                # Order isn't guaranteed by the spec, so this is a hacky way of
                # verifying that all entries are matched.
                if entry['path'] == sub_sub_dir_no_content['path']:
                    self.assertEqual(entry, sub_sub_dir_no_content)
                elif entry['path'] == nbmodel_no_content['path']:
                    self.assertEqual(entry, nbmodel_no_content)
                elif entry['path'] == file_model_no_content['path']:
                    self.assertEqual(entry, file_model_no_content)
                else:
                    self.fail("Unexpected directory entry: %s" % entry)
RCA_DIR = sys.argv[1]
ST_FILE = sys.argv[2]
# DIS_GROUP = sys.argv[3]
AGG_GINI_PATH = sys.argv[3]

st_df = pd.read_csv(ST_FILE, sep="\t", names=["COUNTRY", "ST"])
# dis_group = pd.read_csv(DIS_GROUP, sep="\t", header=None,  names=["DIS", "GROUP"])

root, basename = os.path.split(RCA_DIR)
rca_all_list = []
print(root)
print(basename)
for filename in os.listdir(root):
    if basename in filename:
        rca_df = pd.read_csv(osjoin(root, filename))
        rca_df = pd.melt(rca_df,
                         id_vars=["COUNTRY"],
                         var_name="DIS",
                         value_name="VALUES")
        year = os.path.splitext(filename)[0].split("_")[2]
        rca_df["YEAR"] = year
        rca_all_list.append(rca_df)

rca_all_df = pd.concat(rca_all_list, ignore_index=True)
# rca_all_df = rca_all_df.merge(right=dis_group, right_on="DIS", left_on="DIS")
rca_all_df = rca_all_df[rca_all_df.YEAR != "1973-2017"]

rca_all_df.merge(right=st_df, left_on="COUNTRY",
                 right_on="COUNTRY").to_csv(AGG_GINI_PATH, index=False)
Exemple #43
0
'''Utilities for opening files or URLs in the registered default application
and for sending e-mail using the user's preferred composer.

'''

__version__ = '1.0'
__all__ = ['open', 'mailto']

import os
import sys
import webbrowser
try:
    import subprocess
except ImportError:
    from os.path import join as osjoin
    sys.path.append(osjoin("..", "formularios"))
    import subprocess

from email.Utils import encode_rfc2231

_controllers = {}
_open = None


class BaseController(object):
    '''Base class for open program controllers.'''

    def __init__(self, name):
        self.name = name

    def open(self, filename):
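
For orientation, a minimal usage sketch of the public API declared in __all__. The call signatures are assumptions inferred from the docstring, since the rest of the module is truncated here:

import desktop

# Open a file or URL in the registered default application (assumed signature).
desktop.open("http://www.example.com/")

# Compose an e-mail in the user's preferred composer (assumed signature).
desktop.mailto("someone@example.com")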
Exemple #44
0
# Contour levels for the two plots
V0 = linspace(-0.7,2.5,17)+0.1
V1 = linspace(-0.875,2.5,10)
fs = 18 # Font size

# Contour plot of the minimum stability margin
Bpp = -pi/2-B*2*pi
fig = figure(figsize=(12,6),dpi=100)
ax0 = fig.add_subplot(1,2,1)
ax0.contourf(Bpp,A,Mbm,V0,cmap=cmgreen)
C = ax0.contour(Bpp,A,Mbm,V0,colors='k',linewidths=3)
ax0.clabel(C, colors='k', fontsize=fs)  # clabel's keyword is 'colors', not 'c'
ax0.set_axis_bgcolor('k')
ax0.set_xticks([-3*pi/2,-pi,-pi/2,0,pi/2])
ax0.set_xticklabels(["$-3\pi/2$","$-\pi$","$-\pi/2$","$0$","$\pi/2$"])
ylabel("duty factor (\%)")

# Plot the average stability margin
ax1 = fig.add_subplot(1,2,2)
ax1.contourf(Bpp,A,Mba,V1,cmap=cmgreen)
C = ax1.contour(Bpp,A,Mba,V1,colors='k',linewidths=3)
ax1.clabel(C, colors='k', fontsize=fs)  # clabel's keyword is 'colors', not 'c'
ax1.set_axis_bgcolor('k')
ax1.set_xticks([-3*pi/2,-pi,-pi/2,0,pi/2])
ax1.set_xticklabels(["$-3\pi/2$","$-\pi$","$-\pi/2$","$0$","$\pi/2$"])
xlabel("projected distance to trot $\lambda$ (rad)")

savefig(osjoin("figure","stabilitycontours.pdf"))
savefig(osjoin("figure","stabilitycontours.png"))
savefig(osjoin("figure","stabilitycontours.svg"))
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""
__docformat__ = "restructuredtext en"


import argparse
import os
from os.path import join as osjoin
#import shutil
import subprocess

BASE_PATH = osjoin('/', 'media', 'asb_share')
DEFAULT_STATION_PATH = osjoin(BASE_PATH, 'stations')
DEFAULT_BACKUP_PATH = osjoin(BASE_PATH, 'data')


def backup(src_path, run_path, backup_path):
    print "Backing up %s to %s" % (src_path, backup_path)
    if not os.path.isdir(backup_path):
        # shutil.copytree had errors so I have to call out to cp
        # shutil.copytree(src_path, dist_path)
        retval = subprocess.check_call(["mkdir", "-p", run_path])
        retval = subprocess.check_call(["cp", "-R", src_path, backup_path])
    else:
        print "Directory %s exists, skipping..." % backup_path

Exemple #46
0
    def extract_files(self, files, tempdir):
        call([self.unzip_command, self.file] + files +
             [self.directory_option, tempdir])
        return ((f, osjoin(tempdir, f)) for f in files)
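
This method clearly belongs to a larger archive-handler class. A minimal hypothetical shape for that class, showing where the attributes it reads could come from (every name except extract_files is an assumption):

from subprocess import call
from os.path import join as osjoin

class ZipExtractor:
    unzip_command = 'unzip'
    directory_option = '-d'  # unzip's "extract into this directory" flag

    def __init__(self, path):
        self.file = path  # path to the archive

    def extract_files(self, files, tempdir):
        call([self.unzip_command, self.file] + files +
             [self.directory_option, tempdir])
        return ((f, osjoin(tempdir, f)) for f in files)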
Exemple #47
0
PLATFORM=platform().lower()
PROJECT_DIR=dirname(abspath(__file__))

if 'windows' in PLATFORM:
    if exists(oenviron['LOCALAPPDATA']+r'\Google\Chrome\Application\chrome.exe'):
        CHROME_EXEC_PATH=oenviron['LOCALAPPDATA']+r'\Google\Chrome\Application\chrome.exe'
    else:
        CHROME_EXEC_PATH = r'c:\Program Files (x86)\Google\Chrome\Application\chrome.exe'
    CHROME_USER_DATA= oenviron['LOCALAPPDATA'] + r"/Google/Chrome/User Data"
    if exists(r'c:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'):
        CHROMEDRIVER_EXEC_PATH = r'c:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'
    else:
        CHROMEDRIVER_EXEC_PATH = oenviron['LOCALAPPDATA']+r'\Google\Chrome\Application\chromedriver.exe'
elif 'linux' in PLATFORM:
    CHROME_EXEC_PATH='/opt/google/chrome/google-chrome'
    CHROME_USER_DATA=osjoin(PROJECT_DIR,'userdata')
    CHROMEDRIVER_EXEC_PATH='chromedriver'
else:
    raise SystemError('platform should be windows or linux')

CHROME_COOKIES_PATH= osjoin(CHROME_USER_DATA,"Default/Cookies")
ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/74.0.3729.169 Mobile/13B143 Safari/601.1.46'

ua_pc = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"

BROWSERLESS_ENGINE='ws://browserless:3000' if 'linux' in PLATFORM else 'ws://localhost:4001'  # 'wss://chrome.browserless.io'
MAIL163_ENGINE = 'http://localhost:4002/mail163_login'
LINKEIN_ENGINE = 'http://localhost:4002/linkedin_login'
LOCAL_ENGINE='local'
# PYPPETEER_ENGINE selects how the browser is connected: browserless/chrome inside Docker, or 'local' for a locally installed browser
PYPPETEER_ENGINE=LOCAL_ENGINE
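
A sketch of how these constants might be consumed; the module that uses them is not shown, so this is an assumption built on the standard pyppeteer entry points launch and connect:

import asyncio
import pyppeteer

async def get_browser():
    if PYPPETEER_ENGINE == LOCAL_ENGINE:
        # Drive the locally installed Chrome with the profile located above.
        return await pyppeteer.launch(executablePath=CHROME_EXEC_PATH,
                                      userDataDir=CHROME_USER_DATA,
                                      args=['--user-agent=' + ua_pc])
    # Otherwise attach to a remote browserless/chrome instance over WebSocket.
    return await pyppeteer.connect(browserWSEndpoint=BROWSERLESS_ENGINE)

browser = asyncio.get_event_loop().run_until_complete(get_browser())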
Exemple #48
0
"""
Aggregate the nestedness and modularity calculations into one file.
"""
import fnmatch
import pandas as pd
import os
from os.path import join as osjoin
import sys

NESTMODU_DIR = sys.argv[1]
AGG_NESTMODU_PATH = sys.argv[2]

collist = ['NodfRes', 'ModuRes', 'Flag', 'YEAR']
NestModu_all = pd.DataFrame(columns=collist)

for filename in os.listdir(NESTMODU_DIR):
    NestModu_df = pd.read_csv(osjoin(NESTMODU_DIR, filename))
    year = os.path.splitext(filename)[0].split("_")[2]
    NestModu_df["YEAR"] = year
    NestModu_all = pd.concat([NestModu_all, NestModu_df], ignore_index=True)
NestModu_all.to_csv(AGG_NESTMODU_PATH, index=False)
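
fnmatch is imported above but never used; presumably the loop was meant to skip entries that are not per-year CSV files. A hedged version of the loop with that filter restored:

for filename in os.listdir(NESTMODU_DIR):
    if not fnmatch.fnmatch(filename, "*.csv"):
        continue  # skip anything that is not a per-year CSV (assumed intent)
    NestModu_df = pd.read_csv(osjoin(NESTMODU_DIR, filename))
    year = os.path.splitext(filename)[0].split("_")[2]
    NestModu_df["YEAR"] = year
    NestModu_all = pd.concat([NestModu_all, NestModu_df], ignore_index=True)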
print("Spec_compare Spectral Processing and comparison program") 
print("J. Smith v.1.1 24 June")
print("+++++++++++++++++++++++++++++++++++++++++++++++++")

# prompt the user to select the file
option = []  # List to store file number choices
specx, specy = [], []  # Collected wavenumber/absorbance columns, one per file
xs = np.linspace(100, 4500, 1000)  # Define basis for spline grid
num = 0
while num < 5:
    print("+++++++++++++++++\n++ Available Spectral Files ++\n+++++++++++++++++")
    for i in range(len(onlycsv)):
        print(i, onlycsv[i])
    option.append(int(input("-> Select a file ({}) by number or none (-1): ".format(num))))
    if option[-1] == -1:
        break
    filename = osjoin(mypath, onlycsv[option[-1]])
    print("File selected: ", onlycsv[option[-1]])
    headers = ["wavenumber", "AU", "W1", "W2", "W3", "WG"]
    data = pd.read_csv(filename, sep=',', skiprows=1, names=headers, engine='python')
    # print(data.head())
    x = data["wavenumber"]
    y = data["AU"]
    # The original tested `num == -1`, which can never be true, so specx/specy
    # were never initialised before being appended to; plain lists fix that.
    specx.append(x)
    specy.append(y)
    spl = UnivariateSpline(x, y, k=4, s=0)
    indexes = find_peaks_cwt(y, np.arange(1, 550), noise_perc=95)
    num += 1  # Advance the counter (missing in the original, leaving the five-file limit dead)
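
The xs grid defined before the loop is never used in the fragment shown; presumably the truncated remainder resamples every spectrum onto it so they can be compared point by point. A hedged sketch of that step:

# Evaluate each file's smoothing spline on the shared wavenumber grid
# (assumption: this is what the truncated remainder of the program does).
resampled = [UnivariateSpline(x, y, k=4, s=0)(xs)
             for x, y in zip(specx, specy)]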
Exemple #50
0
    # Check for a basin name specified by the user
    if args.kw is not None:
        ops_paths = [p for p in ops_paths if args.kw in p]

        # Warn the user if no matches were found
        if len(ops_paths) == 0:
            out.error('{} not found in any ops paths'.format(args.kw))

    for r in ops_paths:
        basin_name = path_split(r)[-2]
        if basin_name == 'brb':
            dev_basin_name = 'boise'
        else:
            dev_basin_name = basin_name

        ops_f = osjoin(r, "topo.nc")
        dev_f = osjoin(dev_dir, dev_basin_name, "topo", "basin_setup",
                       "topo.nc")
        if not isfile(dev_f):
            out.warn(
                "Unable to compare ops {} topo since the development version doesn't exist"
                .format(basin_name))
        else:
            ops = Dataset(ops_f)
            dev = Dataset(dev_f)
            out.msg("Checking extents of {}...".format(basin_name))

            warnings = []
            dimensional_issue = False

            for v in ["x", "y"]:
'''
# Generate some very simple but shifted test
# data.
y = simpleTestData(T=[0.1,0.0])
# Manually specify the shape to reflect in
A = array([[42.2,0.3],[0.3,4.7]])
R = EllipseReflector(2,2,A=A)
yp = R(y)
# Get ellipse
ex,ey = R.getEllipse()
pl.plot(ex,ey,c='k')
pl.scatter(*y.T,c='r',alpha=0.3)
pl.scatter(*yp.T,c='b',alpha=0.3)
pl.xlabel("horizontal position (arb)")
pl.ylabel("vertical position (arb)")
pl.savefig(osjoin("..","..","graphics","ellipseReflectExample.png"))

# Check jacobian of ellipse reflector
delta = 1e-9
Delta = delta*eye(2)
R = EllipseReflector(2,2,q=2.0)
y0 = array([[0.01,0.01]])
print R.derv(y0) - array([(R(y0+d)-R(y0))/delta for d in Delta])

y0 = array([[0.11,0.22,0.33,0.44]])
er = EllipseReflector(D2=4,D=4,q=2)
delta = 1e-9
Delta = delta*eye(4)
print array([(er(y0+d)-er(y0))/delta for d in Delta])[:,0,:]-er.derv(y0)
er = EllipseReflector(D2=4,D=3,q=1.3)
print array([(er(y0[:,:3]+d[:3])-er(y0[:,:3]))/delta for d in Delta])[:3,0,:]-er.derv(y0[:,:3])
run_mvc = False
run_pca = False
run_pdf = False
run_pspec = False
run_scf = False
run_moments = False
# run_tsallis = False
run_vca = False
run_vcs = False
run_wavelet = False

# Bispectrum
if run_bispec:
    from turbustat.statistics import Bispectrum_Distance

    moment0 = fits.open(osjoin(data_path, "Design4_flatrho_0021_00_radmc_moment0.fits"))[0]
    moment0_fid = fits.open(osjoin(data_path, "Fiducial0_flatrho_0021_00_radmc_moment0.fits"))[0]

    bispec = Bispectrum_Distance(moment0_fid, moment0,
                                 stat_kwargs={'nsamples': 10000})
    bispec.distance_metric(verbose=True,
                           save_name=osjoin(fig_path, "bispectrum_distmet.png"))

    print(bispec.surface_distance)
    print(bispec.mean_distance)

# Cramer Statistic
if run_cramer:

    from turbustat.statistics import Cramer_Distance
    def image_dir(self, filename):
        return 'memes/' + osjoin(str(self.template.name), filename)
run_mvc = False
run_pca = False
run_pdf = False
run_pspec = False
run_scf = False
run_moments = False
run_tsallis = False
run_vca = False
run_vcs = False
run_wavelet = False

# Bispectrum
if run_bispec:
    from turbustat.statistics import BiSpectrum

    moment0 = fits.open(osjoin(data_path, "Design4_flatrho_0021_00_radmc_moment0.fits"))[0]

    bispec = BiSpectrum(moment0)
    bispec.run(verbose=True, nsamples=10000,
               save_name=osjoin(fig_path, "bispectrum_design4.png"))

    # With mean subtraction
    bispec2 = BiSpectrum(moment0)
    bispec2.run(nsamples=10000, mean_subtract=True, seed=4242424)

    # Plot comparison w/ and w/o mean sub

    plt.subplot(121)
    plt.imshow(bispec.bicoherence, vmin=0, vmax=1, origin='lower')
    plt.title("Without mean subtraction")
    plt.subplot(122)
Exemple #55
0
Model and compare the stacked profiles from total_stacked_profiles_lowres.py
'''

figure_folder = allfigs_path("stacked_profiles")
if not os.path.exists(figure_folder):
    os.mkdir(figure_folder)


dr = 500 * u.pc
max_radius = (7.0 * u.kpc).to(u.pc)
wstring = "{0}{1}".format(int(dr.value), dr.unit)
maxrad_string = "{0}{1}".format(int(max_radius.value), max_radius.unit)

# Load the CO stacks

co_stackpath = lambda x: osjoin(iram_co21_14B088_data_path("smooth_2beam/stacked_spectra"), x)

total_spectrum_co_cent = OneDSpectrum.from_hdu(fits.open(co_stackpath("centroid_stacked_{}.fits".format(maxrad_string))))
total_spectrum_co_peakvel = OneDSpectrum.from_hdu(fits.open(co_stackpath("peakvel_stacked_{}.fits".format(maxrad_string))))

# Load the total HI profiles in
hi_stackpath = lambda x: osjoin(fourteenB_HI_data_wGBT_path("smooth_2beam/stacked_spectra"), x)

total_spectrum_hi_cent = OneDSpectrum.from_hdu(fits.open(hi_stackpath("centroid_stacked_{}.fits".format(maxrad_string))))
total_spectrum_hi_peakvel = OneDSpectrum.from_hdu(fits.open(hi_stackpath("peakvel_stacked_{}.fits".format(maxrad_string))))

spectra = [total_spectrum_co_cent,
           total_spectrum_co_peakvel]
hi_spectra = [total_spectrum_hi_cent,
              total_spectrum_hi_peakvel]
co_fit_vals = {}
    LICENSE
    Tom at [email protected]
    some rights reserved, 2011
"""
#
# Imports
#
# Python Imports
import sys, os, pdb, logging
from os.path import (abspath, dirname, join as osjoin, exists)
from datetime import (datetime, date, timedelta)
from random import (randint, choice, sample)
import hashlib

# Extend sys.path
PROJECT_PATH = abspath(osjoin(dirname(__file__), '../..'))
if PROJECT_PATH not in sys.path:
    sys.path.append(PROJECT_PATH)

# App Engine Imports
from google.appengine.ext import (testbed, db)

# Appswell Imports
from framework.lib.testing import (AppswellUnitTest, run_test_from_command_line)
from framework.lib import (multicache)


#
# Module Parameters
#
# Test Configuration
                         "gtol": 2.0e-5,
                         "ftol": 2.0e-7,
                         "maxiter": 100,
                         "maxls": 15
                     })

    return m_opt


#########################################################################
# MAIN
# call the function with: python <this file>
########################################################################

output_dir = "./output/"
f_log = open(osjoin(output_dir, 'log.txt'), 'a')
rat_num = sys.argv[1]
rat_idx = int(sys.argv[2])
day_idx = int(sys.argv[3])

# Days data and time steps
input_dir = "../rat-data/rat" + rat_num + "/"
alldata = sc_io_loadmat("../rat-data/finaldata.mat")
days = alldata['rat'][0][rat_idx][3][0]
days[:] = [x - days[0] for x in days]
day = days[day_idx]
steps = []
for i in range(1, len(days)):
    steps.append(days[i] - days[i - 1])

# Constant inputs for optimization
Exemple #58
0
    def extract_files(self, files, tempdir):
        call([self.p7z_command, self.extract_option,
              self.directory_option + tempdir, self.file] + files)
        return ((f, osjoin(tempdir, f)) for f in files)
    X = array([(2 + cos(x0[..., 0])) * cos(x0[..., 1]),
               (2 + cos(x0[..., 0])) * sin(x0[..., 1]),
               -sin(x0[..., 0])]).T  # Training data
    Y = array([(2 + cos(som.xmap[0])) * cos(som.xmap[1]),
               (2 + cos(som.xmap[0])) * sin(som.xmap[1]),
               -sin(som.xmap[0])]).T  # SOM
    theta = linspace(0., 2. * pi, 100)
    u, v = meshgrid(theta, theta)
    Z = array([(2 + cos(u)) * cos(v), (2 + cos(u)) * sin(v),
               -sin(u)]).T  # The torus

    ax = fig.add_subplot(122, projection='3d')
    ax.scatter(X[..., 0],
               X[..., 1],
               X[..., 2],
               color='b',
               marker='x',
               s=9,
               lw=1,
               alpha=0.5)
    ax.scatter(Y[..., 0],
               Y[..., 1],
               Y[..., 2],
               color='r',
               marker='o',
               s=36,
               lw=2)
    ax.plot_wireframe(Z[..., 0], Z[..., 1], Z[..., 2], alpha=0.1)
    axis('off')
    savefig(osjoin("..", "figure", "toroidalsomexample.png"))