Example no. 1
import json
import os

# get_file_name() and fetch_products() are project helpers defined elsewhere.
def get_products():
    filename = "results/" + get_file_name()
    if not os.path.isfile(filename):
        fetch_products()  # populate the cache file on first use
    with open(filename) as json_file:
        return json.load(json_file)
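A minimal way to exercise this cache-then-read pattern, with throwaway stand-ins for the two project helpers (both bodies are invented for illustration and are not the project's real code):

import json
import os

# Throwaway stand-ins, for illustration only.
def get_file_name():
    return "products.json"

def fetch_products():
    os.makedirs("results", exist_ok=True)
    with open("results/" + get_file_name(), "w") as f:
        json.dump([{"id": 1, "name": "widget"}], f)

products = get_products()  # first call fetches; later calls read the cached file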
Example no. 2
import json

from pydispatch import dispatcher  # older Scrapy projects used scrapy.xlib.pydispatch
from scrapy import signals
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# AllProductsSpider and get_file_name are project-level names not shown here.

def spider_result():
    results = []

    # Collect every item the spider yields.
    def crawler_results(signal, sender, item, response, spider):
        results.append(item)

    # item_passed is a legacy alias for item_scraped in old Scrapy releases.
    dispatcher.connect(crawler_results, signal=signals.item_passed)

    process = CrawlerProcess(get_project_settings())
    process.crawl(AllProductsSpider)
    process.start()  # blocks until the crawl finishes

    with open("results/" + get_file_name(), "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=4)
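Newer Scrapy versions drop the item_passed alias and discourage global pydispatch wiring; a sketch of the same collector against the crawler's own signal manager (AllProductsSpider assumed as above) could look like:

from scrapy import signals
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

def spider_result_modern():
    results = []

    def collect(item, response, spider):
        results.append(item)

    process = CrawlerProcess(get_project_settings())
    crawler = process.create_crawler(AllProductsSpider)
    crawler.signals.connect(collect, signal=signals.item_scraped)
    process.crawl(crawler)
    process.start()
    return results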
Example no. 3
    def create_files_info(self, file_paths, file_sizes=None,
                          create_md5=False, rel_file_base=None):
        """ Create a list of file-info dicts for the info section of the
            metadata. file_paths can also be a dict whose key is the file
            path and whose value is the file size. """

        if not file_sizes:
            file_sizes = determine_file_sizes(file_paths)

        files_info = []
        # go through our files adding their info dict
        for path in file_paths:
            name = get_file_name(path, rel_file_base)
            file_info = {
                'length': file_sizes.get(path),
                # split the relative name into non-empty path segments
                'path': [x for x in name.split(os.sep) if x.strip()]
            }
            if create_md5:
                file_info['md5sum'] = md5sum(path)
            files_info.append(file_info)

        return files_info
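Assuming get_file_name strips the relative base from each path, a call on a hypothetical instance (file sizes invented for illustration) would shape its output like the 'files' list of a torrent info dict:

# Hypothetical instance and made-up sizes, for illustration only.
files_info = meta.create_files_info(
    ['/data/album/01.flac', '/data/album/cover/front.jpg'],
    rel_file_base='/data/album')
# -> [{'length': 1234, 'path': ['01.flac']},
#     {'length': 5678, 'path': ['cover', 'front.jpg']}]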
Example no. 4
    def open(self, path, flags):
        # Virtual paths are mapped onto their real backing files; anything
        # unrecognized falls through to the pass-through path at the bottom.
        if path.startswith('/actual_context/'):
            file_name = get_file_name(path)
            if file_name in self.data_source.map:
                return os.open(self.data_source.map[file_name], flags)
        elif path.startswith(localization_directory):
            info = get_path_info(path)
            if info['divisions'] == 4:
                files = self.data_source.file_by_localization_map[
                    info['parent_directory']]
                if info['file_name'] in files:
                    return os.open(files[info['file_name']], flags)
        elif path.startswith(event_directory):
            info = get_path_info(path)
            if info['divisions'] == 4:
                files = self.data_source.file_by_event_map[
                    info['parent_directory']]
                if info['file_name'] in files:
                    return os.open(files[info['file_name']], flags)
        return os.open(self._full_path(path), flags)
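The signature matches fusepy's Operations.open; a minimal mount sketch, where VirtualFS is a hypothetical name for the class this method belongs to (its constructor arguments are unknown from the excerpt):

from fuse import FUSE  # fusepy

# Hypothetical: mount the filesystem class that defines open() above.
FUSE(VirtualFS(), '/mnt/virtual', nothreads=True, foreground=True)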
Example no. 5
## read in results
import pandas as pd

# file_path, file_name, file_namec, DATE, and NEW_FILES are defined upstream.
nfiles = len(file_name)
## variable demand
cost_unmet_demand = []
system_cost = []
demand = []
unmet_demand = []
## constant demand
cost_unmet_demand_c = []
system_cost_c = []
demand_c = []
unmet_demand_c = []

for i in range(nfiles):

    f_name = get_file_name(file_path + file_name[i], DATE, NEW_FILES)
    df = pd.read_csv(f_name)
    cost_unmet_demand.append(df['var cost unmet demand ($/kWh)'].values)
    system_cost.append(df['system cost ($/kW/h)'].values)
    demand.append(df['mean demand (kW)'].values)
    unmet_demand.append(df['dispatch unmet demand (kW)'].values)

    f_namec = get_file_name(file_path + file_namec[i], DATE, NEW_FILES)
    df_c = pd.read_csv(f_namec)
    cost_unmet_demand_c.append(df_c['var cost unmet demand ($/kWh)'].values)
    system_cost_c.append(df_c['system cost ($/kW/h)'].values)
    demand_c.append(df_c['mean demand (kW)'].values)
    unmet_demand_c.append(df_c['dispatch unmet demand (kW)'].values)

## calculations
## reliability (fraction of demand met)
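The script is cut off here; a plausible sketch of the named reliability calculation, assuming each list holds one NumPy array per results file (a reconstruction, not the original code):

## reliability = fraction of demand met, per file (hypothetical reconstruction)
reliability = [1.0 - unmet.sum() / dem.sum()
               for unmet, dem in zip(unmet_demand, demand)]
reliability_c = [1.0 - unmet.sum() / dem.sum()
                 for unmet, dem in zip(unmet_demand_c, demand_c)]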
Example no. 6
import os

import arcpy

import helpers

this_dir = os.path.dirname(__file__)
os.chdir(this_dir)
samples_dir = os.path.join(this_dir, os.pardir, 'CurveFinderHelperSamples')

input_f = arcpy.GetParameterAsText(0)
output_workspace = arcpy.GetParameterAsText(1) or r'C:\_temp_smooth.gdb'
debug = str(arcpy.GetParameterAsText(2)).lower() == 'true'

is_feet = helpers.is_feet(input_f)
is_meters = helpers.is_meters(input_f)

if not (is_meters or is_feet):
    arcpy.AddError("Input feature class must be in feet or meters")
    exit(0)  # AddError above already flags the tool run as failed

input_name = helpers.get_file_name(input_f)

# A .gdb workspace is emptied if it already exists and created if it
# does not; any other workspace is treated as a plain directory.
if '.gdb' in output_workspace:
    if arcpy.Exists(output_workspace):
        arcpy.env.workspace = output_workspace
        for fc in arcpy.ListFeatureClasses():
            arcpy.Delete_management(fc)
    else:
        arcpy.CreateFileGDB_management(os.path.dirname(output_workspace),
                                       os.path.basename(output_workspace))
elif not os.path.isdir(output_workspace):
    os.mkdir(output_workspace)
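One caveat on the plain-directory branch: os.mkdir fails when parent directories are missing. If nested output paths are possible (an assumption about how the tool is called, not something the script states), the safer call is:

os.makedirs(output_workspace, exist_ok=True)  # also creates missing parents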
Example no. 7
    def create_info_dict(self, file_paths, pieces=None, file_sizes=None,
                         piece_size=None, total_size=None,
                         private=False, create_md5=False, file_name=None,
                         rel_file_base=None):
        """ creates a dict of the 'info' part of the metadata """
        # fill out our data (Python 2 code: note itervalues below)
        if not file_sizes:
            file_sizes = determine_file_sizes(file_paths)
        if not total_size:
            total_size = sum(file_sizes.itervalues())
        if not piece_size:
            piece_size = determine_piece_size(total_size)

        # create our metadata dict; despite the None default, pieces
        # must be an iterable of piece hashes by this point
        info_data = {
            'piece length': piece_size,
            'pieces': ''.join(pieces),
            'private': 1 if private else 0,
        }

        # we don't have to have a file name
        if file_name:
            info_data['name'] = file_name

        # if no relative base was given, use the common prefix of all the
        # files; any paths in the info dict will be relative to it
        if rel_file_base is None:
            rel_file_base = os.path.commonprefix(file_paths)

        log.debug('rel file base: %s', rel_file_base)

        # length is only appropriate if there is a single file
        if len(file_paths) == 1:
            info_data['length'] = total_size

            # if they want us to create the optional md5
            # for the file then let's do so
            if create_md5:
                info_data['md5sum'] = md5sum(file_paths[0])

            if not info_data.get('name'):
                # we'll go ahead and put a name
                info_data['name'] = get_file_name(file_paths[0],
                                                  rel_file_base)

        # if it's multiple files we list each one individually
        else:
            info_data['files'] = self.create_files_info(file_paths,
                                                        file_sizes,
                                                        create_md5,
                                                        rel_file_base)

            if not info_data.get('name'):
                # guess a name
                name = get_common_name(file_paths)
                if name:
                    info_data['name'] = name

        # make sure our meta info is valid; this raises on bad data
        validate_info_data(info_data)

        return info_data
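The pieces argument has to be built elsewhere; a hedged sketch of one way to produce it (this helper is an assumption, not part of the library) that hashes fixed-size pieces across file boundaries, as the BitTorrent format requires:

import hashlib

def sha1_pieces(file_paths, piece_size):
    # Hypothetical helper: SHA-1 of each fixed-size piece of the files'
    # concatenated byte stream; the buffer carries across file boundaries.
    pieces, buf = [], b''
    for path in file_paths:
        with open(path, 'rb') as f:
            while True:
                chunk = f.read(piece_size - len(buf))
                if not chunk:
                    break
                buf += chunk
                if len(buf) == piece_size:
                    pieces.append(hashlib.sha1(buf).digest())
                    buf = b''
    if buf:
        pieces.append(hashlib.sha1(buf).digest())
    return pieces

The resulting list would then be passed as pieces= together with the matching piece_size.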