Example #1
 def __str__(self):
     s = ""
     s += "Filename: " + xstr(self.filename) + "\n"
     s += "NetworkData: \n" + xstr(self.networkData) + "\n"
     s += "Seed: " + xstr(self.seed) + "\n"
     s += "Time: " + xstr(self.time) + "\n"
     return s
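
All of these examples lean on a small xstr() helper from each project's utils module. Its definition isn't shown on this page, but the call sites (None-safe conversion inside string concatenation and joins) suggest the usual idiom. The sketch below is an assumption, not code from any of the projects listed:

def xstr(value):
    # None-safe str(): the examples concatenate xstr(...) results freely,
    # so None presumably becomes an empty string rather than the text 'None'.
    if value is None:
        return ""
    return str(value)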
Example #2
 def returnParametersAsList(self, groupid, ignore_aggregates=False):
     parameterList = ""
     counter = 1
     # Traverse through each parameter in object's list
     for param in self.parameters:
         new_param = utils.xstr(self.name + "." + param)
         is_changed = False
         if self.ignore_aggregates == False:
             # Apply any functions to the parameter, e.g. SUM, AVG, etc.
             new_param, alias, is_changed = export_utils.apply_selection_function(
                 utils.xstr(self.name + "." + param), groupid
             )
         # For subqueries it is necessary to create an alias for the aggregation function
         if is_changed:
             new_param += " as " + param
             # Concatenate to parameterList and add a comma if not the end of the list
         parameterList += new_param
         if counter != len(self.parameters):
             parameterList += ","
         counter += 1
     if "uuid" not in self.parameters:
         if parameterList != "":
             parameterList += "," + self.name + ".uuid"
         else:
             parameterList += self.name + ".uuid"
     return parameterList
Example #3
 def printStats(self):
     for idx, corpus in enumerate(self.corpus):
         d = (idx, utils.shortSeed(corpus.seed),
              utils.xstr(self._getIdxOfCorpus(corpus._parent)),
              utils.xstr(corpus.networkData.getFuzzMessageIndex()),
              corpus.stats["new"], corpus.stats["crashes"])
         print "    Corpus %3d  (%5s):  Parent: %3s  Msg: %3s  Children: %3d  Crashes: %3d" % d
Example #4
    def _build(self):
        """
        Build filter according to filtering parameters.

        :rtype: list
        """
        opts = self.opts
        filter_list = []

        # No timestring parameter, range parameters are given
        if not opts.timestring and any(
            (xstr(opts.newer_than), xstr(opts.older_than))):
            print 'ERROR: Parameters newer_than/older_than require timestring to be given'
            sys.exit(1)
        # Timestring used alone without newer_than/older_than
        if opts.timestring is not None and not all(
            (xstr(opts.newer_than), xstr(opts.older_than))):
            f = api.filter.build_filter(kindOf='timestring',
                                        value=opts.timestring)
            if f:
                filter_list.append(f)

        # Timebase filtering
        timebased = zip(('newer_than', 'older_than'),
                        (opts.newer_than, opts.older_than))
        for opt, value in timebased:
            if value is None:
                continue
            f = api.filter.build_filter(kindOf=opt,
                                        value=value,
                                        timestring=opts.timestring,
                                        time_unit=opts.time_unit)
            if f:
                filter_list.append(f)

        # Add filtering based on suffix|prefix|regex
        patternbased = zip(('suffix', 'prefix', 'regex'),
                           (opts.suffix, opts.prefix, opts.regex))

        for opt, value in patternbased:
            if value is None:
                continue
            f = api.filter.build_filter(kindOf=opt, value=value)
            if f:
                filter_list.append(f)

        # Add exclude filter
        for pattern in opts.exclude or []:
            f = {'pattern': pattern, 'exclude': True}
            filter_list.append(f)

        return filter_list
Example #5
    def _build(self):
        """
        Build filter according to filtering parameters.

        :rtype: list
        """
        opts = self.opts
        filter_list = []

        # No timestring parameter, range parameters are given
        if not opts.timestring and any((xstr(opts.newer_than),
                                        xstr(opts.older_than))):
            print 'ERROR: Parameters newer_than/older_than require timestring to be given'
            sys.exit(1)
        # Timestring used alone without newer_than/older_than
        if opts.timestring is not None and not all((xstr(opts.newer_than),
                                                    xstr(opts.older_than))):
            f = api.filter.build_filter(kindOf='timestring',
                                        value=opts.timestring)
            if f:
                filter_list.append(f)

        # Timebase filtering
        timebased = zip(('newer_than', 'older_than'), (opts.newer_than,
                                                       opts.older_than))
        for opt, value in timebased:
            if value is None:
                continue
            f = api.filter.build_filter(kindOf=opt, value=value,
                                        timestring=opts.timestring,
                                        time_unit=opts.time_unit)
            if f:
                filter_list.append(f)

        # Add filtering based on suffix|prefix|regex
        patternbased = zip(('suffix', 'prefix', 'regex'),
                           (opts.suffix, opts.prefix, opts.regex))

        for opt, value in patternbased:
            if value is None:
                continue
            f = api.filter.build_filter(kindOf=opt, value=value)
            if f:
                filter_list.append(f)

        # Add exclude filter
        for pattern in opts.exclude or []:
            f = {'pattern': pattern, 'exclude': True}
            filter_list.append(f)

        return filter_list
Example #6
    def _apply_filters(self, source_items, act_on):
        """Applies filters to a list of indices or snapshots.
        :param source_items:   List of indices or snapshots.
        :param act_on:  Specifies whether we act on indices or snapshots.
        """
        opts = self.opts
        all_items_selected = opts.get('all_{0}'.format(act_on), None)

        # Choose explicitly chosen indices or snapshots
        #
        if act_on == 'indices':
            explicit_items = opts.index or []
        else:
            explicit_items = opts.snapshot or []

        # I don't care about using only timestring if it's a `dry_run` of show
        if not any((xstr(opts.newer_than), xstr(opts.older_than), opts.dry_run)) and \
                opts.timestring:
            logger.warn('Used only timestring parameter.')
            logger.warn('Actions can be performed on all %s matching %s',
                        act_on, opts.timestring)

        logger.debug("Full list of %s: %s", act_on, source_items)

        if not source_items:
            print 'ERROR. No {0} found in Elasticsearch.'.format(act_on)
            sys.exit(1)
        else:
            working_list = source_items

        # No filters have been added and not all items selected,
        # this means index or snapshot parameter is used alone.
        if not all_items_selected and not self.ifilter.filter_list:
            working_list = []
        else:
            # Otherwise safely apply filtering
            working_list = self.ifilter.apply(working_list, act_on=act_on)

        # Include explicit items into resulting working list.
        if explicit_items:
            working_list.extend(
                (i for i in explicit_items if i in source_items))

        if not working_list:
            logger.error('No %s matched provided args: %s', act_on, opts)
            print "ERROR. No {} found in Elasticsearch.".format(act_on)
            sys.exit(99)

        # Make a sorted, unique list of indices/snapshots
        return sorted(list(set(working_list)))
Example #7
    def _apply_filters(self, source_items, act_on):
        """Applies filters to a list of indices or snapshots.
        :param source_items:   List of indices or snapshots.
        :param act_on:  Specifies whether we act on indices or snapshots.
        """
        opts = self.opts
        all_items_selected = opts.get('all_{0}'.format(act_on), None)

        # Choose explicitly chosen indices or snapshots
        #
        if act_on == 'indices':
            explicit_items = opts.index or []
        else:
            explicit_items = opts.snapshot or []

        # I don't care about using only timestring if it's a `dry_run` of show
        if not any((xstr(opts.newer_than), xstr(opts.older_than), opts.dry_run)) and \
                opts.timestring:
            logger.warn('Used only timestring parameter.')
            logger.warn('Actions can be performed on all %s matching %s', act_on, opts.timestring)

        logger.debug("Full list of %s: %s", act_on, source_items)

        if not source_items:
            print 'ERROR. No {0} found in Elasticsearch.'.format(act_on)
            sys.exit(1)
        else:
            working_list = source_items

        # No filters have been added and not all items selected,
        # this means index or snapshot parameter is used alone.
        if not all_items_selected and not self.ifilter.filter_list:
            working_list = []
        else:
            # Otherwise safely apply filtering
            working_list = self.ifilter.apply(working_list, act_on=act_on)

        # Include explicit items into resulting working list.
        if explicit_items:
            working_list.extend((i for i in explicit_items if i in source_items))

        if not working_list:
            logger.error('No %s matched provided args: %s', act_on, opts)
            print "ERROR. No {} found in Elasticsearch.".format(act_on)
            sys.exit(99)

        # Make a sorted, unique list of indices/snapshots
        return sorted(list(set(working_list)))
Example #8
    def buildSubQuery(self, groupid, range=[None, time.strftime("%Y-%m-%d")], ignore_aggregates=False):
        # Parameters used in Subquery select Clause
        subParameters = self.returnParametersAsList(groupid)
        subGroupBy = self.returnGroupByAsList(groupid)
        # Required parameters for a subquery to do joins (date ranges are done internally)
        # Build Date Ranges for the where clause
        # Check DATES are valid, if not use default
        if not utils.is_valid_date(utils.xstr(range[0])):
            range[0] = ""
        if not utils.is_valid_date(utils.xstr(range[1])):
            range[1] = time.strftime("%Y-%m-%d")
        subDateRange = (
            self.name
            + ".ds>='"
            + utils.xstr(range[0]).split(" ")[0]
            + "' and "
            + self.name
            + ".ds<='"
            + utils.xstr(range[1]).split(" ")[0]
            + "'"
        )
        # Check if DATES have time, if so append server_date.
        if utils.has_valid_time(utils.xstr(range[0])):
            subDateRange += " and " + self.name + ".server_date>='" + utils.xstr(range[0]) + "'"
        if utils.has_valid_time(utils.xstr(range[1])):
            subDateRange += " and " + self.name + ".server_date<='" + utils.xstr(range[1]) + "'"
        subWhere = " where " + subDateRange
        if len(self.wheres) > 0:
            logicOp = " and "
        """for where in self.wheres:
			subWhere += logicOp
			subWhere += where
			logicOp = " or "
		"""
        for key in self.wheresMap.keys():
            subWhere += logicOp
            counter = 1
            subWhere += "("
            for where in self.wheresMap[key]:
                logicOp = " or "
                subWhere += where
                if counter != len(self.wheresMap[key]):
                    subWhere += logicOp
                counter += 1
            subWhere += ")"
            logicOp = " and "
        query = "select " + subParameters + " from " + utils.xstr(self.name) + subWhere
        # if ignoring aggregates is not necessary then include the groupby parameters
        if self.ignore_aggregates == False:
            query = query + " group by " + subGroupBy
        return query
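
For orientation only: with hypothetical inputs (an event table named installs, parameters ['date', 'count'], range=['2014-01-01', '2014-01-31'], no extra where clauses, and assuming apply_selection_function leaves the parameters unchanged and aggregates are not ignored), the string assembled above would come out roughly as:

select installs.date,installs.count,installs.uuid from installs where installs.ds>='2014-01-01' and installs.ds<='2014-01-31' group by installs.date,installs.count,installs.uuid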
Example #9
 def raw_data(self):
     return '%s$%s$%s$%s$%s$%s$%s$%s$%s' % (
         self.name,
         utils.xstr(self.timeout),
         utils.xstr(self.begin_time),
         utils.xstr(self.end_time),
         utils.xstr(self.begin_date),
         utils.xstr(self.end_date),
         utils.xstr(self.begin_lock_date),
         utils.xstr(self.end_lock_date),
         utils.xstr(self.day_mask),
     )
Example #10
	def __parseQueryDateRanges(self,range):
		if not utils.is_valid_date(utils.xstr(range[0])):
			range[0] = ''
		if not utils.is_valid_date(utils.xstr(range[1])):
			range[1] = time.strftime('%Y-%m-%d')
		if utils.has_valid_time(utils.xstr(range[0])):
			start_date, start_time = range[0].split(" ")
		else:
			start_date = range[0]
			start_time = ''
		if utils.has_valid_time(utils.xstr(range[1])):
			end_date, end_time = range[1].split(" ")
		else:
			end_date = range[1]
			end_time = '23:59:59'
		query = self.query
		query = query.replace("#STARTDATE",start_date)
		query = query.replace("#STARTTIME",start_time)
		query = query.replace("#ENDDATE",end_date)
		query = query.replace("#ENDTIME",end_time)
		self.query = query
Example #11
 def to_string(self):
     return "{ objectid: [" + xstr(self.objectid) + \
             "], name: [" + xstr(self.name) + \
             "], year: [" + xstr(self.year) + \
             "], designer: [" + ','.join(self.designers) + \
             "], publisher: [" + ','.join(self.publishers) + \
             "], playtime: (" + "-".join(self.playtime) + \
             "), players: (" + "-".join(self.players) + \
             "), suggested: [" + xstr(self.suggested) + \
             "], age: [" + xstr(self.age) + \
             "], language: [" + xstr(self.language) + \
             "], image: [" + xstr(self.image) + \
             "], description: [" + xstr(self.description) + "] " + \
             "}"
Example #12
def main(input_fc=None, output_csv=None, mode=settings.mode):

    if not arcpy.Exists(input_fc):
        utils.msg("Input, {} doesn't exist.".format(input_fc))
        sys.exit()

    try:
        with open(output_csv, 'wb') as output_file:
            writer = csv.writer(output_file, dialect='excel', \
                quotechar='"', quoting=csv.QUOTE_ALL)

            # fields generated by ArcGIS & lacking a direct use in Shepherd
            ignored_fields = ['OID', 'OBJECTID', 'Shape']
            # header row should contain our fields declaration
            fields = [
                field.name for field in arcpy.ListFields(input_fc)
                if field.name not in ignored_fields
            ]
            writer.writerow(fields)

            our_date = "Date_formatted"
            date_exists = False
            # If we don't have a formatted date field, don't try to manipulate it.
            if our_date in fields:
                date_pos = fields.index(our_date)
                date_exists = True
            with arcpy.da.SearchCursor(input_fc, fields) as cursor:
                for row in cursor:
                    formatted_row = list(row[:])
                    if date_exists:
                        date_value = row[date_pos]
                        try:
                            formatted_row[date_pos] = formatDate(date_value)
                        except Exception as e:
                            arcpy.AddError(e)
                        writer.writerow([utils.xstr(r) for r in formatted_row])

    except Exception as e:
        utils.msg("Error creating output file.", mtype='error', exception=e)
        sys.exit()

    utils.msg("Exported results saved to %s." % output_csv)
    if mode == 'toolbox':
        time.sleep(4)
Example #13
def main(input_fc=None, output_csv=None, mode=settings.mode):

    if not arcpy.Exists(input_fc):
        utils.msg("Input, {} doesn't exist.".format(input_fc))
        sys.exit()

    try:
        with open(output_csv, 'wb') as output_file:
            writer = csv.writer(output_file, dialect='excel', \
                quotechar='"', quoting=csv.QUOTE_ALL)

            # fields generated by ArcGIS & lacking a direct use in Shepherd
            ignored_fields = ['OID', 'OBJECTID', 'Shape']
            # header row should contain our fields declaration
            fields = [field.name 
                    for field in 
                    arcpy.ListFields(input_fc)
                    if field.name not in ignored_fields]
            writer.writerow(fields)

            our_date = "Date_formatted"
            date_exists = False
            # If we don't have a formatted date field, don't try to manipulate it.
            if our_date in fields:
                date_pos = fields.index(our_date)
                date_exists = True
            with arcpy.da.SearchCursor(input_fc, fields) as cursor:
                for row in cursor:
                    formatted_row = list(row[:])
                    if date_exists:
                        date_value = row[date_pos]
                        try:
                            formatted_row[date_pos] = formatDate(date_value)
                        except Exception as e:
                            arcpy.AddError(e)
                        writer.writerow([utils.xstr(r) for r in formatted_row])

    except Exception as e:
        utils.msg("Error creating output file.", mtype='error', exception=e)
        sys.exit()

    utils.msg("Exported results saved to %s." % output_csv)
    if mode == 'toolbox':
        time.sleep(4)
Example #14
 def returnGroupByAsList(self, groupid, ignore_aggregates=False):
     groupbyList = ""
     # Traverse through each parameter in object's list
     for param in self.parameters:
         is_changed = True
         if self.ignore_aggregates == False:
             # Apply any functions to the parameter, e.g. SUM, AVG, etc.
             is_changed = export_utils.apply_selection_function(utils.xstr(self.name + "." + param), groupid)[2]
         # If no change was made by function, then it belongs in the groupby list
         if not is_changed and param not in self.groupByList:
             self.groupByList.append(param)
             # Groupby Required Parameters - make sure they are there
     if "uuid" not in self.groupByList:
         self.groupByList.append("uuid")
     # Traverse through the new groupby list, this is the original parameter list, without parameters with math functions
     counter = 1
     for groupby_param in self.groupByList:
         groupbyList += self.name + "." + groupby_param
         if counter != len(self.groupByList):
             groupbyList += ","
         counter += 1
     return groupbyList
Example #15
def main(input_features=None, id_field=None, where_clause=None, output_coords=None, 
        output_genetics=None, mode=settings.mode):
   
    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_features)
    sr = desc.spatialReference
    if sr.type not in ['Geographic', 'Projected']:
        utils.msg("This tools only works with geographic or projected data.", mtype='error')
        sys.exit()

    if not id_field:
        id_field = settings.id_field

    # Find our Loci columns. 
    loci = utils.Loci(input_features)
    utils.msg("loci set: {0}".format(",".join(loci.names)))

    comments = """Export to Alleles in Space from the input `{input_features}`. 
Export occurred on {datetime}
Coordinates are in {sr_name}, a {sr_type} coordinate system.""".format(
        input_features=input_features, datetime=datetime.now(), \
        sr_name=sr.name, sr_type=sr.type.lower())

    if sr.type == 'Geographic':
        # geographic data expected to be (lat, lon)
        loc_a = settings.y_coord
        loc_b = settings.x_coord
    else:
        # two coordinates in a projected space
        loc_a = settings.x_coord
        loc_b = settings.y_coord
   
    selected_columns = [id_field, loc_a, loc_b] + loci.columns
    rows = arcpy.da.SearchCursor(input_features, selected_columns, where_clause)

    coordinate_rows = []
    genetic_rows = []
    for row in rows:
        id_field = row[0]
        loc_a_val = row[1]
        loc_b_val = row[2]
    
        coordinate_row = [id_field, loc_a_val, loc_b_val]
        genetic_row = [id_field]

        for (key, cols) in loci.fields.items():
            # loci values should be separated by a '\'.

            loci_values = []
            for col in cols:
                col_pos = selected_columns.index(col)
                allele_val = row[col_pos]
                # handle missing and incomplete genotypes.
                if allele_val is None:
                    allele_val = "0"
                else:
                    allele_val = str(allele_val)
                loci_values.append(allele_val)

            genetic_row.append('\\'.join(loci_values))

        coordinate_rows.append(coordinate_row)
        genetic_rows.append(genetic_row)

    try:
        # copy the final result back to disk.
        utils.msg("Writing results to disk...")

        # Alleles in Space expects comma-delimited outputs
        sep = ","

        with open(output_coords, 'wb') as coords_file:
            for raw_row in coordinate_rows:
                # convert all data to strings
                row = [utils.xstr(s) for s in raw_row]
                coords_file.write("{0}\n".format(sep.join(row)))
            coords_file.write(";\n")
            coords_file.write(comments)
            utils.msg("Exported coordinates saved to %s." % output_coords)

        with open(output_genetics, 'wb') as genetics_file:
            # start the file with the number of loci
            genetics_file.write("{0}\n".format(loci.count))
            for raw_row in genetic_rows:
                # convert all data to strings
                row = [utils.xstr(s) for s in raw_row]
                genetics_file.write("{0}\n".format(sep.join(row)))
            genetics_file.write(";\n")
            genetics_file.write(comments) 
            utils.msg("Exported genetics saved to %s." % output_genetics)

    except Exception as e:
        utils.msg("Error creating output file.", mtype='error', exception=e)
        sys.exit()

    if mode == 'toolbox':
        time.sleep(4)
Example #16
def run_geodesic_gp(input_fc, unit_factor, output_matrix, row_count,
                    is_spagedi):
    input_fc_mem = 'in_memory/input_fc'
    try:
        utils.msg("Copying features into memory...")
        arcpy.CopyFeatures_management(input_fc, input_fc_mem)
    except Exception as e:
        msg = "Unable to copy features into memory."
        utils.msg(msg, mtype='error', exception=e)
        sys.exit()

    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_fc_mem)
    sr = desc.spatialReference
    if sr.type not in ['Geographic', 'Projected']:
        msg = "This tools only works with geographic or projected data."
        utils.msg(msg, mtype='error')
        sys.exit()

    utils.msg("Finding all input points...")
    distance_matrix = OrderedDict()
    points = OrderedDict()
    records = arcpy.da.SearchCursor(input_fc_mem, ['OID@', 'SHAPE@XY'])
    for row in records:
        (fid, point) = row
        points[fid] = arcpy.Point(point[0], point[1])

    indicator = 0
    utils.msg("Computing distances...")
    for (fid, from_point) in points.items():
        pct_progress = int(fid / float(row_count) * 100)
        if pct_progress > indicator:
            indicator = pct_progress
            utils.msg("{0}%".format(indicator))
        p1 = points[fid]
        distance_matrix[fid] = OrderedDict()
        for (to_fid, to_point) in points.items():
            if to_fid == fid:
                dist = 0
            elif distance_matrix.has_key(to_fid) and \
                    distance_matrix[to_fid][fid] is not None:
                # here, modeling a symmetrical matrix
                dist = distance_matrix[to_fid][fid]
            else:
                p2 = points[to_fid]
                if p1.equals(p2):
                    dist = 0
                else:
                    # Each Polyline initialization must pay the COM object
                    # gods, and ends up making this much more expensive than
                    # AO C++ or even comtypes calls.
                    line = arcpy.Polyline(arcpy.Array([p1, p2]), sr)
                    # distance, always returned in meters, scale by our
                    # expected result units.
                    dist = line.getLength("GEODESIC") * unit_factor

            distance_matrix[fid][to_fid] = dist
    utils.msg("Distance matrix calculations complete.")

    # FIXME: generate it as a CSV file, then do TableToTable to pull it back in

    # Now compute the lines between these locations.
    try:
        # copy the final result back to disk.
        utils.msg("Writing results to disk...")
        # The SPAGeDi matrix format is described in section 3.7 of the manual.
        if is_spagedi:
            first_header_cell = "M%i" % row_count
            sep = "\t"
        else:
            first_header_cell = ""
            sep = ","

        with open(output_matrix, 'w') as csv:
            # initialize with our header row
            output_rows = [[first_header_cell] + \
                    [str(s) for s in distance_matrix.keys()]]
            for (fid, row) in distance_matrix.items():
                res = [str(fid)] + [utils.xstr(s) for s in row.values()]
                output_rows.append(res)
            for row in output_rows:
                csv.write("{0}\n".format(sep.join(row)))
            if is_spagedi:
                csv.write("END\n")

    except Exception as e:
        utils.msg("Error creating distance matrix.",
                  mtype='error',
                  exception=e)
        sys.exit()
Example #17
def subreddit_entry_to_listitem(subreddit_entry):
    from utils import compose_list_item, build_script, xstr, prettify_reddit_query
    addtl_subr_info={}
    nsfw=False
    icon=banner=header=public_description=display_name=override_header_image=None
    header_ar=0
    addtl_subr_info=ret_sub_info(subreddit_entry)

    #strip out the alias identifier from the subreddit string retrieved from the file so we can process it.
    entry_type, subreddit, alias, shortcut_description=parse_subreddit_entry(subreddit_entry)
    icon=default_icon=ret_settings_type_default_icon(entry_type)

    pretty_label=prettify_reddit_query(alias)
    pretty_label=pretty_label.replace('+',' + ')

    if addtl_subr_info:
        icon=addtl_subr_info.get('icon_img')
        banner=addtl_subr_info.get('banner_img')  #rectangular shape
        header=addtl_subr_info.get('header_img')  #square shape  from  bannerTvImageUrl
        header_ar=img_ar(addtl_subr_info.get('header_size'))
        if (header_ar > 8) and (not icon): #some header_img are very wide. this is to check and override the icon display in the gui
            override_header_image=header
            banner=header
        public_description=xstr( addtl_subr_info.get('public_description',''))
        display_name=xstr(addtl_subr_info.get('display_name',''))

    #log('{} icon={} header={} banner={}'.format( subreddit, repr(icon), repr(header), repr(banner) ))

    if entry_type=='link':  #<-- added new ability to have youtube channels as a shortcut on the main screen
        #here, the subreddit variable contains a url. we made sure that it points to a youtube channel(ContextMenus.py). that way, there is no need to specify 'channel' when calling listRelatedVideo
        liz = compose_list_item( pretty_label, entry_type, "", "script", build_script("listRelatedVideo",subreddit,alias) )
    else: #domain, subreddit, combined, search, multireddit
        reddit_url=assemble_reddit_filter_string("",subreddit, "yes")

        if entry_type=='domain':
            #remove the identifier that this setting is a domain
            pretty_label=re.findall(r'(?::|\/domain\/)(.+)',subreddit)[0]

        if subreddit.lower() in ["all","popular"]:
            liz = compose_list_item( pretty_label, entry_type, "", "script", build_script("listSubReddit",reddit_url,alias) )
        else:
            if addtl_subr_info: #if we have additional info about this subreddit
                #log(repr(addtl_subr_info))
                #title=addtl_subr_info.get('title','')+'\n'
                #display_name=xstr(addtl_subr_info.get('display_name',''))
                #if samealphabetic( title, display_name): title=''

                #header_title=xstr(addtl_subr_info.get('header_title',''))
                #in reddit_viewer,  title, header_title and public_description is shown as plot
                nsfw=addtl_subr_info.get('over18')

                icon=next((item for item in [icon,banner,header] if item ), '') or default_icon #picks the first item that is not None
                #icon=icon or default_icon
                #log( pretty_label + ' icon=' + icon + ' nsfw='+repr(nsfw))
                liz = compose_list_item( pretty_label, entry_type, "", "script", build_script("listSubReddit",reddit_url,alias) )
            else:
                liz = compose_list_item( pretty_label, entry_type, "", "script", build_script("listSubReddit",reddit_url,alias) )

    liz.setArt({ "thumb": icon, "banner":banner, "fanart":override_header_image })
    liz.setInfo('video', {"Title":display_name, "plot":public_description} )

    if nsfw:
        liz.setProperty('nsfw', 'true' )

    return liz
Example #18
def main(input_features=None,
         where_clause=None,
         order_by=None,
         output_name=None,
         mode=settings.mode):

    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_features)
    sr = desc.spatialReference
    if sr.type not in ['Geographic', 'Projected']:
        utils.msg("This tools only works with geographic or projected data.",
                  mtype='error')
        sys.exit()

    # Find our Loci columns.
    loci = utils.Loci(input_features)
    utils.msg("loci set: {0}".format(",".join(loci.names)))

    # sql clause can be prefix or suffix; set up ORDER BY
    sql_clause = (None, "ORDER BY {0} ASC".format(order_by))
    pops = OrderedDict()
    # query the input_features in ascending order; filtering as needed
    rows = arcpy.da.SearchCursor(input_features, order_by, where_clause, "",
                                 "", sql_clause)
    row_count = 0
    for row in rows:
        row_count += 1
        pop = row[0]
        if pops.has_key(pop):
            pops[pop] += 1
        else:
            pops[pop] = 1

    # Start with any number of header lines describing what this
    # file is, each line should be prefixed with //.
    comments = """// Export to SPAGeDi from the input `{input_features}`. 
// Export occurred on {datetime}
// Coordinates are in {sr_name}, a {sr_type} coordinate system.""".format(
        input_features=input_features, datetime=datetime.now(), \
        sr_name=sr.name, sr_type=sr.type)

    comment_row = [comments]
    """ 
    There are three possible ways to specify population (SPAGeDi manual, 2.4):
    
    1) as categorical groups, where one population includes all individuals 
       sharing the same categorical variable.

    2) as spatial groups, where a spatial group includes all individuals 
       sharing the same spatial coordinates and following each other in 
       the data file.

    3) as spatio-categorical groups, where a spatio-categorical group 
       includes all individuals belonging to both the same spatial 
       group and categorical group. When populations are defined using the 
       categorical variable, the spatial coordinates of a given population 
       are computed by averaging the coordinates of the individuals it contains.
    """

    # FIXME: another spot where 'observation_id' differs from 'individual_id';
    # deduplicate in order to have one value PER individual PER population.

    categories = len(pops.keys())

    # FIXME: presumes only two coords, SPAGeDi supports three. Extract depth?
    if sr.type == 'Geographic':
        # Assumes decimal degrees; based on the statement 'if the number of
        # spatial coordinates is set to -2, latitudes and longitudes must be
        # given in degrees with decimal, using negative numbers for Southern
        # latitude or Western longitude.
        xy_type = -2
        # geographic data expected to be (lat, lon)
        loc_a = settings.y_coord
        loc_b = settings.x_coord
    else:
        # two coordinates in a projected space
        xy_type = 2
        loc_a = settings.x_coord
        loc_b = settings.y_coord

    num_loci = loci.count

    # FIXME: loci number of digits, defined in SPAGeDi manual 3.1 as "number
    # of digits used to code one allele (1 to 3); or set a value =0 (in fact
    # the value given for missing data) in the case of dominant markers".
    loci_digits = 2

    # get the maximum number of different values per loci
    max_ploidy = max(map(len, loci.fields.values()))

    # 1st line: set of 6 numbers separated by a tabulation representing:
    header_row = [
        row_count,  # number of individuals
        categories,  # number of categories
        xy_type,  # number of coordinates
        num_loci,  # number of loci 
        loci_digits,  # number of digits used to code one allele
        max_ploidy  # max ploidy in the data
    ]

    # 2nd line: # of distance intervals; upper distance of each interval.

    # note 1: alternatively you can enter only the desired number of intervals
    #         preceded by a negative sign; the program then defines the
    #         n maximal distances in such a way that the number of pairwise
    #         comparisons within each distance interval is approx. constant.
    # note 2: if you do not wish distance intervals, put 0
    # note 3: if you use latitude + longitude, distance intervals
    #         must be given in km.

    # TODO: what is this and how do we use it effectively?
    distances_row = [0]

    # 3rd line: column labels (<=15 characters).
    base_cols = [settings.id_field, order_by, loc_a, loc_b]
    labels_row = base_cols + loci.columns

    # where_clause is used to ensure only those records with genetic data
    # are copied to the output.
    selected_columns = base_cols + loci.columns
    rows = arcpy.da.SearchCursor(input_features, selected_columns, \
            where_clause, "", "", sql_clause)

    data_rows = []
    for row in rows:
        # our two string fields can't contain spaces, based on Autocio.c: 3857
        id_field = str(row[0]).replace(" ", "_")
        pop_field = str(row[1]).replace(
            " ", "_")  # 'order_by', or population 'group by'
        loc_a_val = row[2]
        loc_b_val = row[3]

        data_row = [id_field, pop_field, loc_a_val, loc_b_val]

        for (key, cols) in loci.fields.items():
            # Loci data can be encoded in a number of formats, the values
            # separated by any non-numeric values (SPAGeDi manual, 3.2.1).
            # Here, we use spaces.

            loci_values = []
            for col in cols:
                col_pos = selected_columns.index(col)
                allele_val = row[col_pos]
                # FIXME: handle missing and incomplete genotypes.
                if allele_val is None:
                    allele_val = "0"
                else:
                    allele_val = str(allele_val)
                loci_len = len(allele_val)
                if loci_len > loci_digits:
                    loci_digits = loci_len
                loci_values.append(allele_val)

            data_row.append(" ".join(loci_values))
        data_rows.append(data_row)

    # update header based on revised loci_digits
    header_row[4] = loci_digits

    # Now compute the lines between these locations.
    try:
        # copy the final result back to disk.
        utils.msg("Writing results to disk...")

        output_rows = [comment_row, header_row, distances_row, labels_row] + \
                data_rows

        # SPAGeDi expects tab-delimited outputs
        sep = "\t"

        with open(output_name, 'w') as output_file:
            for raw_row in output_rows:
                # convert all data to strings
                row = [utils.xstr(s) for s in raw_row]
                output_file.write("{0}\n".format(sep.join(row)))

            # after the last line of individual data the word END is required.
            output_file.write("END\n")

    except Exception as e:
        utils.msg("Error creating output file.", mtype='error', exception=e)
        sys.exit()

    utils.msg("Exported results saved to %s." % output_name)
    if mode == 'toolbox':
        time.sleep(4)
Example #19
def export_report(parameters, keys = 'uuid', range = [None,time.strftime('%Y-%m-%d')], type = 'csv', outputname = None, emailaddr = None, where = None, description = 'No description', distinct = 'Yes', groupid = 'default', ftpuser = '******', vendor_event = 'InstallationInfo', report_name = None, debug = False, raw_output = False, scp = False, target_user = None, target_server = None, target_dir = None): 
	try:
		COMMAND = ''
		#Build Query into COMMAND (Prefix: "hive --config $HIVE_HOME/conf -e \"" is added in function export_hive_to_file())
		COMMAND = "\"use " + groupid + "; "
		if report_name is not None:
			reportBuilder = report_builder.ReportBuilder(report_name,range,groupid)
			COMMAND += reportBuilder.getReportQuery()
		else:
			#Create QueryBuilder Object
			queryBuilder = query_builder.QueryBuilder()
			#Sanitize Parameters
			#Creates:
			###eventList - A List of Event Objects that have already been populated by the sanitize parameters function.
			###parameters - A List of the original parameters requested in its original order.
			###parameters_alias - A List of the aliases of the original parameters requested in its order.
			eventList, parameters, parametersAliasList = export_utils.sanitize_parameters(parameters,groupid,raw_output)
			#Sanitize Where
			where, where_tables = export_utils.sanitize_where(where, parameters, groupid, eventList, vendor_event)
			#Add eventList to queryBuilder
			queryBuilder.addToEvents(eventList)
			COMMAND += queryBuilder.buildQuery(groupid,type,range,parameters,raw_output)
		COMMAND += "\""
		print
		print "Query:"
		print "-------------------------"
		print COMMAND
		print "-------------------------"
		print
		if debug == False:
			print "START TIME: [" + time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) + "]" 
			print "START COMMAND: " + COMMAND
			outputpath, outputname, user, password = export_hive_to_file(COMMAND, utils.listToString(parametersAliasList), type, outputname, ftpuser)
			try:
				email("Report: Data Export Utility","complete!","smithmicro.com",emailaddr,"{'text':'email from export report', 'html':'<p>Your file is now complete. To access your report, click on this link: <a href=\"ftp://"+os.uname()[1]+"/"+type+"/"+outputname+"\">"+outputname+"</a> and enter your username and password below.</p><p>Username: <b>"+user.strip()+"</b><br />Password: <b>"+password.strip()+"</b></p>'}")
			except:
				print "The Email Server has Raised an Error"
			if bool(scp) == True: 	
				email_addr = user.strip()	
				scp_method(email_addr, outputname, target_user, target_server, target_dir)
	except IOError, e:
		print "I/O error(%(error0)s):(%(error1)s)" % { "error0":e[0],"error1":e[1] }
		email("Export_Report","failed","analytics.smithmicro.com",emailaddr,"\nError:\nI/O error("+utils.xstr(e[0])+"): "+xstr(e[1])+"\n"+utils.xstr(sys.exc_info()[0]))
Example #20
def subreddit_entry_to_listitem(subreddit_entry):
    from utils import compose_list_item, build_script, xstr, prettify_reddit_query
    addtl_subr_info = {}
    nsfw = False
    icon = banner = header = public_description = display_name = override_header_image = None
    header_ar = 0
    addtl_subr_info = ret_sub_info(subreddit_entry)

    #strip out the alias identifier from the subreddit string retrieved from the file so we can process it.
    entry_type, subreddit, alias, shortcut_description = parse_subreddit_entry(
        subreddit_entry)
    icon = default_icon = ret_settings_type_default_icon(entry_type)

    pretty_label = prettify_reddit_query(alias)
    pretty_label = pretty_label.replace('+', ' + ')

    if addtl_subr_info:
        icon = addtl_subr_info.get('icon_img')
        banner = addtl_subr_info.get('banner_img')  #rectangular shape
        header = addtl_subr_info.get(
            'header_img')  #square shape  from  bannerTvImageUrl
        header_ar = img_ar(addtl_subr_info.get('header_size'))
        if (header_ar > 8) and (
                not icon
        ):  #some header_img are very wide. this is to check and override the icon display in the gui
            override_header_image = header
            banner = header
        public_description = xstr(addtl_subr_info.get('public_description',
                                                      ''))
        display_name = xstr(addtl_subr_info.get('display_name', ''))

    #log('{} icon={} header={} banner={}'.format( subreddit, repr(icon), repr(header), repr(banner) ))

    if entry_type == 'link':  #<-- added new ability to have youtube channels as a shortcut on the main screen
        #here, the subreddit variable contains a url. we made sure that it points to a youtube channel(ContextMenus.py). that way, there is no need to specify 'channel' when calling listRelatedVideo
        liz = compose_list_item(
            pretty_label, entry_type, "", "script",
            build_script("listRelatedVideo", subreddit, alias))
    else:  #domain, subreddit, combined, search, multireddit
        reddit_url = assemble_reddit_filter_string("", subreddit, "yes")

        if entry_type == 'domain':
            #remove the identifier that this setting is a domain
            pretty_label = re.findall(r'(?::|\/domain\/)(.+)', subreddit)[0]

        if subreddit.lower() in ["all", "popular"]:
            liz = compose_list_item(
                pretty_label, entry_type, "", "script",
                build_script("listSubReddit", reddit_url, alias))
        else:
            if addtl_subr_info:  #if we have additional info about this subreddit
                #log(repr(addtl_subr_info))
                #title=addtl_subr_info.get('title','')+'\n'
                #display_name=xstr(addtl_subr_info.get('display_name',''))
                #if samealphabetic( title, display_name): title=''

                #header_title=xstr(addtl_subr_info.get('header_title',''))
                #in reddit_viewer,  title, header_title and public_description is shown as plot
                nsfw = addtl_subr_info.get('over18')

                icon = next(
                    (item for item in [icon, banner, header] if item),
                    '') or default_icon  #picks the first item that is not None
                #icon=icon or default_icon
                #log( pretty_label + ' icon=' + icon + ' nsfw='+repr(nsfw))
                liz = compose_list_item(
                    pretty_label, entry_type, "", "script",
                    build_script("listSubReddit", reddit_url, alias))
            else:
                liz = compose_list_item(
                    pretty_label, entry_type, "", "script",
                    build_script("listSubReddit", reddit_url, alias))

    liz.setArt({
        "thumb": icon,
        "banner": banner,
        "fanart": override_header_image
    })
    liz.setInfo('video', {"Title": display_name, "plot": public_description})

    if nsfw:
        liz.setProperty('nsfw', 'true')

    return liz
Example #21
def index(url,name,type_):
    from utils import xstr, samealphabetic, hassamealphabetic
    from reddit import load_subredditsFile, parse_subreddit_entry, create_default_subreddits, assemble_reddit_filter_string, ret_sub_info, ret_settings_type_default_icon


    if not os.path.exists(subredditsFile):  #if not os.path.exists(subredditsFile):
        create_default_subreddits()

    subredditsFile_entries=load_subredditsFile()

    subredditsFile_entries.sort(key=lambda y: y.lower())

    addtl_subr_info={}

    xbmcplugin.setContent(pluginhandle, "mixed") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    next_mode='listSubReddit'

    for subreddit_entry in subredditsFile_entries:

        addtl_subr_info=ret_sub_info(subreddit_entry)

        entry_type, subreddit, alias, shortcut_description=parse_subreddit_entry(subreddit_entry)

        icon=default_icon='' #addon_path+"/resources/skins/Default/media/"+ret_settings_type_default_icon(entry_type)

        url= assemble_reddit_filter_string("",subreddit, "yes")

        if subreddit.lower() in ["all","popular"]:
            addDir(subreddit, url, next_mode, icon, subreddit, { "plot": translation(30009) } )  #Displays the currently most popular content from all of reddit
        else:
            if addtl_subr_info: #if we have additional info about this subreddit

                title=xstr(addtl_subr_info.get('title'))+'\n'
                display_name=xstr(addtl_subr_info.get('display_name'))
                if samealphabetic( title, display_name): title=''


                header_title=xstr(addtl_subr_info.get('header_title'))
                public_description=xstr( addtl_subr_info.get('public_description'))

                if samealphabetic( header_title, public_description): public_description=''
                if samealphabetic(title,public_description): public_description=''


                if entry_type=='subreddit':
                    display_name='r/'+display_name
                shortcut_description='[COLOR cadetblue][B]%s[/B][/COLOR]\n%s[I]%s[/I]\n%s' %(display_name,title,header_title,public_description )

                icon=addtl_subr_info.get('icon_img')
                banner=addtl_subr_info.get('banner_img')
                header=addtl_subr_info.get('header_img')  #usually the small icon on upper left side on subreddit screen

                icon=next((item for item in [icon,banner,header] if item ), '') or default_icon

                addDirR(alias, url, next_mode, icon,
                        type_=subreddit,
                        listitem_infolabel={ "plot": shortcut_description },
                        file_entry=subreddit_entry,
                        banner_image=banner )
            else:
                addDirR(alias, url, next_mode, icon, subreddit, { "plot": shortcut_description }, subreddit_entry )

    addDir("[B]- "+translation(30001)+"[/B]", "", 'addSubreddit', "", "", { "plot": translation(30006) } ) #"Customize this list with your favorite subreddit."
    addDir("[B]- "+translation(30005)+"[/B]", "",'searchReddits', "", "", { "plot": translation(30010) } ) #"Search reddit for a particular post or topic

    xbmcplugin.endOfDirectory(pluginhandle)
Example #22
def main(input_features=None, id_field=None, where_clause='', order_by=None,
        output_name=None, format_type='Excel', mode='toolbox'):

    script_path = os.path.abspath(__file__)
    utils.msg("Executing {}...".format(script_path))

    # try to set the id based on input, otherwise go off of the config.
    if id_field is not None:
        primary_id = id_field
    else:
        primary_id = settings.id_field

    # set mode based on how script is called.
    settings.mode = mode
    add_output = arcpy.env.addOutputsToMap
    arcpy.env.addOutputsToMap = True

    # ensure our order by field exists
    fields = [f.name for f in arcpy.ListFields(input_features)]
    if not order_by in fields:
        utils.msg("Unable to find order_by field, `{}`".format(order_by))
        sys.exit()

    # The Input Feature Class
    # == input_features

    # Where clause that can be used to pull out only those rows with genetic
    # data from the feature class.

    # NOTE: This parameter is optional and was included because some data
    # sets may have individual IDs based on more than just genetics
    # (i.e. photo-id).  If your data only has genetic records, this
    # parameter can be left blank.
    # == where_clause

    # The Attribute Field that distinguishes the populations in the input.

    # NOTE: This parameter is optional and was included because some data
    # sets may have more than one population in it.
    # == order_by

    try:
        # test opening the file to which the data will be written
        output_file = open(output_name, "wb")
    except Exception as e:
        utils.msg("Unable to open output file", mtype='error', exception=e)
        sys.exit()

    # initialize our haplotypes data
    haplotypes = utils.Haplotype(input_features)

    # set up the environment depending on the output format
    if format_type == 'Excel':
        # xlwt will write directly; only opened to make sure we can write to the location
        output_file.close()
        try:
            import xlwt
        except ImportError as e:
            msg = "Writing Excel Spreadsheets requires the `xlwt` library, which is" + \
                    " included in ArcGIS 10.2+. If you'd like Excel support in " + \
                    " ArcGIS 10.1, please install `xlwt` manually from PyPI: " +  \
                    "   https://pypi.python.org/pypi/xlwt/0.7.3"
            utils.msg(msg, mtype='error', exception=e)
            sys.exit() 

        # only replace Nones, keep numeric types
        from utils import xrep as xstr
        from utils import zrep as zstr
    else:
        # convert all strings to text
        from utils import zstr as zstr
        from utils import xstr as xstr

    # list of lists to emulate the Excel sheet
    output_rows = []
    haplotype_rows = []

    utils.msg("Output file open and ready for data input")

    # Find our Loci columns. 
    loci = utils.Loci(input_features)
    utils.msg("loci set: {0}".format(",".join(loci.names)))

    """
    header row contains (in order):
     - number of loci
     - number of samples
     - number of populations
     - size of pop 1
     - size of pop 2
     - ...

    second row contains:
     - three blank cells
     - loci 1 label
     - loci 2 label
     - ...

    DATA starts at C4. See "GenAlEx Guide.pdf" page 15.
    """

    # sql clause can be prefix or suffix; set up ORDER BY
    sql_clause = (None, "ORDER BY {0} ASC".format(order_by))
    # query the input_features in ascending order; filtering as needed
    selected_columns = order_by
    pops = OrderedDict()
    rows = arcpy.da.SearchCursor(input_features, selected_columns,
            where_clause, "", "", sql_clause)
    row_count = 0
    for row in rows:
        row_count += 1
        pop = row[0]
        if pops.has_key(pop):
            pops[pop] +=1
        else:
            pops[pop] = 1

    pop_counts = [xstr(p) for p in pops.values()]
   
    # Creating the GenAlEx header information required for the text file.
    output_rows += [[loci.count, row_count, len(pops.keys())] + pop_counts]

    # optional title, then a list of each population
    output_rows += [['', '', ''] + pops.keys()]

    # first two rows almost exactly the same
    haplotype_rows = copy.deepcopy(output_rows)
    # 'number of loci' should be 1 for haplotype-only data
    haplotype_rows[0][0] = 1

    loci_labels = []
    for (key, cols) in loci.fields.items():
        loci_labels += [key] + [''] * (len(cols) - 1)

    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_features)
    sr = desc.spatialReference
    if sr.type == 'Projected':
        loc_a = settings.x_coord
        loc_b = settings.y_coord
    if sr.type == 'Geographic':
        # geographic data expected to be (lat, lon)
        loc_a = settings.y_coord
        loc_b = settings.x_coord

    primary_columns = [primary_id, order_by, loc_a, loc_b]
    exclude = primary_columns + loci.columns + ['OBJECTID', 'Shape']
    unselected_columns = []
    for field in fields:
        # add any field not currently mapped
        if field not in exclude:
            unselected_columns.append(field)
    # extra fields should start with an empty line, the location, then any
    # columns not otherwise mapped.
    extra_columns = ['', loc_a, loc_b] + unselected_columns

    output_rows += [[primary_id, order_by] + loci_labels + extra_columns]
    if haplotypes.defined:
        haplotype_rows += [[primary_id, order_by, haplotypes.column]]
    utils.msg("Header info written to output")

    # Note the WhereClause: Because the SPLASH data has both photo-id and genetic
    # records, but GenAlEx only uses genetic data, the WhereClause is used to ensure
    # only those records with genetic data are copied to the text file.
    selected_columns = primary_columns + unselected_columns + loci.columns

    for row in arcpy.da.SearchCursor(input_features, selected_columns,
            where_clause, "", "", sql_clause):
        id_field = row[0] # as set on import
        pop = row[1] # second column is 'order_by', or key column
        loc_a_val = row[2]
        loc_b_val = row[3]
        unselected_rows = list(row[4:len(unselected_columns)+4])
        result_row = [id_field, pop]
        for (key, cols) in loci.fields.items():
            for col in cols:
                col_pos = selected_columns.index(col)
                result_row.append(row[col_pos])
        result_row = result_row + ["", loc_a_val, loc_b_val] + unselected_rows
        output_rows += [[zstr(s) for s in result_row]]
        if haplotypes.defined:
            row_val = row[selected_columns.index(haplotypes.column)]
            if row_val:
                haplotype_val = haplotypes.lookup[row_val]
            else:
                haplotype_val = 0
            haplotype_res = [id_field, pop, haplotype_val]
            haplotype_rows += [[zstr(s) for s in haplotype_res]]

    # depending on our driver, handle writing dependent on type
    if format_type == 'Excel':
        # initialize our spreadsheet
        workbook = xlwt.Workbook()

        # codominant data
        worksheet_co = workbook.add_sheet('Codominant')
        for (i, row) in enumerate(output_rows):
            for (j, val) in enumerate(row):
                worksheet_co.write(i, j, val)

        # the haplotype data
        if haplotypes.defined:
            worksheet_hap = workbook.add_sheet('Haplotype')
            for (i, row) in enumerate(haplotype_rows):
                for (j, val) in enumerate(row):
                    worksheet_hap.write(i, j, val)

            # write out a mapping of haplotype names
            worksheet_hap_map = workbook.add_sheet('Haplotype Map')
            worksheet_hap_map.write(0, 0, haplotypes.column)
            worksheet_hap_map.write(0, 1, 'Numeric Code')

            for (i, (hap, code)) in enumerate(haplotypes.lookup.items(), start=1):
                worksheet_hap_map.write(i, 0, hap)
                worksheet_hap_map.write(i, 1, code)

        workbook.save(output_name)            
    else:
        # create a CSV writer
        writer = csv.writer(output_file, dialect='excel',
                quotechar='"', quoting=csv.QUOTE_ALL)
        for row in output_rows:
            writer.writerow(row)

    utils.msg("Exported results saved to %s." % output_name)
    # Close Output text file
    output_file.close()

    if mode == 'toolbox':
        time.sleep(4)

    arcpy.env.addOutputsToMap = add_output
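
Example #22 above swaps conversion helpers based on the output format: for Excel it imports xrep/zrep ("only replace Nones, keep numeric types"), while for CSV it uses xstr/zstr ("convert all strings to text"). The real definitions aren't shown here; a rough sketch of the distinction those comments describe, with hypothetical implementations, might be:

def xrep(value):
    # Excel-oriented variant (assumed): keep ints/floats as-is so xlwt
    # writes numeric cells; only None is replaced with an empty string.
    return "" if value is None else value

def xstr(value):
    # Text-oriented variant (assumed): everything becomes a string for
    # the CSV writer, with None mapped to an empty string.
    return "" if value is None else str(value)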
Example #23
def main(input_features=None,
         id_field=None,
         where_clause=None,
         output_coords=None,
         output_genetics=None,
         mode=settings.mode):

    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_features)
    sr = desc.spatialReference
    if sr.type not in ['Geographic', 'Projected']:
        utils.msg("This tools only works with geographic or projected data.",
                  mtype='error')
        sys.exit()

    if not id_field:
        id_field = settings.id_field

    # Find our Loci columns.
    loci = utils.Loci(input_features)
    utils.msg("loci set: {0}".format(",".join(loci.names)))

    comments = """Export to Alleles in Space from the input `{input_features}`. 
Export occurred on {datetime}
Coordinates are in {sr_name}, a {sr_type} coordinate system.""".format(
        input_features=input_features, datetime=datetime.now(), \
        sr_name=sr.name, sr_type=sr.type.lower())

    if sr.type == 'Geographic':
        # geographic data expected to be (lat, lon)
        loc_a = settings.y_coord
        loc_b = settings.x_coord
    else:
        # two coordinates in a projected space
        loc_a = settings.x_coord
        loc_b = settings.y_coord

    selected_columns = [id_field, loc_a, loc_b] + loci.columns
    rows = arcpy.da.SearchCursor(input_features, selected_columns,
                                 where_clause)

    coordinate_rows = []
    genetic_rows = []
    for row in rows:
        id_field = row[0]
        loc_a_val = row[1]
        loc_b_val = row[2]

        coordinate_row = [id_field, loc_a_val, loc_b_val]
        genetic_row = [id_field]

        for (key, cols) in loci.fields.items():
            # loci values should be separated by a '\' (backslash).

            loci_values = []
            for col in cols:
                col_pos = selected_columns.index(col)
                allele_val = row[col_pos]
                # handle missing and incomplete genotypes.
                if allele_val is None:
                    allele_val = "0"
                else:
                    allele_val = str(allele_val)
                loci_values.append(allele_val)

            genetic_row.append('\\'.join(loci_values))
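            # e.g. hypothetical allele values '101' and '103' at one locus
            # end up written as "101\103" in the genetics file.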

        coordinate_rows.append(coordinate_row)
        genetic_rows.append(genetic_row)

    try:
        # copy the final result back to disk.
        utils.msg("Writing results to disk...")

        # Alleles in Space expects comma-delimited outputs
        sep = ","

        with open(output_coords, 'wb') as coords_file:
            for raw_row in coordinate_rows:
                # convert all data to strings
                row = [utils.xstr(s) for s in raw_row]
                coords_file.write("{0}\n".format(sep.join(row)))
            coords_file.write(";\n")
            coords_file.write(comments)
            utils.msg("Exported coordinates saved to %s." % output_coords)

        with open(output_genetics, 'wb') as genetics_file:
            # start the file with the number of loci
            genetics_file.write("{0}\n".format(loci.count))
            for raw_row in genetic_rows:
                # convert all data to strings
                row = [utils.xstr(s) for s in raw_row]
                genetics_file.write("{0}\n".format(sep.join(row)))
            genetics_file.write(";\n")
            genetics_file.write(comments)
            utils.msg("Exported genetics saved to %s." % output_genetics)

    except Exception as e:
        utils.msg("Error creating output file.", mtype='error', exception=e)
        sys.exit()

    if mode == 'toolbox':
        time.sleep(4)
Example #24
0
def main(input_features=None,
         id_field=None,
         where_clause='',
         order_by=None,
         output_name=None,
         format_type='Excel',
         mode='toolbox'):

    script_path = os.path.abspath(__file__)
    utils.msg("Executing {}...".format(script_path))

    # try to set the id based on input, otherwise go off of the config.
    if id_field is not None:
        primary_id = id_field
    else:
        primary_id = settings.id_field

    # set mode based on how script is called.
    settings.mode = mode
    add_output = arcpy.env.addOutputsToMap
    arcpy.env.addOutputsToMap = True

    # ensure our order by field exists
    fields = [f.name for f in arcpy.ListFields(input_features)]
    if order_by not in fields:
        utils.msg("Unable to find order_by field, `{}`".format(order_by))
        sys.exit()

    # The Input Feature Class
    # == input_features

    # Where clause that can be used to pull out only those rows with genetic
    # data from the feature class.

    # NOTE: This parameter is optional and was included because some data
    # sets may have individual IDs based on more than just genetics
    # (i.e. photo-id).  If your data only has genetic records, this
    # parameter can be left blank.
    # == where_clause

    # The Attribute Field that distinguishes the populations in the input.

    # NOTE: This parameter is optional and was included because some data
    # sets may have more than one population in it.
    # == order_by

    try:
        # test opening the file to which the data will be written
        output_file = open(output_name, "wb")
    except Exception as e:
        utils.msg("Unable to open output file", mtype='error', exception=e)
        sys.exit()

    # initialize our haplotypes data
    haplotypes = utils.Haplotype(input_features)

    # set up the environment depending on the output format
    if format_type == 'Excel':
        # xlwt will write directly; only opened to make sure we can write to the location
        output_file.close()
        try:
            import xlwt
        except ImportError as e:
            msg = "Writing Excel Spreadsheets requires the `xlwt` library, which is" + \
                    " included in ArcGIS 10.2+. If you'd like Excel support in " + \
                    " ArcGIS 10.1, please install `xlwt` manually from PyPI: " +  \
                    "   https://pypi.python.org/pypi/xlwt/0.7.3"
            utils.msg(msg, mtype='error', exception=e)
            sys.exit()

        # only replace Nones, keep numeric types
        from utils import xrep as xstr
        from utils import zrep as zstr
    else:
        # CSV output: convert all values to text
        from utils import zstr
        from utils import xstr
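    # Note: xstr/zstr (and xrep/zrep) live in `utils` and are not shown in this
    # snippet. Judging from the comments above, their assumed behaviour is
    # roughly: the CSV aliases turn every value (including None) into text,
    # while the Excel aliases only replace None and keep numeric types so that
    # xlwt can write real numbers. This is an inference, not confirmed here.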

    # list of lists to emulate the Excel sheet
    output_rows = []
    haplotype_rows = []

    utils.msg("Output file open and ready for data input")

    # Find our Loci columns.
    loci = utils.Loci(input_features)
    utils.msg("loci set: {0}".format(",".join(loci.names)))
    """
    header row contains (in order):
     - number of loci
     - number of samples
     - number of populations
     - size of pop 1
     - size of pop 2
     - ...

    second row contains:
     - three blank cells
     - loci 1 label
     - loci 2 label
     - ...

    DATA starts at C4. See "GenAlEx Guide.pdf" page 15.
    """

    # sql clause can be prefix or suffix; set up ORDER BY
    sql_clause = (None, "ORDER BY {0} ASC".format(order_by))
    # query the input_features in ascending order; filtering as needed
    selected_columns = order_by
    pops = OrderedDict()
    rows = arcpy.da.SearchCursor(input_features, selected_columns,
                                 where_clause, "", "", sql_clause)
    row_count = 0
    for row in rows:
        row_count += 1
        pop = row[0]
        if pop in pops:
            pops[pop] += 1
        else:
            pops[pop] = 1

    pop_counts = [xstr(p) for p in pops.values()]

    # Creating the GenAlEx header information required for the text file.
    output_rows += [[loci.count, row_count, len(pops.keys())] + pop_counts]

    # optional title, then a list of each population
    output_rows += [['', '', ''] + pops.keys()]

    # first two rows almost exactly the same
    haplotype_rows = copy.deepcopy(output_rows)
    # 'number of loci' should be 1 for haplotype-only data
    haplotype_rows[0][0] = 1

    loci_labels = []
    for (key, cols) in loci.fields.items():
        loci_labels += [key] + [''] * (len(cols) - 1)
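    # e.g. a hypothetical diploid locus 'LocusA' stored in two columns
    # contributes ['LocusA', ''] so its label spans both allele columns.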

    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_features)
    sr = desc.spatialReference
    if sr.type == 'Projected':
        loc_a = settings.x_coord
        loc_b = settings.y_coord
    if sr.type == 'Geographic':
        # geographic data expected to be (lat, lon)
        loc_a = settings.y_coord
        loc_b = settings.x_coord

    primary_columns = [primary_id, order_by, loc_a, loc_b]
    exclude = primary_columns + loci.columns + ['OBJECTID', 'Shape']
    unselected_columns = []
    for field in fields:
        # add any field not currently mapped
        if field not in exclude:
            unselected_columns.append(field)
    # extra fields should start with an empty line, the location, then any
    # columns not otherwise mapped.
    extra_columns = ['', loc_a, loc_b] + unselected_columns

    output_rows += [[primary_id, order_by] + loci_labels + extra_columns]
    if haplotypes.defined:
        haplotype_rows += [[primary_id, order_by, haplotypes.column]]
    utils.msg("Header info written to output")

    # Note on the where_clause: the SPLASH data contains both photo-id and
    # genetic records, but GenAlEx only uses genetic data, so the where_clause
    # ensures that only records with genetic data are copied to the output.
    selected_columns = primary_columns + unselected_columns + loci.columns

    for row in arcpy.da.SearchCursor(input_features, selected_columns,
                                     where_clause, "", "", sql_clause):
        id_field = row[0]  # as set on import
        pop = row[1]  # second column is 'order_by', or key column
        loc_a_val = row[2]
        loc_b_val = row[3]
        unselected_rows = list(row[4:len(unselected_columns) + 4])
        result_row = [id_field, pop]
        for (key, cols) in loci.fields.items():
            for col in cols:
                col_pos = selected_columns.index(col)
                result_row.append(row[col_pos])
        result_row = result_row + ["", loc_a_val, loc_b_val] + unselected_rows
        output_rows += [[zstr(s) for s in result_row]]
        if haplotypes.defined:
            row_val = row[selected_columns.index(haplotypes.column)]
            if row_val:
                haplotype_val = haplotypes.lookup[row_val]
            else:
                haplotype_val = 0
            haplotype_res = [id_field, pop, haplotype_val]
            haplotype_rows += [[zstr(s) for s in haplotype_res]]

    # depending on the requested output format, write the results accordingly
    if format_type == 'Excel':
        # initialize our spreadsheet
        workbook = xlwt.Workbook()

        # codominant data
        worksheet_co = workbook.add_sheet('Codominant')
        for (i, row) in enumerate(output_rows):
            for (j, val) in enumerate(row):
                worksheet_co.write(i, j, val)

        # the haplotype data
        if haplotypes.defined:
            worksheet_hap = workbook.add_sheet('Haplotype')
            for (i, row) in enumerate(haplotype_rows):
                for (j, val) in enumerate(row):
                    worksheet_hap.write(i, j, val)

            # write out a mapping of haplotype names
            worksheet_hap_map = workbook.add_sheet('Haplotype Map')
            worksheet_hap_map.write(0, 0, haplotypes.column)
            worksheet_hap_map.write(0, 1, 'Numeric Code')

            for (i, (hap, code)) in enumerate(haplotypes.lookup.items(),
                                              start=1):
                worksheet_hap_map.write(i, 0, hap)
                worksheet_hap_map.write(i, 1, code)

        workbook.save(output_name)
    else:
        # create a CSV writer
        writer = csv.writer(output_file,
                            dialect='excel',
                            quotechar='"',
                            quoting=csv.QUOTE_ALL)
        for row in output_rows:
            writer.writerow(row)

    utils.msg("Exported results saved to %s." % output_name)
    # Close Output text file
    output_file.close()

    if mode == 'toolbox':
        time.sleep(4)

    arcpy.env.addOutputsToMap = add_output
Example #25
0
def run_geodesic_gp(input_fc, unit_factor, output_matrix, row_count, is_spagedi):
    input_fc_mem = 'in_memory/input_fc'
    try:
        utils.msg("Copying features into memory...")
        arcpy.CopyFeatures_management(input_fc, input_fc_mem)
    except Exception as e:
        msg = "Unable to copy features into memory."
        utils.msg(msg, mtype='error', exception=e)
        sys.exit()

    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_fc_mem)
    sr = desc.spatialReference
    if sr.type not in ['Geographic', 'Projected']:
        msg = "This tools only works with geographic or projected data."
        utils.msg(msg, mtype='error')
        sys.exit()

    utils.msg("Finding all input points...")
    distance_matrix = OrderedDict()
    points = OrderedDict()
    records = arcpy.da.SearchCursor(input_fc_mem, ['OID@', 'SHAPE@XY'])
    for row in records:
        (fid, point) = row
        points[fid] = arcpy.Point(point[0], point[1])

    indicator = 0
    utils.msg("Computing distances...")
    for (fid, from_point) in points.items():
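        # progress is estimated from the OID itself, which assumes object IDs
        # run roughly from 1 to row_count (usually true after CopyFeatures).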
        pct_progress = int(fid / float(row_count)*100)
        if pct_progress > indicator:
            indicator = pct_progress
            utils.msg("{0}%".format(indicator))
        p1 = points[fid]
        distance_matrix[fid] = OrderedDict()
        for (to_fid, to_point) in points.items():
            if to_fid == fid:
                dist = 0
            elif to_fid in distance_matrix and \
                    distance_matrix[to_fid][fid] is not None:
                # here, modeling a symmetrical matrix
                dist = distance_matrix[to_fid][fid]
            else:
                p2 = points[to_fid]
                if p1.equals(p2):
                    dist = 0
                else:
                    # Each Polyline initialization must go through COM object
                    # creation, which ends up making this much more expensive
                    # than ArcObjects C++ or even comtypes calls.
                    line = arcpy.Polyline(arcpy.Array([p1, p2]), sr)
                    # getLength("GEODESIC") always returns meters; scale by
                    # unit_factor to get the expected result units.
                    dist = line.getLength("GEODESIC") * unit_factor
                    
            distance_matrix[fid][to_fid] = dist
    utils.msg("Distance matrix calculations complete.")

    # FIXME: generate it as a CSV file, then do TableToTable to pull it back in

    # Write the distance matrix out to disk.
    try:
        # copy the final result back to disk.
        utils.msg("Writing results to disk...")
        # The SPAGeDi matrix format is described in section 3.7 of the manual.
        if is_spagedi:
            first_header_cell = "M%i" % row_count
            sep = "\t"
        else:
            first_header_cell = ""
            sep = ","

        with open(output_matrix, 'w') as matrix_file:
            # initialize with our header row
            output_rows = [[first_header_cell] + \
                    [str(s) for s in distance_matrix.keys()]]
            for (fid, row) in distance_matrix.items():
                res = [str(fid)] + [utils.xstr(s) for s in row.values()]
                output_rows.append(res)
            for row in output_rows:
                matrix_file.write("{0}\n".format(sep.join(row)))
            if is_spagedi:
                matrix_file.write("END\n")

    except Exception as e:
        utils.msg("Error creating distance matrix.", mtype='error', exception=e)
        sys.exit()
def index(url,name,type_):
    from utils import xstr, samealphabetic, hassamealphabetic
    from reddit import load_subredditsFile, parse_subreddit_entry, create_default_subreddits, assemble_reddit_filter_string, ret_sub_info, ret_settings_type_default_icon

    ## this is where the main screen is created

    if not os.path.exists(subredditsFile):  #if not os.path.exists(subredditsFile):
        create_default_subreddits()

    #if os.path.exists(subredditsPickle):
    #    subreddits_dlist=load_dict(subredditsPickle)
    #log( pprint.pformat(subreddits_dlist, indent=1) )
    #for e in subreddits_dlist: log(e.get('entry_name'))

    #testing code
    #h="as asd [S]asdasd[/S] asdas "
    #log(markdown_to_bbcode(h))
    #addDir('test', "url", "next_mode", "", "subreddit" )

    #liz = xbmcgui.ListItem(label="test", label2="label2", iconImage="DefaultFolder.png")
    #u=sys.argv[0]+"?url=&mode=callwebviewer&type="
    #xbmcplugin.addDirectoryItem(handle=pluginhandle, url=u, listitem=liz, isFolder=False)

    #liz = xbmcgui.ListItem().fromString('Hello World')
    #xbmcplugin.addDirectoryItem(handle=pluginhandle, listitem=liz, isFolder=False)
    subredditsFile_entries=load_subredditsFile()

    subredditsFile_entries.sort(key=lambda y: y.lower())

    addtl_subr_info={}

    #this controls what infolabels will be used by the skin. very skin specific.
    #  for estuary, this lets infolabel:plot (and genre) show up below the folder
    #  giving us the opportunity to provide a shortcut_description about the shortcuts
    xbmcplugin.setContent(pluginhandle, "mixed") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    next_mode='listSubReddit'

    for subreddit_entry in subredditsFile_entries:
        #strip out the alias identifier from the subreddit string retrieved from the file so we can process it.
        #subreddit, alias = subreddit_alias(subreddit_entry)
        addtl_subr_info=ret_sub_info(subreddit_entry)

        entry_type, subreddit, alias, shortcut_description=parse_subreddit_entry(subreddit_entry)
        #log( subreddit + "   " + shortcut_description )

        #url= urlMain+"/r/"+subreddit+"/.json?"+nsfw+allHosterQuery+"&limit="+itemsPerPage
        icon=default_icon='' #addon_path+"/resources/skins/Default/media/"+ret_settings_type_default_icon(entry_type)

        #log('  %s             icon=%s' %(subreddit, icon))
        url= assemble_reddit_filter_string("",subreddit, "yes")
        #log("assembled================="+url)
        if subreddit.lower() in ["all","popular"]:
            addDir(subreddit, url, next_mode, icon, subreddit, { "plot": translation(30009) } )  #Displays the currently most popular content from all of reddit
        else:
            if addtl_subr_info: #if we have additional info about this subreddit
                #log(repr(addtl_subr_info))
                title=xstr(addtl_subr_info.get('title'))+'\n'
                display_name=xstr(addtl_subr_info.get('display_name'))
                if samealphabetic( title, display_name): title=''
                #if re.sub('\W+','', display_name.lower() )==re.sub('\W+','', title.lower()): title=''
                #display_name=re.sub('\W+','', display_name.lower() )
                #title=re.sub('\W+','', title.lower())

                header_title=xstr(addtl_subr_info.get('header_title'))
                public_description=xstr( addtl_subr_info.get('public_description'))

                if samealphabetic( header_title, public_description): public_description=''
                if samealphabetic(title,public_description): public_description=''
                #if hassamealphabetic(header_title,title,public_description): public_description=''

                if entry_type=='subreddit':
                    display_name='r/'+display_name
                shortcut_description='[COLOR cadetblue][B]%s[/B][/COLOR]\n%s[I]%s[/I]\n%s' %(display_name,title,header_title,public_description )

                icon=addtl_subr_info.get('icon_img')
                banner=addtl_subr_info.get('banner_img')
                header=addtl_subr_info.get('header_img')  #usually the small icon on upper left side on subreddit screen

                #log( subreddit + ' icon=' + repr(icon) +' header=' + repr(header))
                #picks the first item that is not None
                icon=next((item for item in [icon,banner,header] if item ), '') or default_icon

                addDirR(alias, url, next_mode, icon,
                        type_=subreddit,
                        listitem_infolabel={ "plot": shortcut_description },
                        file_entry=subreddit_entry,
                        banner_image=banner )
            else:
                addDirR(alias, url, next_mode, icon, subreddit, { "plot": shortcut_description }, subreddit_entry )

    addDir("[B]- "+translation(30001)+"[/B]", "", 'addSubreddit', "", "", { "plot": translation(30006) } ) #"Customize this list with your favorite subreddit."
    addDir("[B]- "+translation(30005)+"[/B]", "",'searchReddits', "", "", { "plot": translation(30010) } ) #"Search reddit for a particular post or topic

    xbmcplugin.endOfDirectory(pluginhandle)
Example #27
0
def main(input_features=None, where_clause=None, order_by=None, 
        output_name=None, mode=settings.mode):
   
    # get the spatial reference of our input, determine the type
    desc = arcpy.Describe(input_features)
    sr = desc.spatialReference
    if sr.type not in ['Geographic', 'Projected']:
        utils.msg("This tools only works with geographic or projected data.", mtype='error')
        sys.exit()
   
    # Find our Loci columns. 
    loci = utils.Loci(input_features)
    utils.msg("loci set: {0}".format(",".join(loci.names)))

    # sql clause can be prefix or suffix; set up ORDER BY
    sql_clause = (None, "ORDER BY {0} ASC".format(order_by))
    pops = OrderedDict()
    # query the input_features in ascending order; filtering as needed
    rows = arcpy.da.SearchCursor(input_features, order_by, where_clause, "", "", sql_clause)
    row_count = 0
    for row in rows:
        row_count += 1
        pop = row[0]
        if pop in pops:
            pops[pop] += 1
        else:
            pops[pop] = 1

    # Start with any number of header lines describing what this
    # file is; each line should be prefixed with //.
    comments = """// Export to SPAGeDi from the input `{input_features}`. 
// Export occurred on {datetime}
// Coordinates are in {sr_name}, a {sr_type} coordinate system.""".format(
        input_features=input_features, datetime=datetime.now(), \
        sr_name=sr.name, sr_type=sr.type)

    comment_row = [comments]

    """ 
    There are three possible ways to specify population (SPAGeDi manual, 2.4):
    
    1) as categorical groups, where one population includes all individuals 
       sharing the same categorical variable.

    2) as spatial groups, where a spatial group includes all individuals 
       sharing the same spatial coordinates and following each other in 
       the data file.

    3) as spatio-categorical groups, where a spatio-categorical group 
       includes all individuals belonging to both the same spatial 
       group and categorical group. When populations are defined using the 
       categorical variable, the spatial coordinates of a given population 
       are computed by averaging the coordinates of the individuals it contains.
    """

    # FIXME: another spot where 'observation_id' differs from 'individual_id';
    # deduplicate in order to have one value PER individual PER population.

    categories = len(pops.keys())

    # FIXME: presumes only two coords, SPAGeDi supports three. Extract depth?
    if sr.type == 'Geographic':
        # Assumes decimal degrees; based on the statement 'if the number of
        # spatial coordinates is set to -2, latitudes and longitudes must be
        # given in degrees with decimal, using negative numbers for Southern
        # latitude or Western longitude.'
        xy_type = -2
        # geographic data expected to be (lat, lon)
        loc_a = settings.y_coord
        loc_b = settings.x_coord
    else:
        # two coordinates in a projected space
        xy_type = 2
        loc_a = settings.x_coord
        loc_b = settings.y_coord
   
    num_loci = loci.count

    # FIXME: loci number of digits, defined in SPAGeDi manual 3.1 as "number 
    # of digits used to code one allele (1 to 3); or set a value =0 (in fact 
    # the value given for missing data) in the case of dominant markers".
    loci_digits = 2 

    # get the maximum number of different values per loci
    max_ploidy = max(map(len, loci.fields.values()))

    # 1st line: set of 6 numbers separated by a tabulation representing: 
    header_row = [
        row_count,   # number of individuals
        categories,  # number of categories
        xy_type,     # number of coordinates
        num_loci,    # number of loci 
        loci_digits, # number of digits used to code one allele
        max_ploidy   # max ploidy in the data
    ]
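    # e.g. a hypothetical survey of 120 individuals in 3 populations with
    # lat/lon coordinates, 8 loci, 2-digit alleles and diploid data would
    # produce the line (tab-separated in the output):
    #   120  3  -2  8  2  2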

    # 2nd line: # of distance intervals; upper distance of each interval.

    # note 1: alternatively you can enter only the desired number of intervals
    #         preceded by a negative sign; the program then defines the 
    #         n maximal distances in such a way that the number of pairwise 
    #         comparisons within each distance interval is approx. constant.
    # note 2: if you do not wish distance intervals, put 0
    # note 3: if you use latitude + longitude, distance intervals 
    #         must be given in km.

    # TODO: what is this and how do we use it effectively?
    distances_row = [0]
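    # Per the notes above, [0] means "no distance intervals"; a hypothetical
    # [-10] would ask SPAGeDi to pick 10 intervals itself, and something like
    # [25, 50, 100] would set explicit upper bounds (in km for lat/lon data).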

    # 3rd line: column labels (<=15 characters).
    base_cols = [settings.id_field, order_by, loc_a, loc_b] 
    labels_row = base_cols + loci.columns

    # where_clause is used to ensure only those records with genetic data 
    # are copied to the output.
    selected_columns = base_cols + loci.columns
    rows = arcpy.da.SearchCursor(input_features, selected_columns, \
            where_clause, "", "", sql_clause)

    data_rows = []
    for row in rows:
        # our two string fields can't contain spaces, based on Autocio.c: 3857 
        id_field = str(row[0]).replace(" ", "_")
        pop_field = str(row[1]).replace(" ", "_") # 'order_by', or population 'group by'
        loc_a_val = row[2]
        loc_b_val = row[3]

        data_row = [id_field, pop_field, loc_a_val, loc_b_val]

        for (key, cols) in loci.fields.items():
            # Loci data can be encoded in a number of formats, the values
            # separated by any non-numeric values (SPAGeDi manual, 3.2.1). 
            # Here, we use spaces.

            loci_values = []
            for col in cols:
                col_pos = selected_columns.index(col)
                allele_val = row[col_pos]
                # FIXME: handle missing and incomplete genotypes.
                if allele_val is None:
                    allele_val = "0"
                else:
                    allele_val = str(allele_val)
                loci_len = len(allele_val) 
                if loci_len > loci_digits:
                    loci_digits = loci_len
                loci_values.append(allele_val)

            data_row.append(" ".join(loci_values))
        data_rows.append(data_row)

    # update header based on revised loci_digits
    header_row[4] = loci_digits

    # Write the assembled SPAGeDi rows out to disk.
    try:
        # copy the final result back to disk.
        utils.msg("Writing results to disk...")

        output_rows = [comment_row, header_row, distances_row, labels_row] + \
                data_rows

        # SPAGeDi expects tab-delimited outputs
        sep = "\t"

        with open(output_name, 'w') as output_file:
            for raw_row in output_rows:
                # convert all data to strings
                row = [utils.xstr(s) for s in raw_row]
                output_file.write("{0}\n".format(sep.join(row)))

            # after the last line of individual data the word END is required.
            output_file.write("END\n")

    except Exception as e:
        utils.msg("Error creating output file.", mtype='error', exception=e)
        sys.exit()

    utils.msg("Exported results saved to %s." % output_name)
    if mode == 'toolbox':
        time.sleep(4)
def index(url, name, type_):
    from utils import xstr, samealphabetic, hassamealphabetic
    from reddit import load_subredditsFile, parse_subreddit_entry, create_default_subreddits, assemble_reddit_filter_string, ret_sub_info, ret_settings_type_default_icon

    ## this is where the main screen is created

    if not os.path.exists(
            subredditsFile):  #if not os.path.exists(subredditsFile):
        create_default_subreddits()

    #if os.path.exists(subredditsPickle):
    #    subreddits_dlist=load_dict(subredditsPickle)
    #log( pprint.pformat(subreddits_dlist, indent=1) )
    #for e in subreddits_dlist: log(e.get('entry_name'))

    #testing code
    #h="as asd [S]asdasd[/S] asdas "
    #log(markdown_to_bbcode(h))
    #addDir('test', "url", "next_mode", "", "subreddit" )

    #liz = xbmcgui.ListItem(label="test", label2="label2", iconImage="DefaultFolder.png")
    #u=sys.argv[0]+"?url=&mode=callwebviewer&type="
    #xbmcplugin.addDirectoryItem(handle=pluginhandle, url=u, listitem=liz, isFolder=False)

    #liz = xbmcgui.ListItem().fromString('Hello World')
    #xbmcplugin.addDirectoryItem(handle=pluginhandle, listitem=liz, isFolder=False)
    subredditsFile_entries = load_subredditsFile()

    subredditsFile_entries.sort(key=lambda y: y.lower())

    addtl_subr_info = {}

    #this controls what infolabels will be used by the skin. very skin specific.
    #  for estuary, this lets infolabel:plot (and genre) show up below the folder
    #  giving us the opportunity to provide a shortcut_description about the shortcuts
    xbmcplugin.setContent(
        pluginhandle, "mixed"
    )  #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    next_mode = 'listSubReddit'

    for subreddit_entry in subredditsFile_entries:
        #strip out the alias identifier from the subreddit string retrieved from the file so we can process it.
        #subreddit, alias = subreddit_alias(subreddit_entry)
        addtl_subr_info = ret_sub_info(subreddit_entry)

        entry_type, subreddit, alias, shortcut_description = parse_subreddit_entry(
            subreddit_entry)
        #log( subreddit + "   " + shortcut_description )

        #url= urlMain+"/r/"+subreddit+"/.json?"+nsfw+allHosterQuery+"&limit="+itemsPerPage
        icon = default_icon = ''  #addon_path+"/resources/skins/Default/media/"+ret_settings_type_default_icon(entry_type)

        #log('  %s             icon=%s' %(subreddit, icon))
        url = assemble_reddit_filter_string("", subreddit, "yes")
        #log("assembled================="+url)
        if subreddit.lower() in ["all", "popular"]:
            addDir(subreddit, url, next_mode, icon, subreddit, {
                "plot": translation(30009)
            })  #Displays the currently most popular content from all of reddit
        else:
            if addtl_subr_info:  #if we have additional info about this subreddit
                #log(repr(addtl_subr_info))
                title = xstr(addtl_subr_info.get('title')) + '\n'
                display_name = xstr(addtl_subr_info.get('display_name'))
                if samealphabetic(title, display_name): title = ''
                #if re.sub('\W+','', display_name.lower() )==re.sub('\W+','', title.lower()): title=''
                #display_name=re.sub('\W+','', display_name.lower() )
                #title=re.sub('\W+','', title.lower())

                header_title = xstr(addtl_subr_info.get('header_title'))
                public_description = xstr(
                    addtl_subr_info.get('public_description'))

                if samealphabetic(header_title, public_description):
                    public_description = ''
                if samealphabetic(title, public_description):
                    public_description = ''
                #if hassamealphabetic(header_title,title,public_description): public_description=''

                if entry_type == 'subreddit':
                    display_name = 'r/' + display_name
                shortcut_description = '[COLOR cadetblue][B]%s[/B][/COLOR]\n%s[I]%s[/I]\n%s' % (
                    display_name, title, header_title, public_description)

                icon = addtl_subr_info.get('icon_img')
                banner = addtl_subr_info.get('banner_img')
                header = addtl_subr_info.get(
                    'header_img'
                )  #usually the small icon on upper left side on subreddit screen

                #log( subreddit + ' icon=' + repr(icon) +' header=' + repr(header))
                #picks the first item that is not None
                icon = next((item for item in [icon, banner, header] if item),
                            '') or default_icon

                addDirR(alias,
                        url,
                        next_mode,
                        icon,
                        type_=subreddit,
                        listitem_infolabel={"plot": shortcut_description},
                        file_entry=subreddit_entry,
                        banner_image=banner)
            else:
                addDirR(alias, url, next_mode, icon, subreddit,
                        {"plot": shortcut_description}, subreddit_entry)

    addDir("[B]- " + translation(30001) + "[/B]", "", 'addSubreddit', "", "",
           {"plot": translation(30006)
            })  #"Customize this list with your favorite subreddit."
    addDir("[B]- " + translation(30005) + "[/B]", "", 'searchReddits', "", "",
           {"plot": translation(30010)
            })  #"Search reddit for a particular post or topic

    xbmcplugin.endOfDirectory(pluginhandle)
Example #29
0
			print "START TIME: [" + time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) + "]" 
			print "START COMMAND: " + COMMAND
			outputpath, outputname, user, password = export_hive_to_file(COMMAND, utils.listToString(parametersAliasList), type, outputname, ftpuser)
			try:
				email("Report: Data Export Utility","complete!","smithmicro.com",emailaddr,"{'text':'email from export report', 'html':'<p>Your file is now complete. To access your report, click on this link: <a href=\"ftp://"+os.uname()[1]+"/"+type+"/"+outputname+"\">"+outputname+"</a> and enter your username and password below.</p><p>Username: <b>"+user.strip()+"</b><br />Password: <b>"+password.strip()+"</b></p>'}")
			except:
				print "The Email Server has Raised an Error"
			if bool(scp) == True: 	
				email_addr = user.strip()	
				scp_method(email_addr, outputname, target_user, target_server, target_dir)
	except IOError, e:
		print "I/O error(%(error0)s):(%(error1)s)" % { "error0":e[0],"error1":e[1] }
		email("Export_Report","failed","analytics.smithmicro.com",emailaddr,"\nError:\nI/O error("+utils.xstr(e[0])+"): "+xstr(e[1])+"\n"+utils.xstr(sys.exc_info()[0]))
	except NameError, e:
		print "An internal error has occured, possibly user input or system is temporarily unavailable."
		email("Export_Report","failed","analytics.smithmicro.com",emailaddr,"\nError:\nNameError("+utils.xstr(sys.exc_info()[0])+utils.xstr(e[0])+"): "+"An internal error has occured, possibly user input or system is temporarily unavailable. Please contact your administrator or SmithMicro at [email protected].")
		raise
	except:
		print "Unexpected Error: " + utils.xstr(sys.exc_info()[0])
		email("Export_Report","failed","analytics.smithmicro.com",emailaddr,"\nError:\n"+utils.xstr(sys.exc_info()[0]))
	print "\n+++++===HIVE END COMMAND===+++++++: " + COMMAND + "\n"
	return COMMAND
			
def main(argv=None):
  if argv is None:
    argv = sys.argv[1:]
  else:
    argv = argv.split()
  try:
    #print 'ARGV      :', argv[:]
    try: