Example #1
1
class QuestionTable(object):
    """Maps a question identifier to the QuestionList of questions sharing it,
    preserving insertion order."""

    def __init__(self):
        # identifier -> QuestionList, in insertion order
        self._table = OrderedDict()

    def __iter__(self):
        # Iterating the table yields identifiers, like a dict.
        return iter(self._table)

    def questions(self):
        """Return a flat list of every question across all identifiers."""
        collected = []
        for bucket in self._table.values():
            collected.extend(bucket)
        return collected

    def identifiers(self):
        """Return the known identifiers."""
        return self._table.keys()

    def add(self, question):
        """Append *question* to the list stored under its identifier,
        creating a fresh QuestionList on first sight."""
        if question.identifier in self._table:
            bucket = self._table[question.identifier]
        else:
            bucket = QuestionList()
            self._table[question.identifier] = bucket
        bucket.append(question)

    def get(self, identifier):
        """Return the visible question for *identifier*, or None when the
        identifier is unknown or its list is empty."""
        bucket = self._table.get(identifier)
        return bucket.getVisibleQuestion() if bucket else None

    def getType(self, identifier):
        """Return the type of the first question under *identifier*, or None."""
        bucket = self._table.get(identifier)
        return bucket[0].type if bucket else None

    def getQuestionList(self, identifier):
        """Return the raw QuestionList for *identifier*, or None."""
        return self._table.get(identifier)
Example #2
1
def _prepareFeatureLangSys(langTag, langSys, table, features, scriptTag, scriptStatus, getStatus):
    # This is a part of prepareFeatures
    """Accumulate one LangSys's features and lookups into the nested ``features`` map.

    Structure built:
      ``features``:  featureTag -> (scripts, printFeature)
      ``scripts``:   scriptTag  -> (languages, printScript)
      ``languages``: langTag    -> (lookups, printLang)
      ``lookups``:   list of (lookupIdx, printLookup) pairs.

    ``getStatus(obj, parentRequired)`` returns a (print?, required?) pair;
    required-ness cascades script -> language -> feature -> lookup.
    """
    printScript, scriptRequired, = scriptStatus
    printLang, langRequired = getStatus((langTag, langSys), scriptRequired)
    for featureIdx in langSys.FeatureIndex:
        featureRecord = table.table.FeatureList.FeatureRecord[featureIdx]
        printFeature, featureRequired = getStatus(featureRecord, langRequired)

        featureTag = featureRecord.FeatureTag
        # Create the per-feature bucket on first sight of this tag.
        scripts, _ = features.get(featureTag, (None, None))
        if scripts is None:
            scripts = OrderedDict()
            features[featureTag] = (scripts, printFeature)

        # Create the per-script bucket on first sight of this script.
        languages, _ = scripts.get(scriptTag, (None, None))
        if languages is None:
            languages = OrderedDict()
            scripts[scriptTag] = (languages, printScript)

        # Create the per-language lookup list on first sight of this language.
        lookups, _ = languages.get(langTag, (None, None))
        if lookups is None:
            lookups = []
            languages[langTag] = (lookups, printLang)

        # Record every lookup index the feature references, with print status.
        for lookupIdx in featureRecord.Feature.LookupListIndex:
            lookup = table.table.LookupList.Lookup[lookupIdx]
            printLookup, _ = getStatus(lookup, featureRequired)
            lookups.append((lookupIdx, printLookup))
Example #3
1
    def get_constraint_updates(self):
        """Build an OrderedDict mapping shared parameters to constrained
        (symbolic) replacement expressions, applied after gradient updates.

        Constraints applied, in order: unit-std normalization of Wv,
        per-parameter max clipping, per-parameter min clipping, and
        reduction of lambd to a scalar (broadcast back to its shape).
        """

        updates = OrderedDict()

        ## unit-variance constraint on hidden-unit activations ##
        if self.flags["unit_std"]:
            updates[self.Wv] = self.Wv / self.avg_hact_std

        ## clip parameters to maximum values (if applicable)
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            # T.clip(x, x, v): lower bound equals x itself, so this only caps at v.
            # NOTE(review): unlike the min-clip loop below, this assigns from the
            # raw param rather than updates.get(param, param), so it would
            # overwrite an earlier update (e.g. the Wv unit-std one) -- confirm.
            updates[param] = T.clip(param, param, v)

        ## clip parameters to minimum values (if applicable)
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            # Compose with any pending update for this param, then floor at v.
            updates[param] = T.clip(updates.get(param, param), v, param)

        ## constrain lambd to be a scalar
        if self.flags["scalar_lambd"]:
            lambd = updates.get(self.lambd, self.lambd)
            updates[self.lambd] = T.mean(lambd) * T.ones_like(lambd)

        return updates
Example #4
1
def parse_atrack_msg(text):
    """Parse an ATrack "@P" position report (comma separated) into an
    OrderedDict of normalized fields.

    :param text: raw comma-separated message payload.
    :return: OrderedDict with device/driver/report ids, timestamp, hdop,
        latitude/longitude (degrees), heading, speed, odometer and two
        temperature channels.  Missing fields fall back to defaults
        (coordinates default to roughly Singapore: lat 1.3, lon 103.8).
    """
    # BUG FIX: splitting the original names string on "," left a leading space
    # on most names (" Lon", " Report ID", ...), so the out.get("...") lookups
    # below could never match and always returned their defaults.  Strip each.
    field_names = [
        name.strip()
        for name in (
            "@P,CRC,L,Seq.ID,UNID,GPS time, RTC time, Position time, Lon, "
            "Lat, Heading, Report ID, Odo, HDOP, DI, Speed, DO, AI, DVID, "
            "1st Temp, 2nd Temp, Text"
        ).split(",")
    ]
    field_vals = text.split(",")
    logger.debug("field_vals: {}".format(field_vals))
    out = OrderedDict(zip(field_names, field_vals))
    out_processed = OrderedDict(
        [
            ("device_id", out.get("UNID", -1)),
            ("driver_id", out.get("DVID", -1)),
            ("report_id", out.get("Report ID", -1)),
            ("timestamp", out.get("Position time", int(time.time()))),
            ("hdop", out.get("HDOP", 990)),
            # BUG FIX: latitude previously read "Lon" and longitude read
            # "Lat".  The defaults (1.3 / 103.8 degrees after the 1e6 scale)
            # confirm the intended pairing.
            ("latitude", float(out.get("Lat", 1300000)) / 1e6),
            ("longitude", float(out.get("Lon", 103800000)) / 1e6),
            ("heading", out.get("Heading", -1)),
            ("speed", out.get("Speed", -1)),
            ("odometer", out.get("Odo", -1)),
            ("temperature_1", out.get("1st Temp", -1)),
            ("temperature_2", out.get("2nd Temp", -1)),
        ]
    )
    logger.debug("Parse result:\n{}".format(out_processed))
    return out_processed
Example #5
1
def writeTotals(journals, folders):
    """Write per-field journal-count CSVs into two output locations.

    :param journals: iterable of fields; each field is (journal_groups, name),
        where journal_groups[i] is a list of articles and journal_groups[i][0][1]
        looks like the journal's title -- confirm against the caller.
    :param folders: two-element sequence of output directory prefixes;
        folders[0] gets "title,count" rows, folders[1] gets "index,count" rows.
    """

    # Declare an ordered dict that will hold the field, then the journal totals in that field
    total_list = OrderedDict()

    for field in journals:
        # print 'Field: ',field[1]
        totals = []
        for i in range(len(field[0])):
            # Each entry: [journal title, number of articles in that journal]
            # print 'Journal: ',field[0][i][0][1],' Length',len(field[0][i])
            totals.append([field[0][i][0][1], len(field[0][i])])
        total_list[field[1]] = totals

    # First pass: one CSV per field with quoted "title",count rows.
    for key in total_list:
        writeto = folders[0] + key.split("_")[0] + "/"
        # NOTE(review): the next line discards the per-key subfolder computed
        # above; the subfolder behaviour looks deliberately disabled -- confirm
        # before removing either line.
        writeto = folders[0]
        make_sure_path_exists(writeto)
        print "Key: ", key
        # with open(writeto+key.split('_')[1]+'.csv','w') as outfile:
        with open(writeto + key.split(".")[0] + ".csv", "w") as outfile:
            for i in range(len(total_list.get(key))):
                outfile.write(str('"' + total_list.get(key)[i][0]) + '",' + str(total_list.get(key)[i][1]) + "\n")

    # Second pass: same counts into folders[1], rows numbered instead of titled.
    for key in total_list:
        writeto = folders[1] + key.split("_")[0] + "/"
        writeto = folders[1]
        make_sure_path_exists(writeto)
        # with open(writeto+key.split('_')[1]+'.csv','w') as outfile:
        with open(writeto + key.split(".")[0] + ".csv", "w") as outfile:
            for i in range(len(total_list.get(key))):
                outfile.write(str(i + 1) + "," + str(total_list.get(key)[i][1]) + "\n")
Example #6
1
    def get(self, request, *args, **kwargs):
        """Handle the ePay server-to-server payment callback.

        Every callback is persisted as an EpayCallback first.  The raw query
        string is re-parsed in order so MD5 validation sees the parameters
        exactly as sent.  Returns 400 on any mismatch or invalid hash, and
        "OK" once the order is marked paid (or was already paid).
        """
        callback = EpayCallback.objects.create(payload=request.GET)

        if "orderid" in request.GET:
            # Re-split the raw query string (not request.GET) to preserve the
            # original parameter order for the MD5 check.
            query = OrderedDict(map(lambda x: tuple(x.split("=")), request.META["QUERY_STRING"].split("&")))
            order = get_object_or_404(Order, pk=query.get("orderid"))
            # The callback must refer to the order this view instance is bound to.
            if order.pk != self.get_object().pk:
                print "bad epay callback, orders do not match!"
                return HttpResponse(status=400)

            if validate_epay_callback(query):
                callback.md5valid = True
                callback.save()
            else:
                print "bad epay callback!"
                return HttpResponse(status=400)

            if order.paid:
                ### this order is already paid, perhaps we are seeing a double callback?
                return HttpResponse("OK")

            ### epay callback is valid - has the order been paid in full?
            # NOTE(review): the * 100 implies ePay reports amounts in
            # hundredths of the currency unit -- confirm against ePay docs.
            if int(query["amount"]) == order.total * 100:
                ### create an EpayPayment object linking the callback to the order
                EpayPayment.objects.create(order=order, callback=callback, txnid=query.get("txnid"))
                ### and mark order as paid (this will create tickets)
                order.mark_as_paid()
            else:
                print "valid epay callback with wrong amount detected"
        else:
            return HttpResponse(status=400)

        return HttpResponse("OK")
Example #7
1
class Config(object):
    """Machine configuration: axes keyed by name (x, y, z sorted first, the
    rest alphabetically) and named positions."""

    def __init__(self, axes, positions):
        def key_fn(pair):
            # Sort group 0: x/y/z in that order; group 1: others by name.
            label = pair[0]
            try:
                rank = "xyz".index(label)
            except ValueError:
                return (1, label)
            return (0, rank)

        named_axes = sorted(((axis.name, axis) for axis in axes), key=key_fn)
        self.axes = OrderedDict(named_axes)
        self.positions = dict((pos.name, pos) for pos in positions)

    @property
    def x(self):
        """The x axis, or None when absent."""
        return self.axes.get("x")

    @property
    def y(self):
        """The y axis, or None when absent."""
        return self.axes.get("y")

    @property
    def z(self):
        """The z axis, or None when absent."""
        return self.axes.get("z")

    @property
    def home(self):
        """The "home" position, or None when absent."""
        return self.positions.get("home")

    @property
    def origin(self):
        """The "origin" position, or None when absent."""
        return self.positions.get("origin")
Example #8
0
class Args(object):
    """Parse command-line style "key=value" tokens into an ordered mapping.

    Each positional argument is split on the first "=" and leading dashes are
    stripped from the key.  By default repeated keys accumulate their values
    in a list; pass flatten=True to keep a single (last-wins) value per key.
    """

    def __init__(self, *args, **kwargs):
        self._items = OrderedDict()
        for arg in args:
            k, _, v = arg.partition("=")
            k = k.lstrip("-")
            if not kwargs.get("flatten"):
                # Default mode: collect every value seen for this key.
                self._items.setdefault(k, []).append(v)
            else:
                # Flatten mode: last value wins.
                self._items[k] = v

    def get(self, k, default=None):
        """Return the stored value for *k*, or *default* when absent."""
        return self._items.get(k, default)

    def items(self):
        """Return the (key, value) pairs in insertion order."""
        return self._items.items()

    def __getattr__(self, k, default=None):
        # Attribute access falls back to the parsed mapping (None if absent).
        return self._items.get(k, default)

    def __contains__(self, k):
        return k in self._items

    def __getitem__(self, k):
        # BUG FIX: previously indexed with the literal string "k" instead of
        # the key that was passed in, so every subscript raised KeyError.
        return self._items[k]
Example #9
0
def linear_seq_clusterer(stats, decisions, key="iqrs", delta=0.25):
  """Greedily cluster each stat's value sequence and print a PrettyTable
  mapping every position (decision) to the cluster ids it landed in.

  A new cluster starts whenever a value deviates from the running cluster
  mean by more than delta * mean.  point_to_cluster accumulates, per
  position index, the set of cluster ids observed across all stats.

  NOTE(review): cluster ids restart from 1 for every stat, so ids from
  different stats are merged into one set per position -- confirm intended.
  """
  point_to_cluster = OrderedDict()
  for stat in stats:
    vals = stat[key]
    clusters = []
    cluster_prev = [vals[0]]
    clusters.append(cluster_prev)
    current_cluster = point_to_cluster.get(0, set())
    current_cluster.add(len(clusters))
    point_to_cluster[0] = current_cluster
    for index, val in enumerate(vals[1:]):
      prev_mean = np.mean(cluster_prev)
      # Start a new cluster when val strays more than delta (relative) from
      # the running mean of the current cluster.
      if abs(prev_mean - val) > delta * prev_mean:
        cluster_prev = [val]
        clusters.append(cluster_prev)
      else:
        cluster_prev.append(val)
      current_cluster = point_to_cluster.get(index+1, set())
      current_cluster.add(len(clusters))
      point_to_cluster[index+1] = current_cluster
  columns = ["Cluster ID", "Decision Name"]
  table = PrettyTable(columns)
  prev_val = None
  # NOTE: this loop rebinds `key`, shadowing the keyword parameter above.
  for key, val in point_to_cluster.items():
    current_val = ",".join(map(str, list(val)))
    # Ditto-mark consecutive rows that share the same cluster-id set.
    if current_val == prev_val:
      row = ["\"", decisions[key].name]
    else:
      row = [current_val, decisions[key].name]
      prev_val = current_val
    table.add_row(row)
  print("\n### Decisions Clustered")
  print("```")
  print(table)
  print("```")
Example #10
0
class VoidNode(object):
    """Minimal dataflow node: a named object holding ordered registries of
    input and output ports, with a logging-only evaluate hook."""

    def __init__(self, name):
        super(VoidNode, self).__init__()
        # Ordered so ports enumerate in registration order.
        self.inputPorts = OrderedDict()
        self.outputPorts = OrderedDict()
        self.initPorts()
        self.name = name

    def initPorts(self):
        """Subclass hook for registering ports; the base node registers none."""
        pass

    def getInputPort(self, name):
        """Return the input port called *name*, or None if not registered."""
        return self.inputPorts.get(name)

    def getOutputPort(self, name):
        """Return the output port called *name*, or None if not registered."""
        return self.outputPorts.get(name)

    def addInputPort(self, name):
        """Create an InputPort owned by this node and register it under *name*."""
        created = InputPort(name)
        created.owner = self
        self.inputPorts[name] = created

    def addOutputPort(self, name):
        """Create an OutputPort owned by this node and register it under *name*."""
        created = OutputPort(name)
        created.owner = self
        self.outputPorts[name] = created

    def evaluate(self):
        """Log that this node is being evaluated; subclasses do the real work."""
        logger.debug("Evaluating {}".format(self))
Example #11
0
def _update_rdc_restraints(restraint_list, alpha, timestep, force_dict):
    # split restraints into rdc and non-rdc
    rdc_restraint_list = [r for r in restraint_list if isinstance(r, RdcRestraint)]
    nonrdc_restraint_list = [r for r in restraint_list if not isinstance(r, RdcRestraint)]

    # if we have any rdc restraints
    if rdc_restraint_list:
        rdc_force = force_dict["rdc"]
        # make a dictionary based on the experiment index
        expt_dict = OrderedDict()
        for r in rdc_restraint_list:
            expt_dict.get(r.expt_index, []).append(r)

        # loop over the experiments and update the restraints
        index = 0
        for experiment in expt_dict:
            rests = expt_dict[experiment]
            for r in rests:
                scale = r.scaler(alpha) * r.ramp(timestep)
                rdc_force.updateRdcRestraint(
                    index,
                    r.atom_index_1 - 1,
                    r.atom_index_2 - 1,
                    r.kappa,
                    r.d_obs,
                    r.tolerance,
                    r.force_const * scale,
                    r.weight,
                )
                index = index + 1

    return nonrdc_restraint_list
Example #12
0
def run():
    """Run the agent for a finite number of trials.

    Sweeps gamma over [0.0, 0.9) in 0.1 steps; for each gamma runs 10
    simulations of 50 trials and accumulates success count and average
    reward, then reports the per-gamma 10-run averages.
    """

    # Set up environment and agent
    gammas = [x / 10.0 for x in xrange(0, 10)]
    gamma_to_success_rate = OrderedDict()
    gamma_to_average_reward = OrderedDict()
    # Run a simulation for each sample gamma value to test which
    # choice of gamma results in the most successful agent
    for gamma in gammas:
        # Run 10 trials over each choice of gamma to get average performance metrics
        for trial in xrange(10):
            e = Environment()  # create environment (also adds some dummy traffic)
            a = e.create_agent(LearningAgent, (gamma))  # create agent
            e.set_primary_agent(a, enforce_deadline=True)  # set agent to track

            # Now simulate it
            sim = Simulator(e, update_delay=0.0)  # reduce update_delay to speed up simulation
            sim.run(n_trials=50)  # press Esc or close pygame window to quit

            # NOTE(review): keyed by a.GAMMA -- presumably equal to the gamma
            # passed into create_agent; confirm against LearningAgent.
            gamma_to_success_rate[a.GAMMA] = gamma_to_success_rate.get(a.GAMMA, 0) + sim.env.successful_trials
            gamma_to_average_reward[a.GAMMA] = (
                gamma_to_average_reward.get(a.GAMMA, 0) + a.get_average_reward_per_action()
            )

        # Get the average of the 10 trials
    for gamma in gamma_to_average_reward.keys():
        gamma_to_average_reward[gamma] = gamma_to_average_reward[gamma] / 10
        gamma_to_success_rate[gamma] = gamma_to_success_rate[gamma] / 10
    print gamma_to_average_reward
    print gamma_to_success_rate
Example #13
0
class PlainTextEvent(Event):
    """Event whose content body is plain-text "key: value" pairs."""

    def __str__(self):
        # Pick the densest template the populated fields allow:
        # subclass+info > subclass > info > name only.
        template = "{type} {name}"
        if self.subclass:
            if self.info:
                template = "{type} {name} {subclass} {info}"
            else:
                template = "{type} {name} {subclass}"
        elif self.info:
            template = "{type} {name} {info}"
        return template.format(type=self.type, name=self.name, info=self.info, subclass=self.subclass)

    def parse(self):
        """Merge the content's key/value pairs into self.dict (sorted
        case-insensitively by key) and hand the event to subscribers."""
        # content contains key:value pairs for plain text events
        content_dict = utils.EventDict(self.content)
        self.dict.update(content_dict)
        self.dict = OrderedDict(sorted(self.dict.items(), key=lambda t: t[0].lower()))
        self.content = content_dict.content
        # delegate the event to any subscription functions
        self.delegate()

    def delegate(self):
        """Forward this event to the protocol's plain-text delegator."""
        self.protocol._eventPlainTextDelegator(self, self.name, subclass=self.subclass, content=self.content)

    @property
    def name(self):
        # "Event-Name" header; empty string when absent.
        return self.dict.get("Event-Name", "").strip()

    @property
    def info(self):
        # "Event-Info" header; empty string when absent.
        return self.dict.get("Event-Info", "").strip()

    @property
    def subclass(self):
        # "Event-Subclass" header; empty string when absent.
        return self.dict.get("Event-Subclass", "").strip()
Example #14
0
class Question:
    """Class for each question in a questionnaire."""
    # BUG FIX: the docstring above previously sat AFTER __slots__, where it is
    # just a dead string expression (the class __doc__ stayed None).

    # __slots__ keeps instances lightweight; "answers" maps aid -> Answer.
    __slots__ = ["qid", "title", "tags", "type", "restriction", "answers"]

    def __init__(self, qid, title=None, tags=None, _type="_def", restriction=None):
        # _type is stored as self.type (underscore avoids shadowing the builtin).
        self.qid, self.title, self.tags, self.type, self.restriction = qid, title, tags, _type, restriction
        self.answers = OrderedDict()

    def add_answer(self, aid, score=0, content=None, _type=None):
        """Register (or replace) the Answer stored under *aid*."""
        self.answers[aid] = Answer(aid, score, content, _type)

    def get_answer_score(self, aid):
        """Return the score for *aid*, or 0 when the answer is unknown."""
        a = self.answers.get(aid, None)
        return a.score if a is not None else 0

    def get_answer_content(self, aid):
        """Return the content for *aid*; unknown answers echo the aid back."""
        a = self.answers.get(aid, None)
        return a.content if a is not None else aid

    def to_dict(self):
        """Return a plain-dict snapshot of the question (answers not copied)."""
        return {
            "qid": self.qid,
            "title": self.title,
            "tags": self.tags,
            "type": self.type,
            "restriction": self.restriction,
            "answers": self.answers,
        }
Example #15
0
def execute(directories,pid):
   final_map=OrderedDict(list())
   for directory in directories:
      samplefile=open(directory+'/sample_data2.csv','w')
      for d,s,f in os.walk(directory):

         mydict=OrderedDict(list())
         if d=="*.py" or d=="*.pyc" or d=="*.csv":
            pass
         else:
            print "Processing [%s] : %s"%(pid,d)
            for f1 in f:
    #print "Processing [%s] : %s"%(pid,f1)
               if f1=="sample_data2.csv" or f1=="sample_data.csv":
                  pass
               else:
                  mfile = open('%s/%s'%(d,f1))
                  reader = csv.reader(mfile,delimiter=',')
                  t=0
                  headers = None
                  for line in reader:
                     interval=line[3]
                     date_value = str(line[6])
                     ch_value   = line[5]
                     date,time = date_value.split(" ")
		             p,q,r=time.split(":")
       		         hour=p
		             minu=q
                     if date not in mydict:
                        mydict[date] = []

                     mydict.get(date).append((line[2],ch_value,hour,interval,minu))
 def _get_cpu_topology(self):
     """Parse /proc/cpuinfo into an ordered mapping:
     NUMA/physical id -> core id -> [logical processor ids].

     Raises DpdkSetup when the file is missing or yields no topology.
     """
     cpu_topology_file = "/proc/cpuinfo"
     # physical processor -> physical core -> logical processing units (threads)
     cpu_topology = OrderedDict()
     if not os.path.exists(cpu_topology_file):
         raise DpdkSetup("File with CPU topology (%s) does not exist." % cpu_topology_file)
     with open(cpu_topology_file) as f:
         # Logical-core stanzas are separated by blank lines.
         for lcore in f.read().split("\n\n"):
             if not lcore:
                 continue
             lcore_dict = OrderedDict()
             for line in lcore.split("\n"):
                 key, val = line.split(":", 1)
                 lcore_dict[key.strip()] = val.strip()
             if "processor" not in lcore_dict:
                 continue
             # "physical id" identifies the socket/NUMA node; -1 when absent.
             numa = int(lcore_dict.get("physical id", -1))
             if numa not in cpu_topology:
                 cpu_topology[numa] = OrderedDict()
             # Fall back to the processor number when "core id" is missing.
             core = int(lcore_dict.get("core id", lcore_dict["processor"]))
             if core not in cpu_topology[numa]:
                 cpu_topology[numa][core] = []
             cpu_topology[numa][core].append(int(lcore_dict["processor"]))
     if not cpu_topology:
         raise DpdkSetup("Could not determine CPU topology from %s" % cpu_topology_file)
     return cpu_topology
Example #17
0
    class Read(OrgObjPermsMixin, ContactFieldsMixin, ContactBase, SmartReadView):
        """Read-only contact detail view, extended with the org's visible
        custom data fields."""

        def derive_fields(self):
            """Return the field names to render: the fixed set, optionally
            created_by, then every visible org data field key."""
            fields = ["urn", "region", "group", "language", "last_response"]
            if self.object.created_by_id:
                fields.append("created_by")

            # Show values for the visible data fields.
            # Which data fields to show are configured on the Org edit page.
            self.data_fields = [(f.key, f) for f in self.object.org.datafield_set.visible()]
            self.data_fields = OrderedDict(self.data_fields)
            fields.extend(self.data_fields.keys())

            return fields

        def get_last_response(self, obj):
            # Most recent response timestamp, or a translated "Never".
            last_response = obj.responses.order_by("-updated_on").first()
            return last_response.updated_on if last_response else _("Never")

        def lookup_field_label(self, context, field, default=None):
            """Label for a field: URN scheme name for "urn", display name for
            data fields, otherwise the superclass default."""
            if field == "urn":
                scheme = self.object.get_urn()[0]
                return dict(URN_SCHEME_CHOICES)[scheme]
            elif field in self.data_fields:
                return self.data_fields.get(field).display_name
            return super(ContactCRUDL.Read, self).lookup_field_label(context, field, default)

        def lookup_field_value(self, context, obj, field):
            """Value for a data field: this contact's first matching value,
            "-" when empty, "Unknown" when absent; others go to the superclass."""
            if field in self.data_fields:
                value = self.data_fields.get(field).contactfield_set.filter(contact=obj)
                value = value.first()
                return value.get_value() or "-" if value else "Unknown"
            return super(ContactCRUDL.Read, self).lookup_field_value(context, obj, field)
Example #18
0
    def get_constraint_updates(self):
        """Build an OrderedDict mapping shared parameters to constrained
        (symbolic) replacement expressions: scalar lambd, Wv norm
        constraints, a floor on scalar_norms, then max/min clipping.
        """
        constraint_updates = OrderedDict()
        if self.flags["scalar_lambd"]:
            constraint_updates[self.lambd] = T.mean(self.lambd) * T.ones_like(self.lambd)

        # constraint filters to have unit norm
        if self.flags["wv_norm"] in ("unit", "max_unit"):
            wv = constraint_updates.get(self.Wv, self.Wv)
            wv_norm = T.sqrt(T.sum(wv ** 2, axis=0))
            if self.flags["wv_norm"] == "unit":
                constraint_updates[self.Wv] = wv / wv_norm
            elif self.flags["wv_norm"] == "max_unit":
                # Only shrink columns whose norm exceeds 1.
                constraint_updates[self.Wv] = wv / wv_norm * T.minimum(wv_norm, 1.0)

        constraint_updates[self.scalar_norms] = T.maximum(1.0, self.scalar_norms)
        ## clip parameters to maximum values (if applicable)
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            # NOTE(review): constraint_updates is keyed by parameter variables,
            # but this .get() uses the *name string* k, so it seemingly always
            # falls back to getattr(self, k) -- confirm whether chaining with a
            # prior update was intended here.
            param = constraint_updates.get(k, getattr(self, k))
            # T.clip(x, x, v): lower bound is x itself, i.e. cap at v only.
            constraint_updates[param] = T.clip(param, param, v)

        ## clip parameters to minimum values (if applicable)
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = constraint_updates.get(k, getattr(self, k))
            constraint_updates[param] = T.clip(constraint_updates.get(param, param), v, param)

        return constraint_updates
Example #19
0
def main():
    """Read a FASTA file named by argv[1]; treat the first record as the full
    DNA sequence, excise each later record's sequence from it (first match
    only; missing fragments are ignored), then translate and print it."""
    with open(sys.argv[1], "r") as fasta:
        records = OrderedDict()
        for raw in fasta:
            entry = raw.strip()
            if entry.startswith(">"):
                header = entry
            elif header not in records:
                records[header] = [entry]
            else:
                records[header].append(entry)

        dna = None
        for position, chunk_lines in enumerate(records.values()):
            fragment = "".join(chunk_lines)
            if position == 0:
                dna = fragment
            else:
                try:
                    start = dna.index(fragment)
                    dna = dna[:start] + dna[start + len(fragment):]
                except ValueError:
                    # Fragment not present in the remaining DNA; skip it.
                    pass
        protein = translation(dna)
        print(protein)
Example #20
0
    def get_uniques(self, candidate_reporting_units):
        """
        Parses out unique candidates and ballot measures
        from a list of CandidateReportingUnit objects.

        Returns a (candidates, ballot_measures) pair of lists; the first
        reporting unit seen for each candidateid wins.
        """
        seen_candidates = OrderedDict()
        seen_measures = OrderedDict()

        for cru in candidate_reporting_units:
            if cru.is_ballot_measure:
                # First (truthy) entry per candidateid is kept.
                if not seen_measures.get(cru.candidateid):
                    seen_measures[cru.candidateid] = BallotMeasure(
                        last=cru.last,
                        candidateid=cru.candidateid,
                        polid=cru.polid,
                        ballotorder=cru.ballotorder,
                        polnum=cru.polnum,
                        seatname=cru.seatname,
                        description=cru.description,
                    )
            elif not seen_candidates.get(cru.candidateid):
                seen_candidates[cru.candidateid] = Candidate(
                    first=cru.first,
                    last=cru.last,
                    candidateid=cru.candidateid,
                    polid=cru.polid,
                    ballotorder=cru.ballotorder,
                    polnum=cru.polnum,
                    party=cru.party,
                )

        return list(seen_candidates.values()), list(seen_measures.values())
Example #21
0
    def cell(self, values):
        """Validate raw cell observations and keep the best lookup per key.

        Stores the raw (listified) input in ``_cell_unvalidated``, then builds
        de-duplicated area and cell lookups -- an existing entry is kept only
        when it is strictly better than the newcomer -- into ``_cell_area``
        and ``_cell``.
        """
        values = list(values) if values else []
        self._cell_unvalidated = values

        best_areas = OrderedDict()
        best_cells = OrderedDict()
        for entry in values:
            area = CellAreaLookup.create(**entry)
            if area:
                prior = best_areas.get(area.hashkey())
                # Replace unless the stored entry is strictly better.
                if not (prior is not None and prior.better(area)):
                    best_areas[area.hashkey()] = area
            lookup = CellLookup.create(**entry)
            if lookup:
                prior = best_cells.get(lookup.hashkey())
                if not (prior is not None and prior.better(lookup)):
                    best_cells[lookup.hashkey()] = lookup
        self._cell_area = list(best_areas.values())
        self._cell = list(best_cells.values())
    def get_constraint_updates(self):
        """Build an OrderedDict mapping shared parameters to constrained
        (symbolic) replacement expressions: unit-norm Wv, scalar lambd,
        sparsity masking of Wg, then max/min clipping.
        """
        constraint_updates = OrderedDict()

        if self.flags["wv_norm"] == "unit":
            # Normalize each column of Wv to unit L2 norm.
            norm_wv = T.sqrt(T.sum(self.Wv ** 2, axis=0))
            constraint_updates[self.Wv] = self.Wv / norm_wv

        if self.flags["scalar_lambd"]:
            # Collapse lambd to its mean, broadcast back to the original shape.
            constraint_updates[self.lambd] = T.mean(self.lambd) * T.ones_like(self.lambd)

        ## Enforce sparsity pattern on g if required ##
        if self.sparse_gmask:
            constraint_updates[self.Wg] = self.Wg * self.sparse_gmask.mask.T

        ## clip parameters to maximum values (if applicable)
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            # NOTE(review): constraint_updates is keyed by parameter variables,
            # but this .get() uses the *name string* k, so it seemingly always
            # falls back to getattr(self, k) -- confirm intended.
            param = constraint_updates.get(k, getattr(self, k))
            # T.clip(x, x, v): lower bound is x itself, i.e. cap at v only.
            constraint_updates[param] = T.clip(param, param, v)

        ## clip parameters to minimum values (if applicable)
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = constraint_updates.get(k, getattr(self, k))
            constraint_updates[param] = T.clip(constraint_updates.get(param, param), v, param)

        return constraint_updates
Example #23
0
class MockQualtrics(object):
    """ Mock object for unit testing code that uses pyqualtrics library

    Canned responses are served from two pre-populated mappings:
    ``mock_responses`` (raw answers) and ``mock_responses_labels``
    (label-formatted answers, used when Labels == "1").
    """
    def __init__(self, user=None, token=None, api_version="2.5"):
        # Mirror the attributes the real client exposes.
        self.user = user
        self.token = token
        self.api_version = api_version
        self.last_error_message = None
        self.last_url = None
        self.json_response = None
        self.response = None  # For debugging purpose
        self.mock_responses = OrderedDict()
        self.mock_responses_labels = OrderedDict()

    def getResponse(self, SurveyID, ResponseID, Labels=None, **kwargs):
        """Return the canned response for ResponseID, or None when unknown."""
        source = self.mock_responses_labels if Labels == "1" else self.mock_responses
        return source.get(ResponseID, None)

    def getLegacyResponseData(self, SurveyID, Labels=None, **kwargs):
        """Return the whole canned mapping (labels variant when Labels == "1")."""
        if Labels == "1":
            return self.mock_responses_labels
        return self.mock_responses
Example #24
0
def extract_app_features(log_df):
    """
    Extract features from logs of "InstalledApplications" modality.
    :return: list of field names, list of feature names, list of feature values

    Base features: per-category install ratios, unique app count, paid-app
    ratio, and total spend.  With FEATURE_SET_EXTENSION_APP, adds item-based
    (known-apk presence) and content-based (lexicon word counts) features.
    NOTE: relies on Python 2 semantics (map() returning a list, dict.keys()
    list concatenation).
    """
    col_lv1 = ["appCategory"] * len(feat.APP_CATEGORY) + ["appName", "appPrice", "appPrice"]
    col_lv2 = feat.APP_CATEGORY + [feat.UNIQUE_NUM, feat.PAID_RATIO, feat.SUM]
    values = []

    if param.FEATURE_SET_EXTENSION_APP:
        # word -> position and apk -> position lookups for the extended features.
        with open("data_set/lexicon.txt", "r") as f:
            lexicon = OrderedDict((word.strip(), order) for order, word in enumerate(f.readlines()))
        with open("data_set/apks.txt", "r") as f:
            apks = OrderedDict((apk.strip(), order) for order, apk in enumerate(f.readlines()))
        app_content_arr = np.genfromtxt("data_set/app_contents.csv", dtype=None, delimiter=",", names=True)
        apk_contents = {row[0]: row[1].split() for row in app_content_arr}

        col_lv1 += ["itemBased"] * len(apks) + ["contentBased"] * len(lexicon)
        col_lv2 += apks.keys() + lexicon.keys()

    if len(log_df) == 0:
        # No logs: every feature is NaN.
        return col_lv1, col_lv2, [np.nan] * len(col_lv2)

    else:
        # Keep installed apps only, de-duplicated by app name.
        install_only = log_df[log_df["appName"] != "NOT_EXISTS"]
        grouped_by_name = install_only.groupby("appName")
        install_only = grouped_by_name.first()

        # Fraction of installed apps in each category.
        ratio = map(
            lambda x: len(install_only[install_only["appCategory"] == x]) / float(len(install_only))
            if len(install_only) > 0
            else np.nan,
            feat.APP_CATEGORY,
        )
        # Unique app count and paid-app ratio.
        values += ratio + [
            len(install_only),
            len(install_only[install_only["appPrice"] > 0]) / float(len(install_only))
            if len(install_only) > 0
            else np.nan,
        ]

        total_cost = sum(install_only["appPrice"])
        values.append(total_cost)

        if param.FEATURE_SET_EXTENSION_APP:
            # itemBased: binary presence per known apk;
            # contentBased: lexicon word counts over installed apps' contents.
            item_based = [0] * len(apks)
            content_based = [0] * len(lexicon)
            for apk_name in install_only["packageName"]:
                if apk_name in apks:
                    item_based[apks.get(apk_name)] = 1
                if apk_name in apk_contents:
                    content = apk_contents.get(apk_name)
                    for word in content:
                        if word in lexicon:
                            content_based[lexicon.get(word)] += 1
            values.extend(item_based)
            values.extend(content_based)

        return col_lv1, col_lv2, values
def writeFile():
   """Pivot the module-global ``final_map3`` into per-(key, store) rows and
   write them to final_output_submeter.csv with one column per submeter.

   Relies on module globals: ``final_map3`` maps key -> list of
   (t1, t2, t3) triples.  Output rows are keyed by (key, t1) and padded
   with (submeter, 0) pairs so every row covers every submeter seen.
   """
   final_map5 = OrderedDict()
   my_list = sorted(final_map3.keys())

   for k1 in my_list:
      for k, v in final_map3.items():
         if k1 == k:
            for item in v:
               t1, t2, t3 = item
               f = (k, t1)
               # BUG FIX: membership was checked against final_map3 instead of
               # final_map5, so the bucket was never created and
               # final_map5.get(f).append(...) crashed on None.
               if f not in final_map5:
                  final_map5[f] = []
               final_map5[f].append((t2, t3))

   # Collect every submeter id (t1) seen anywhere.
   s1 = OrderedDict()
   for k, v in final_map5.items():
      for item in v:
         t1, t2 = item
         if t1 not in s1:
            s1[t1] = []
   # Pad rows with (submeter, 0) for submeters they are missing.
   for item2 in s1:
      for k, v in final_map5.items():
         x = 0
         for item in v:
            t1, t2 = item
            if t1 == item2:
               # FIX: this line was tab-indented in the original, breaking
               # the block's indentation.
               x += 1
         if x == 0:
            final_map5[k].append((item2, 0))

   f1 = open('/home/sbat/external_drive/mainload/final_output_submeter.csv', 'w')
   headerlist = ['store']
   headerlist.append("date")
   for k, v in s1.items():
      # Skip the empty-name submeter column, if any.
      if k == '':
         pass
      else:
         headerlist.append(str(k))

   f1.write('%s\n' % ','.join(headerlist))
   print("wrote headers")

   for k, v in final_map5.items():
      # Keys are (date-like key, store) pairs.
      store = k[1]
      date = k[0]
      data_list = [store]
      data_list.append(str(date))
      for item1 in s1:
         for item in v:
            t1, t2 = item
            if item1 == t1:
               data_list.append(str(t2))

      f1.write('%s\n' % ','.join(data_list))
   print("done writing............................................")
   f1.close()
Example #26
0
def group_log_entries(user, year, month, day=None):
    """
    Processes and regroups a list of log entries so they can be more easily
    used in the different calendar pages

    :param user: the user to filter the logs for
    :param year: year
    :param month: month
    :param day: optional, day

    :return: a dictionary with grouped logs by date and exercise
    """
    # One cache entry per (user, period) combination.
    if day:
        log_hash = hash((user.pk, year, month, day))
    else:
        log_hash = hash((user.pk, year, month))

    # There can be workout sessions without any associated log entries, so it
    # is not enough to simply iterate through the logs.
    # NOTE: querysets are lazy, so no database query is issued here when the
    # result is found in the cache below.
    if day:
        filter_date = datetime.date(year, month, day)
        logs = WorkoutLog.objects.filter(user=user, date=filter_date)
        sessions = WorkoutSession.objects.filter(user=user, date=filter_date)

    else:
        logs = WorkoutLog.objects.filter(user=user, date__year=year, date__month=month)

        sessions = WorkoutSession.objects.filter(user=user, date__year=year, date__month=month)

    logs = logs.order_by("date", "id")
    out = cache.get(cache_mapper.get_workout_log_list(log_hash))

    if not out:
        out = OrderedDict()

        # Logs: group first by date, then by exercise.
        for entry in logs:
            if entry.date not in out:
                out[entry.date] = {
                    "date": entry.date,
                    "workout": entry.workout,
                    "session": entry.get_workout_session(),
                    "logs": OrderedDict(),
                }

            out[entry.date]["logs"].setdefault(entry.exercise, []).append(entry)

        # Sessions on days that have no log entries at all.
        for entry in sessions:
            if entry.date not in out:
                out[entry.date] = {"date": entry.date, "workout": entry.workout, "session": entry, "logs": {}}

        cache.set(cache_mapper.get_workout_log_list(log_hash), out)
    return out
Example #27
0
class SymbolTable(object):
    """Scoped symbol table mapping lexemes to semantic entries.

    Each table is chained to an optional parent table; ``find`` walks the
    whole chain so inner scopes see outer-scope symbols.
    """

    def __init__(self, parent):
        # Enclosing-scope table, or None for the root table.
        self.parent_table = parent
        self.name = ""
        # New tables start at the parent's depth; ``create`` bumps it.
        if parent:
            self.cur_depth = self.parent_table.cur_depth
        else:
            self.cur_depth = 0
        self.context_attributes_stack = []
        self.cur_context_attributes = None
        # lexeme -> SemanticEntry, in insertion order.
        self.sym_table = OrderedDict()
        self.record = SemanticRecord()
        # Running byte offset assigned to inserted records.
        self.cur_offset = 0

    def create(self):  # subtables
        """Open a new nested scope context."""
        self.cur_context_attributes = ContextAttrs()
        self.cur_depth += 1

    def create_root(self):  # Root table
        """Open the root scope context (depth stays 0)."""
        self.cur_context_attributes = ContextAttrs()

    def destroy(self):
        """Back out every lexeme recorded in the current scope context.

        Entries whose record stack empties out are removed entirely.
        """
        self.cur_lexeme = ""
        for lexeme in self.cur_context_attributes.context_lexemes:
            self.cur_entry = self.find(lexeme)
            if self.cur_entry:
                self.cur_entry.back_out()
                if self.cur_entry.semantic_record_stack is None:
                    del self.sym_table[lexeme]

    def insert(self, record):
        """Insert *record*, assigning it the next offset in this scope.

        A lexeme already present gets the record pushed onto its existing
        entry (shadowing); otherwise a fresh entry is created.
        """
        self.record = record
        self.cur_context_attributes.context_lexemes.append(self.record.lexeme)
        self.record.offset = self.cur_offset
        self.cur_offset += self.record.size
        self.existing_entry = self.find(self.record.lexeme)
        if self.existing_entry is None:
            self.new_entry = SemanticEntry()
            self.new_entry.put(self.record)
            self.sym_table[self.record.lexeme] = self.new_entry
        else:
            self.record.depth = self.existing_entry.depth
            self.existing_entry.put(self.record)

    def find(self, lexeme):
        """Look up *lexeme* here, then up the whole parent chain.

        BUG FIX: the original only recursed when the lexeme existed in the
        *immediate* parent's table, so symbols defined two or more scopes up
        were never found.  Also avoids the duplicated dict lookup.
        """
        entry = self.sym_table.get(lexeme, None)
        if entry:
            return entry
        if self.parent_table:
            return self.parent_table.find(lexeme)
        return None

    def __repr__(self):
        output = "\n[%7s |%7s |%7s |%7s |%7s |%7s ]" % ("lexeme", "kind", "type", "size", "offset", "depth")
        # Only the entries are needed, not the keys.
        for entry in self.sym_table.values():
            output = output + "\n" + entry.cur_record.__repr__()
        return output
Example #28
0
class PipFreeze(object):
    """Container over the packages reported by ``pip freeze``.

    Supports membership tests against requirements, lookup by package
    name, iteration, and marking packages as deprecated from a
    requirements specification.
    """

    def __init__(self, pip_freeze_output):
        self._load_pip_freeze(pip_freeze_output)
        self.deprecated = []

    def _load_pip_freeze(self, pip_freeze_output):
        # Keyed by package id, preserving pip freeze order.
        self._packages = OrderedDict(
            (pkg.id, pkg) for pkg in _parse_pip_freeze(pip_freeze_output)
        )

    def __contains__(self, requirement):
        req = _to_requirement(requirement)
        return self._packages.get(req.id) in req

    def __getitem__(self, package_name):
        # Raises KeyError when the package is not installed.
        req = _to_requirement(package_name)
        return self._packages[req.id]

    def get(self, requirement, default=None):
        try:
            return self[requirement]
        except KeyError:
            return default

    def __iter__(self):
        return iter(self._packages.values())

    def __len__(self):
        return len(self._packages)

    def __nonzero__(self):
        # Python 2.x
        return self.__bool__()  # pragma: no cover

    def __bool__(self):
        # Python 3.x
        return bool(self._packages)

    def load_requirements(self, requirements):
        """Deprecate every installed package that violates a requirement."""
        for spec in requirements["requirements"]:
            requirement = Requirement(spec["requirement"])
            package = self._packages.get(requirement.id)
            # Skip packages that are absent or already satisfy the spec.
            if package is None or package in requirement:
                continue
            deprecate_kwargs = {"deprecated_by": requirement}
            for key in ("reason", "severity"):
                if key in spec:
                    deprecate_kwargs[key] = spec[key]
            package.deprecate(**deprecate_kwargs)
            self.deprecated.append(package)
Example #29
0
class MarketState(Algorithm):
    """Record a compact per-bar "state" string for SPY, built from moving
    averages and rolling high/low breakout flags."""

    def initialize(self, *args, **kwargs):
        """Register all metrics and the CSV recorder.

        :param sid: keyword-only, symbol to track (default "SPY")
        """
        self.sid = kwargs.get("sid", "SPY")
        self.id = "Study for output (%s)" % (self.sid)
        self.desc = "Output this study"
        self.ignore_old = False
        self.initialize_recorder(
            ["state"],
            False,
            "J:/LanahanMain/code_projects/quant_sim/quant_sim/reporting/records/" + self.id + "record.csv",
        )
        # NOTE(review): the metric func strings hard-code 'SPY' even though
        # sid is configurable -- confirm whether they should use self.sid.
        # Loops below preserve the original metric registration order.
        for w in (10, 20, 50, 200):
            self.add_metric(MA(id="sma%d" % w, val=0.0, window=w, func="env['SPY'].c"))
        for w in (10, 20, 50):
            self.add_metric(Max(id="H%d" % w, val=0.0, cache_n=2, window=w, func="env['SPY'].c"))
        for w in (10, 20, 50):
            self.add_metric(Min(id="L%d" % w, val=0.0, cache_n=2, window=w, func="env['SPY'].c"))
        for w in (10, 20, 50):
            self.add_metric(Max(id="iH%d" % w, val=0.0, cache_n=2, window=w, func="env['SPY'].h"))
        for w in (10, 20, 50):
            self.add_metric(Min(id="iL%d" % w, val=0.0, cache_n=2, window=w, func="env['SPY'].l"))
        # Previous bar's state dict ("pod" = prior, "nod" = new).
        self.pod = OrderedDict()

    def process_data(self, env, *args, **kwargs):
        """Compute this bar's state flags and record them as one string."""
        eod0 = env.get(self.sid, 0)
        eod1 = env.get(self.sid, 1)
        nod = state_of_market(self.pod, env, eod0, eod1)
        # Breakout flags: 1 when the close crosses the prior rolling extreme.
        # NOTE(review): the iH*/iL* comparisons also use eod0.c even though
        # those metrics track intraday highs/lows -- confirm intent.
        # Insertion order below matches the original hand-written sequence,
        # which matters because the recorded string joins nod in dict order.
        for w in (10, 20, 50):
            nod["H%d" % w] = 1 if eod0.c > self.metrics.funcs["H%d" % w].cache[1] else 0
        for w in (10, 20, 50):
            nod["L%d" % w] = 1 if eod0.c < self.metrics.funcs["L%d" % w].cache[1] else 0
        for w in (10, 20, 50):
            nod["iH%d" % w] = 1 if eod0.c > self.metrics.funcs["iH%d" % w].cache[1] else 0
        for w in (10, 20, 50):
            nod["iL%d" % w] = 1 if eod0.c < self.metrics.funcs["iL%d" % w].cache[1] else 0
        # Moving-average side flags: 1 above, 2 below.
        for w in (10, 20, 50, 200):
            key = "sma%d" % w
            nod[key] = 1 if eod0.c > self.metrics[key] else 2
        # Consecutive-bar counters: extend the streak while the flag repeats.
        for w in (10, 20, 50, 200):
            key = "sma%d" % w
            nod[key + "-consec"] = self.pod.get(key + "-consec", 0) + 1 if nod[key] == self.pod.get(key, 0) else 1
        self.record({"state": "  :  ".join(["%s-%d" % (k, v) for k, v in nod.items() if v > 0])})
        self.pod = nod
Example #30
0
    def do_theano(self):
        """Compile all Theano functions needed to use the model.

        Builds the minibatch training function (``batch_train_func``) and the
        parameter-constraint function (``enforce_constraints``), then
        registers every attribute created in between for exclusion from
        pickling.  NOTE: uses ``dict.iteritems`` -- Python 2 only.
        """

        # Snapshot attribute names before compilation so everything created
        # below can be excluded from pickling at the end.
        init_names = dir(self)

        ###### All fields you don't want to get pickled (e.g., theano functions) should be created below this line
        # SAMPLING: NEGATIVE PHASE
        # Persistent chains (PCD) unless the 'use_cd' flag selects plain CD.
        neg_updates = self.neg_sampling_updates(n_steps=self.neg_sample_steps, use_pcd=not self.flags["use_cd"])

        # determine the maximum likelihood cost
        ml_cost = self.ml_cost(pos_v=self.input, neg_v=neg_updates[self.neg_v])
        main_cost = [ml_cost, self.get_sparsity_cost(), self.get_reg_cost(self.l2, self.l1)]

        ##
        # COMPUTE GRADIENTS WRT. TO ALL COSTS
        ##
        learning_grads = utils_cost.compute_gradients(*main_cost)

        ##
        # BUILD UPDATES DICTIONARY
        ##
        learning_updates = utils_cost.get_updates(learning_grads, self.lr, multipliers=self.lr_mults_shrd)
        learning_updates.update(neg_updates)
        # Advance the shared iteration counter on every training step.
        learning_updates.update({self.iter: self.iter + 1})

        # build theano function to train on a single minibatch
        self.batch_train_func = function([self.input], [], updates=learning_updates, name="train_rbm_func")

        # enforce constraints function
        constraint_updates = OrderedDict()
        ## clip parameters to maximum values (if applicable)
        # T.clip(param, param, v): lower bound is the parameter itself, so
        # only the upper bound v takes effect here.
        for (k, v) in self.clip_max.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            constraint_updates[param] = T.clip(param, param, v)
        ## clip parameters to minimum values (if applicable)
        # Chains onto any max-clip already registered for the same param.
        for (k, v) in self.clip_min.iteritems():
            assert k in [param.name for param in self.params()]
            param = getattr(self, k)
            constraint_updates[param] = T.clip(constraint_updates.get(param, param), v, param)
        # constraint filters to have unit norm
        if self.flags.get("weight_norm", None):
            wv = constraint_updates.get(self.Wv, self.Wv)
            wv_norm = T.sqrt(T.sum(wv ** 2, axis=0))
            if self.flags["weight_norm"] == "unit":
                constraint_updates[self.Wv] = wv / wv_norm
            elif self.flags["weight_norm"] == "max_unit":
                # Rescale only columns whose norm exceeds 1.
                constraint_updates[self.Wv] = wv / wv_norm * T.minimum(wv_norm, 1.0)
        self.enforce_constraints = theano.function([], [], updates=constraint_updates)

        ###### All fields you don't want to get pickled should be created above this line
        final_names = dir(self)
        self.register_names_to_del([name for name in (final_names) if name not in init_names])

        # Before we start learning, make sure constraints are enforced
        self.enforce_constraints()