Example #1
 def format_response(self, response, in_trash):
     """
     Helper function that formats comments and other annotations
     :dict response: response from server
     :bool in_trash: if True, include archived (trashed) items in the result
     :dict return: response organized by media uri
     """
     # Create flat list of all nodes
     all_nodes = []
     for node in util.strip(response):
         all_nodes.extend(util.strip(node))
     # Create dictionary with 'title' as key and other fields as values
     cleaned = {}
     for node in all_nodes:
         node = node["node"]
         title = node.pop("title")
         cleaned[title] = node
     # Create single-nest dictionary from 'cleaned' with 'uri' as key
     reorganized = {}
     for k, v in cleaned.items():
         if in_trash or not v["archived"]:
             sub_dict = {}
             sub_dict['status'] = v["status"]
             tags = []
             for node in v["tags"]['edges']:
                 tags.append(node['node']['tag_text'])
             sub_dict['tags'] = tags
             sub_dict['last_updated'] = util.epoch_to_datetime(
                 v["updated_at"])
             sub_dict['notes'] = self.collect_comments(v["dbid"])
             sub_dict['last_updated_by'] = v[
                 'dynamic_annotations_verification_status']['edges'][0][
                     'node']['annotator']['name']
             reorganized[v["media"]['url'][-11:]] = sub_dict
     return reorganized
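The shape of the returned dictionary, sketched with hypothetical values: each key is the last 11 characters of the item's media URL (which looks like a YouTube video id in this project), and each value bundles the fields collected above.

reorganized_example = {
    "dQw4w9WgXcQ": {                      # hypothetical media['url'][-11:]
        "status": "verified",
        "tags": ["election", "video"],
        "last_updated": "2019-07-01 12:00:00",
        "notes": ["first comment", "second comment"],
        "last_updated_by": "Nicole",
    },
}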
Example #2
    def build(cls, line, idx=0):
        # handle missing/blank input before stripping, so a None line cannot raise here
        if line is None or len(line.strip()) == 0:
            cl = cls(idx=idx, raw=line)
            cl.ltype = CONFIG_LINE_BLANK
            return cl

        line = line.strip()
        cl = cls(idx=idx, raw=line)

        cmd_match = re.match(
            r'^\s*(#)?\s*([a-zA-Z0-9\-_.]+)\s*=\s*(.+?)(\s*#.+)??$', line)
        cmt_match = re.match(r'^\s*#.*', line)

        if cmd_match is None and cmt_match is not None:
            cl.ltype = CONFIG_LINE_COMMENT
            return cl

        if cmd_match is None:
            logger.debug('Unrecognized config line: %s' % line)
            cl.ltype = CONFIG_LINE_COMMENT
            return cl

        cl.ltype = CONFIG_LINE_CMD if cmd_match.group(
            1) is None else CONFIG_LINE_CMD_COMMENT
        cl.cmd = util.strip(cmd_match.group(2))
        cl.params = util.strip(cmd_match.group(3))
        cl.comment = util.strip(cmd_match.group(4))
        return cl
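A small standalone check of the config-line regex used above, on a made-up line (the line text and values are hypothetical):

import re

CONFIG_RE = r'^\s*(#)?\s*([a-zA-Z0-9\-_.]+)\s*=\s*(.+?)(\s*#.+)??$'

m = re.match(CONFIG_RE, '# max_connections = 100  # tuned for small VMs')
print(m.groups())
# ('#', 'max_connections', '100', '  # tuned for small VMs')
# group 1 distinguishes CONFIG_LINE_CMD from CONFIG_LINE_CMD_COMMENT;
# groups 2-4 become cl.cmd, cl.params and cl.comment after util.strip.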
Example #3
    def test_strip(self):
        sample_query = '''query {
          team(slug: "ischool-hrc") {
            projects {
              edges {
                node {
                  title
                  id
                  dbid
                }
              }
            }
          }
        }
        '''
        response = self.meedan_api.execute(sample_query)
        nodes = util.strip(response)
        self.assertEqual(nodes, xr_nodes, "Strip function has failed")

        sample_dict = {
            "data": [{
                "team": {
                    "edges": [{
                        "node": {
                            "name": "Nicole"
                        }
                    }]
                }
            }, {
                "team": {
                    "edges": [{
                        "node": {
                            "name": "Nicole"
                        }
                    }]
                }
            }]
        }
        expected_list = [{
            'node': {
                'name': 'Nicole'
            }
        }, {
            'node': {
                'name': 'Nicole'
            }
        }]
        self.assertEqual(util.strip(sample_dict), expected_list,
                         "Strip function failed on nested dictionaries")
Example #4
    def get_proj_id(self, proj_dbid):
        """
        Given a project list dbid or title, returns the project dbid as a string formatted for a GraphQL query.
        :param proj_dbid: str or int, either the name of the list or the project dbid
        :str return: project dbid
        """
        if isinstance(proj_dbid, str):
            # queries for project names and their associated ID
            proj_query = '''query {
              team(slug: "%s") {
                projects {
                  edges {
                    node {
                      title
                      id
                      dbid
                    }
                  }
                }
              }
            }
            ''' % (self.slug)
            response = self.execute(proj_query)

            # Extract list of projects
            proj_nodes = util.strip(response)

            # Create new dictionary where the project titles are the keys
            proj_dict = util.pivot_dict(proj_nodes, "title", "dbid")
            proj_dbid = proj_dict[proj_dbid]
        return str(proj_dbid)
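util.pivot_dict is only described by the comment above; a hypothetical sketch consistent with that usage, where each stripped node is a {'node': {...}} wrapper and the result maps one field to another:

def pivot_dict(nodes, key_field, value_field):
    # hypothetical: {'Rumors': 12, ...} from [{'node': {'title': 'Rumors', 'dbid': 12}}, ...]
    return {n['node'][key_field]: n['node'][value_field] for n in nodes}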
Example #5
def validate_comment(text):
	length = len(util.strip(text))

	if length == 0 or length > 512:
		return False

	return True
Example #6
def owner(row):
    soup = BeautifulSoup(row, 'html.parser')
    repository_owner = str(soup.find("h1", {"class": "h3 lh-condensed"}))
    repository = 'https://github.com/' + strip(
        extract_repository_name(repository_owner))
    data = {'repository': repository}
    return data
Example #7
def validate_string(regex, value):
	value = util.strip(value)

	if not regex.match(value) is None:
		return True

	return False
Example #8
def process(jpg_data, box, texts):
    x_local = util.decode_jpg(jpg_data, crop_to_box=box)
    # hack: scale the box down
    x_global, box = util.decode_jpg(jpg_data, box)
    text = util.strip(random.choice(texts))
    indices = words.indices(text)
    idx = np.random.randint(0, len(indices))
    x_words = util.left_pad(indices[:idx][-MAX_WORDS:])
    y = util.onehot(indices[idx])
    x_ctx = img_ctx(box)
    return [x_global, x_local, x_words, x_ctx], y
Example #9
def evaluate(model, x_global, x_local, x_ctx, box, texts, temperature=.0):
    candidate, likelihood = predict(model, x_global, x_local, x_ctx, box,
                                    temperature)
    candidate = util.strip(candidate)
    references = list(map(util.strip, texts))  # materialize so it can be indexed and reused
    print("{} {} ({})".format(likelihood, candidate, references[0]))
    scores = {}
    scores['bleu1'], scores['bleu2'] = bleu(candidate, references)
    scores['rouge'] = rouge(candidate, references)
    scores['likelihood'] = likelihood
    return scores
Example #10
File: staff.py Project: jworr/scheduler
def add(name, roleId, phone, street, city, state, zipCode, isActive=False, oldName=None):
	"""
	Adds or updates a staff member with the information given
	"""
	
	results = process.ERROR_PAGE
	connection = services.getConnection()
	
	try:
		roleId = int(roleId)
		validRole = True
	except (TypeError, ValueError):
		#track the role-id parse separately so the later validData assignment doesn't discard it
		validRole = False
	
	isActive = isActive == "on"	
	
	phone = phone.replace("(", "").replace(")","").replace("-","").replace(" ","")
	name = util.strip(name)
	street = util.strip(street)
	state = util.strip(state).upper()
	zipCode = util.strip(zipCode)

	validData = validRole and (len(zipCode) == 0 or len(zipCode) == 5) and (len(state) == 0 or len(state) == 2)
	
	if oldName:
		oldName = util.strip(oldName)
		validData = validData and services.nameExists(connection, oldName)
	
	#if the data is valid then flush it to the database
	if validData:
		
		staff = model.staff.StaffMember(name, roleId, isActive, street, city, state, zipCode, phone)
		
		staff.flush(connection, oldName)
		 
		results = REDIRECT
	
	connection.close()
	
	return results
Example #11
def get_last_doing():
    with open(settings.logf, 'r') as f:
        last = list(f)[-1]
    respm = re.search(r'''
^
    \d+      # Timestamp
    \s+      # Spaces after timestamp
    (.*)         # Om nom nom
                      ''', last, re.X)

    if not respm:
        print("ERROR: Failed to find any tags for ditto function. "
              "Last line in TagTime log:\n", last, file=sys.stderr)
        sys.exit(1)
    return util.strip(respm.group(1)).strip()  # remove comments and timestamps
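A quick standalone check of that verbose regex on a made-up TagTime log line (hypothetical content):

import re

last = "1546300800 coding hgsubversion [missed ping]"
respm = re.search(r'''
^
    \d+      # Timestamp
    \s+      # Spaces after timestamp
    (.*)     # Everything after the timestamp
''', last, re.X)
print(respm.group(1))
# 'coding hgsubversion [missed ping]'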
Example #12
File: staff.py Project: jworr/scheduler
def delete(name):
	"""
	Deletes the staff member identified by their name
	"""
	results = process.ERROR_PAGE
	connection = services.getConnection()
	
	name = util.strip(name)
	
	#if the name exists then delete it
	if services.nameExists(connection, name):
	
		services.deleteStaff(connection, name)
		results = REDIRECT	
	
	return results
Example #13
def process(jpg_data, box, texts, **params):
    max_words = params['max_words']
    x_local = util.decode_jpg(jpg_data, crop_to_box=box)
    # hack: scale the box down
    x_global, box = util.decode_jpg(jpg_data, box)
    text = util.strip(random.choice(texts))
    indices = words.indices(text)
    idx = np.random.randint(1, len(indices))
    x_indices = indices[:idx]
    if len(x_indices) > max_words:
        x_indices = x_indices[-max_words:]

    x_indices = util.left_pad(x_indices, **params)
    y = util.onehot(indices[idx])

    x_ctx = img_ctx(box)
    return [x_global, x_local, x_indices, x_ctx], y, box
Example #14
def evaluate(model,
             x_global,
             x_local,
             x_ctx,
             box,
             texts,
             verbose=True,
             **params):
    if verbose:
        img = x_global - x_global.min()
        util.show(x_local)
        util.show(img, box=box)
    candidate = predict(model, x_global, x_local, x_ctx, box, **params)
    candidate = util.strip(candidate)
    references = list(map(util.strip, texts))
    #print("{} {} ({})".format(likelihood, candidate, references[0]))
    return candidate, references
Example #15
def get_annotation_for_key(key):
    grefexp = json.loads(conn.get(key))
    anno_key = 'coco2014_anno_{}'.format(grefexp['annotation_id'])
    anno = json.loads(conn.get(anno_key))
    img_key = 'coco2014_img_{}'.format(anno['image_id'])
    img_meta = json.loads(conn.get(img_key))

    jpg_data = open(os.path.join(DATA_DIR, img_meta['filename']), 'rb').read()

    x0, y0, width, height = anno['bbox']
    box = (x0, x0 + width, y0, y0 + height)
    texts = [g['raw'] for g in grefexp['refexps']]

    texts = [spell(strip(t, strip_end=False)) for t in texts]

    category = categories[anno['category_id']]
    return jpg_data, box, texts
Example #16
 def collect_comments(self, dbid):
     """
     Helper function that gets comments on a project_media
     :int dbid: Meedan's dbid identifier for a particular piece of content
     :list return: text of each comment
     """
     query_string = """query {
       project_media(ids: "%s") {
         annotations(annotation_type: "comment") {
           edges {
             node {
               ... on Comment {
                 text
               }
             }
           }
         }
       }
     }""" % (str(dbid))
     response = self.execute(query_string)
     text = [edge['node']['text'] for edge in util.strip(response)]
     return text
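For reference, a response to that query would nest each comment under annotations/edges/node, so util.strip reduces it to the edge list before the comprehension pulls out the text (hypothetical data):

response_example = {
    "data": {
        "project_media": {
            "annotations": {
                "edges": [
                    {"node": {"text": "needs a second source"}},
                    {"node": {"text": "confirmed by the team"}},
                ]
            }
        }
    }
}
# util.strip(response_example) would yield the two edge dicts, and the
# comprehension above would return:
# ['needs a second source', 'confirmed by the team']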
Example #17
    #   if(-e $tskf) {
    #     if(open(F, "<$tskf")) {
    #       %tags = ();  # empty the hash first.
    #       while(<F>) {
    #         if(/^\-{4,}/ || /^x\s/i) { last; }
    #         if(/^(\d+)\s+\S/) { $tags{$1} = gettags($_); }
    #       }
    #       close(F);
    #     } else {
    #       print "ERROR: Can't read task file ($tskf) again\n";
    #       $eflag++;
    #     }
    #   }
    # XXX task file

    tagstr = util.strip(resp).strip()
    comments = util.stripc(resp).strip()
    #tagstr = re.sub(r'\b(\d+)\b', lambda m)
    #$tagstr =~ s//($tags{$1} eq "" ? "$1" : "$1 ").$tags{$1}/eg;
    #$tagstr =~ s/\b(\d+)\b/tsk $1/;
    tagstr += autotags
    tagstr = re.sub(r'\s+', ' ', tagstr)
    a = util.annotime("{} {} {}".format(t, tagstr, comments), t)
    if (not tagstr) or\
        (not settings.enforcenums or re.search(r'\b(\d+|non|afk)\b', tagstr)):
        # if enforcenums is enabled, requires a digit or "non" or "afk" to end
        break

print(a)
logger.log(a)
Example #18
def add( date, time, staff, location, desc, addToSupport=False, addToMedical=False, \
oldName=None, oldTime=None, oldDate=None, **kwargs):
	"""
	Checks the given data and if it looks valid then add/update the appointment
	date - the date of the appointment
	time - the time of the appointment
	location - the location
	desc - generic text about the appt
	oldName - the previous person the appt was for
	oldTime - the previous time of the appt
	oldDate - the previous date of the appt
	addToSupport - copy all the information to the support appointment at the
					same time and place
	addToMedical - copy all the information to the medical appointment at the
					same time and place
	kwargs - a collection of optional data (the names of the form fields match
				the names of the appointment fields)
	"""
	result = None
	oldTimestamp = None
	date = util.strip(date)
	name = util.strip(staff)
	location = util.strip(location)
	supportAppts = None
	medicalAppts = None

	#set all the optional parameters to None if they are an empty string
	for key,value in kwargs.items():
		if value == "":
			kwargs[key] = None
	
	conn = services.getConnection()
	datetime = util.toDatetime(date, time)

	#check the required data for errors
	validData = services.locationExists(conn, location) \
	and datetime != None and services.nameExists(conn, name) \
	and checkDate(kwargs["lastPeriod"]) \
	and checkDate(kwargs["dateConfirmed"])
	
	#check the old name of the appointment if one was given	
	if oldName:
		oldName= util.strip(oldName)
		validData = validData and services.nameExists(conn, oldName)
	
	#assemble the old timestamp for the appointment if all the information was given
	if oldTime and oldDate:
		oldTimestamp = util.toDatetime(util.strip(oldDate), util.strip(oldTime))
		validData = validData and oldTimestamp != None
	
	#if we are not performing an update then check to see if the new appointment
	#time and person are not already taken
	if not (oldTimestamp == datetime and oldName == staff):
		validData= validData and not services.getAppointment(conn, datetime, staff)

	if addToSupport:
		supportAppts = services.getAppsAtTime(conn, datetime, location, model.role.SUPPORT)
		validData = validData and len(supportAppts)

	if addToMedical:
		medicalAppts = services.getAppsAtTime(conn, datetime, location, model.role.MEDICAL)
		validData = validData and len(medicalAppts)

	#if the data was valid then save it to the database
	if validData:

		otherAppts = []

		#create the appointment object
		appointment = Appointment(datetime, name, location, None, desc, **kwargs)
		
		#flush the appointment object to the database
		appointment.flush(conn, oldTimestamp, oldName)

		if addToSupport:
			otherAppts += supportAppts	

		if addToMedical:
			otherAppts += medicalAppts

		#add the one extra field that needs to be updated as well
		kwargs["description"] = desc

		#update all the other appointments
		for appt in otherAppts:
			model.updateAll(appt, kwargs)
			appt.flush(conn, appt.time, appt.staffName)

		#set the redirect for the browser
		result = REDIRECT % (date, datetime.weekday())
		
	#else show an error page
	else:
		result = process.ERROR_PAGE

	conn.close()
		
	return result
Example #19
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.
    """
    assert not revs, 'designated revisions for push remains unimplemented.'
    cmdutil.bailifchanged(repo)
    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        try:
            # The checkpush function changed as of e10000369b47 (first
            # in 3.0) in mercurial
            from mercurial.exchange import pushoperation
            pushop = pushoperation(repo, dest, force, revs, False)
            checkpush(pushop)
        except (ImportError, TypeError):
            checkpush(force, revs)

    ui = repo.ui
    old_encoding = util.swap_out_encoding()

    try:
        hasobsolete = (obsolete._enabled or
                       obsolete.isenabled(repo, obsolete.createmarkersopt))
    except:
        hasobsolete = False

    temporary_commits = []
    obsmarkers = []
    try:
        # TODO: implement --rev/#rev support
        # TODO: do credentials specified in the URL still work?
        svn = dest.svn
        meta = repo.svnmeta(svn.uuid, svn.subdir)

        # Strategy:
        # 1. Find all outgoing commits from this head
        if len(repo[None].parents()) != 1:
            ui.status('Cowardly refusing to push branch merge\n')
            return 0 # results in nonzero exit status, see hg's commands.py
        workingrev = repo[None].parents()[0]
        workingbranch = workingrev.branch()
        ui.status('searching for changes\n')
        hashes = meta.revmap.hashes()
        outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
        if not (outgoing and len(outgoing)):
            ui.status('no changes found\n')
            return 1 # so we get a sane exit status, see hg's commands.push

        tip_ctx = repo[outgoing[-1]].p1()
        svnbranch = tip_ctx.branch()
        modified_files = {}
        for i in range(len(outgoing) - 1, -1, -1):
            # 2. Pick the oldest changeset that needs to be pushed
            current_ctx = repo[outgoing[i]]
            original_ctx = current_ctx

            if len(current_ctx.parents()) != 1:
                ui.status('Found a branch merge, this needs discussion and '
                          'implementation.\n')
                # results in nonzero exit status, see hg's commands.py
                return 0

            # 3. Move the changeset to the tip of the branch if necessary
            conflicts = False
            for file in current_ctx.files():
                if file in modified_files:
                    conflicts = True
                    break

            if conflicts or current_ctx.branch() != svnbranch:
                util.swap_out_encoding(old_encoding)
                try:
                    def extrafn(ctx, extra):
                        extra['branch'] = ctx.branch()

                    ui.note('rebasing %s onto %s \n' % (current_ctx, tip_ctx))
                    hgrebase.rebase(ui, repo,
                                    dest=node.hex(tip_ctx.node()),
                                    rev=[node.hex(current_ctx.node())],
                                    extrafn=extrafn, keep=True)
                finally:
                    util.swap_out_encoding()

                # Don't trust the pre-rebase repo and context.
                repo = getlocalpeer(ui, {}, meta.path)
                meta = repo.svnmeta(svn.uuid, svn.subdir)
                hashes = meta.revmap.hashes()
                tip_ctx = repo[tip_ctx.node()]
                for c in tip_ctx.descendants():
                    rebasesrc = c.extra().get('rebase_source')
                    if rebasesrc and node.bin(rebasesrc) == current_ctx.node():
                        current_ctx = c
                        temporary_commits.append(c.node())
                        break

            # 4. Push the changeset to subversion
            tip_hash = hashes[tip_ctx.node()][0]
            try:
                ui.status('committing %s\n' % current_ctx)
                pushedrev = pushmod.commit(ui, repo, current_ctx, meta,
                                           tip_hash, svn)
            except pushmod.NoFilesException:
                ui.warn("Could not push revision %s because it had no changes "
                        "in svn.\n" % current_ctx)
                return

            # This hook is here purely for testing.  It allows us to
            # consistently trigger the race condition between
            # pushing and pulling here.  In particular, we use it to
            # trigger another revision landing between the time we
            # push a revision and pull it back.
            repo.hook('debug-hgsubversion-between-push-and-pull-for-tests')

            # 5. Pull the latest changesets from subversion, which will
            # include the one we just committed (and possibly others).
            r = pull(repo, dest, force=force, meta=meta)
            assert not r or r == 0

            # 6. Move our tip to the latest pulled tip
            for c in tip_ctx.descendants():
                if c.node() in hashes and c.branch() == svnbranch:
                    if meta.get_source_rev(ctx=c)[0] == pushedrev.revnum:
                        # This corresponds to the changeset we just pushed
                        if hasobsolete:
                            obsmarkers.append([(original_ctx, [c])])

                    tip_ctx = c

                    # Remember what files have been modified since the
                    # whole push started.
                    for file in c.files():
                        modified_files[file] = True

            # 7. Rebase any children of the commit we just pushed
            # that are not in the outgoing set
            for c in original_ctx.children():
                if not c.node() in hashes and not c.node() in outgoing:
                    util.swap_out_encoding(old_encoding)
                    try:
                        # Path changed as subdirectories were getting
                        # deleted during push.
                        saved_path = os.getcwd()
                        os.chdir(repo.root)

                        def extrafn(ctx, extra):
                            extra['branch'] = ctx.branch()

                        ui.status('rebasing non-outgoing %s onto %s\n' % (c, tip_ctx))
                        needs_rebase_set = "%s::" % node.hex(c.node())
                        hgrebase.rebase(ui, repo,
                                        dest=node.hex(tip_ctx.node()),
                                        rev=[needs_rebase_set],
                                        extrafn=extrafn,
                                        keep=not hasobsolete)
                    finally:
                        os.chdir(saved_path)
                        util.swap_out_encoding()


        util.swap_out_encoding(old_encoding)
        try:
            hg.update(repo, repo.branchtip(workingbranch))
        finally:
            util.swap_out_encoding()

        with repo.wlock():
            with repo.lock():
                if hasobsolete:
                    for marker in obsmarkers:
                        obsolete.createmarkers(repo, marker)
                        beforepush = marker[0][0]
                        afterpush = marker[0][1][0]
                        ui.note('marking %s as obsoleted by %s\n' %
                                (beforepush.hex(), afterpush.hex()))
                else:
                    # strip the original changesets since the push was
                    # successful and changeset obsolescence is unavailable
                    util.strip(ui, repo, outgoing, "all")
    finally:
        try:
            # It's always safe to delete the temporary commits.
            # The originals are not deleted unless the push
            # completely succeeded.
            if temporary_commits:
                # If the repo is on a temporary commit, get off before
                # the strip.
                parent = repo[None].p1()
                if parent.node() in temporary_commits:
                    hg.update(repo, parent.p1().node())
                with repo.wlock():
                    with repo.lock():
                        if hasobsolete:
                            relations = (
                                (repo[n], ()) for n in temporary_commits)
                            obsolete.createmarkers(repo, relations)
                        else:
                            util.strip(
                                ui, repo, temporary_commits, backup=None)

        finally:
            util.swap_out_encoding(old_encoding)
    return 1 # so we get a sane exit status, see hg's commands.push
Example #20
def main():
    ping = hours_per_ping = settings.gap / 3600

    if len(sys.argv) != 3 or settings.beemauth is None:
        usage()
    ttlf = sys.argv[1]     # tagtime log filename
    usrslug = sys.argv[2]  # like alice/weight
    m = re.search(r"^(?:.*?(?:\.\/)?data\/)?([^\+\/\.]*)[\+\/]([^\.]*)", usrslug)
    if not m:
        usage()
    usr, slug = m.groups()

    beem = beemapi.Beeminder(settings.beemauth, usr)

    # beef = bee file (cache of data on bmndr)
    beef = os.path.join(settings.path, '{}+{}.bee'.format(usr, slug))

    #if(defined(@beeminder)) { # for backward compatibility
    #  print "Deprecation warning: Get your settings file in line!\n";
    #  print "Specifically, 'beeminder' should be a hash, not an arry.\n";
    #  for(@beeminder) {
    #    @stuff = split(/\s+/, $_); # usrslug and tags
    #    $us = shift(@stuff);
    #    $beeminder{$us} = [@stuff];
    #  }
    #}

    crit = settings.beeminder.get(usrslug)
    if crit is None:
        raise ValueError("Can't determine which tags match {}".format(usrslug))

    # ph (ping hash) maps "y-m-d" to number of pings on that day.
    # sh (string hash) maps "y-m-d" to the beeminder comment string for that day.
    # bh (beeminder hash) maps "y-m-d" to the bmndr ID of the datapoint on that day.
    # ph1 and sh1 are based on the current tagtime log and
    # ph0 and sh0 are based on the cached .bee file or beeminder-fetched data.
    ph = defaultdict(int)
    sh = defaultdict(str)
    bh = {}
    ph1 = defaultdict(int)
    sh1 = defaultdict(str)
    ph0 = defaultdict(int)
    sh0 = defaultdict(str)
    start = time.time()   # start and end are the earliest and latest times we will
    end   = 0             # need to care about when updating beeminder.
    # bflag is true if we need to regenerate the beeminder cache file. reasons we'd
    # need to: 1. it doesn't exist or is empty; 2. any beeminder IDs are missing
    # from the cache file; 3. there are multiple datapoints for the same day.
    try:
        bflag = not os.stat(beef).st_size
    except FileNotFoundError:
        bflag = True
    bf1 = False
    bf2 = False
    bf3 = False
    bf4 = False  # why bflag?
    if bflag:
        bf1 = True

    remember = {} # remember which dates we've already seen in the cache file
    try:
        with open(beef, 'r') as B:
            for line in B:
                m = re.search(r'''
                (\d+)\s+		  # year
                (\d+)\s+		  # month
                (\d+)\s+		  # day
                (\S+)\s+		  # value
                "(\d+)			  # number of pings
                (?:[^\n\"\(:]*) # currently the string " ping(s)"
                :                 # the ": " after " pings"
                ([^\[]*)          # the comment string (no brackets)
                (?:\[             # if present,
                bID\:([^\]]*)     # the beeminder ID, in brackets
                \])?              # end bracket for "[bID:abc123]"
                \s*"
                              ''', line, re.VERBOSE)
                # XXX if not m set an error flag and continue
                y, m, d, v, p, c, b = m.groups()
                y = int(y)
                m = int(m)
                d = int(d)
                p = int(p)
                c = c.strip()
                ts = '{:04}-{:02}-{:02}'.format(y, m, d)

                ph0[ts] = p
                #$ph0{$ts} = $p;
                #$c =~ s/\s+$//;
                #m = re.match(r'\s+$/', c)
                sh0[ts] = c
                bh[ts] = b
                t = time.mktime((y, m, d, 0, 0, 0, 0, 0, -1))
                if t < start:
                    start = t
                if t > end:
                    end = t
                if not b:
                    bflag = True
                    bf2 += 1
                    if bf2 == 1:
                        print("Problem with this line in cache file:\n{}".format(line))
                    elif bf2 == 2:
                        print("Additional problems with cache file, which is expected if this "
                              "is your first time updating TagTime with the new Bmndr API.\n")
                if remember.get(ts):
                    bflag = bf3 = True
                remember[ts] = True
    except IOError:
        bflag = True
        bf4 = True

    if bflag: # re-slurp all the datapoints from beeminder
        ph0 = defaultdict(int)
        sh0 = defaultdict(str)
        bh = {}
        start = time.time() # reset these since who knows what happened to
        end   = 0           # them when we calculated them from the cache file
        # we decided to toss.

        #my $tmp = $beef;  $tmp =~ s/(?:[^\/]*\/)*//; # strip path from filename
        tmp = os.path.basename(beef)
        if bf1:
            print("Cache file missing or empty ({}); recreating... ".format(tmp))
        elif bf2:
            print("Cache file doesn't have all the Bmndr IDs; recreating... ")
        elif bf3:
            print("Cache file has duplicate Bmndr IDs; recreating... ")
        elif bf4:
            print("Couldn't read cache file; recreating... ")
        else:   # this case is impossible
            print("Recreating Beeminder cache ({})[{bf1}{bf2}{bf3}{bf4}]... ".format(
                bf1=bf1, bf2=bf2, bf3=bf3, bf4=bf4
            ))

        data = beem.data(slug)
        print("[Bmndr data fetched]")

        # take one pass to delete any duplicates on bmndr; must be one datapt per day
        #i = 0;
        remember = {}
        newdata = []
        for x in data:
            tm = time.localtime(x["timestamp"])
            y, m, d = tm.tm_year, tm.tm_mon, tm.tm_mday
            timetuple = time.localtime(x['timestamp'])
            # XXX okay so we're using localtime here, but
            # does this change if/when generalized
            # midnight is rolled out, etc?
            ts = time.strftime('%Y-%m-%d', timetuple)
            b = x['id']
            if remember.get(ts) is not None:
                print("Beeminder has multiple datapoints for the same day. "
                      "The other id is {}. Deleting this one:".format(remember[ts]))
                pprint(x)
                beem.delete_point(slug, b)
            else:
                newdata.append(x)
            remember[ts] = b
            #i += 1

        data = newdata
        # for my $x (reverse(@todelete)) {
        #   splice(@$data,$x,1);
        # }
        for x in data:   # parse the bmndr data into %ph0, %sh0, %bh
            timetuple = time.localtime(x['timestamp'])
            y, m, d, *rest = timetuple
            # XXX see note above about generalized midnight
            ts = time.strftime('%Y-%m-%d', timetuple)
            #t = util.pd(ts)     # XXX isn't x['timestamp'] the unix time anyway already
            t = x['timestamp']
            if t < start:
                start = t
            if t > end:
                end = t
            v = x['value']
            c = x['comment']
            b = x['id']
            i = re.search(r'^\d+', c)
            ph0[ts] = int(i.group(0) if i else 0) # ping count is first thing in the comment
            sh0[ts] = re.sub(r'[^:]*:\s+', '', c) # drop the "n pings:" comment prefix
            # This really shouldn't happen.
            if ts in bh:
                raise ValueError(
                    "Duplicate cached/fetched id datapoints for {ts}: {bhts}, {b}.\n{val}".format(
                        ts=ts, bhts=bh[ts], b=b, val=pformat(x)))
            bh[ts] = b

    try:
        with open(ttlf) as T:
            np = 0 # number of lines (pings) in the tagtime log that match
            for line in T: # parse the tagtime log file
                m = re.search(r'^(\d+)\s*(.*)$', line)
                if not m:
                    raise ValueError("Bad line in TagTime log: " + line)
                t = int(m.group(1)) # timestamp as parsed from the tagtime log
                ts = time.localtime(t)
                stuff = m.group(2)  # tags and comments for this line of the log
                tags = util.strip(stuff)
                if tagmatch(tags, crit, ts):
                    #print('found a match for line: {}'.format(line))
                    #y, m, d, *rest = time.localtime(t)
                    ymd = time.strftime('%Y-%m-%d', ts)
                    ph1[ymd] += 1
                    sh1[ymd] += util.stripb(stuff) + ", "
                    np += 1
                    if t < start:
                        start = t
                    if t > end:
                        end = t
    except IOError:
        raise ValueError("Can't open TagTime log file: "+ttlf)


    # clean up $sh1: trim trailing commas, pipes, and whitespace
    # for(sort(keys(%sh1))) { $sh1{$_} =~ s/\s*(\||\,)\s*$//; }
    for key in sorted(sh1.keys()):
        sh1[key] = re.sub(r'\s*(\||,)\s*$', '', sh1[key])

    #print "Processing datapoints in: ", ts($start), " - ", ts($end), "\n";

    nquo  = 0  # number of datapoints on beeminder with no changes (status quo)
    ndel  = 0  # number of deleted datapoints on beeminder
    nadd  = 0  # number of created datapoints on beeminder
    nchg  = 0  # number of updated datapoints on beeminder
    minus = 0  # total number of pings decreased from what's on beeminder
    plus  = 0  # total number of pings increased from what's on beeminder
    ii    = 0
    for t in range(daysnap(start) - 86400, daysnap(end) + 86401, 86400):
        timetuple = time.localtime(t)
        y, m, d, *rest = timetuple
        ts = time.strftime('%Y-%m-%d', timetuple)
        b = bh.get(ts, "")
        p0 = ph0.get(ts, 0)
        p1 = ph1.get(ts, 0)
        s0 = sh0.get(ts, "")
        s1 = sh1.get(ts, "")
        if p0 == p1 and s0 == s1: # no change to the datapoint on this day
            if b:
                nquo += 1
            continue
        if not b and p1 > 0: # no such datapoint on beeminder: CREATE
            nadd += 1
            plus += p1
            point = beem.create_point(slug, value=p1*ping,
                                      timestamp=t, comment=util.splur(p1, 'ping') + ': ' + s1)
            bh[ts] = point['id']
            #print "Created: $y $m $d  ",$p1*$ping," \"$p1 pings: $s1\"\n";
        elif p0 > 0 and p1 <= 0: # on beeminder but not in tagtime log: DELETE
            ndel += 1
            minus += p0
            beem.delete_point(slug, b)
            #print "Deleted: $y $m $d  ",$p0*$ping," \"$p0 pings: $s0 [bID:$b]\"\n";
        elif p0 != p1 or s0 != s1:  # bmndr & tagtime log differ: UPDATE
            nchg += 1
            if p1 > p0:
                plus += p1 - p0
            elif p1 < p0:
                minus += p0 - p1
            beem.update_point(slug, b, value=(p1*ping),
                              timestamp=t,
                              comment=util.splur(p1, 'ping') + ': ' + s1)
            # If this fails, it may well be because the point being updated was deleted/
            # replaced on another machine (possibly as the result of a merge) and is no
            # longer on the server. In which case we should probably fail gracefully
            # rather than failing with an ERROR (see beemupdate()) and not fixing
            # the problem, which requires manual cache-deleting intervention.
            # Restarting the script after deleting the offending cache is one option,
            # though simply deleting the cache file and waiting for next time is less
            # Intrusive. Deleting the cache files when merging two TT logs would reduce
            # the scope for this somewhat.
            #print "Updated:\n";
            #print "$y $m $d  ",$p0*$ping," \"$p0 pings: $s0 [bID:$b]\" to:\n";
            #print "$y $m $d  ",$p1*$ping," \"$p1 pings: $s1\"\n";
        else:
            print("ERROR: can't tell what to do with this datapoint (old/new):\n")
            print(ts, p0 * ping, " \"{p0} pings: {s0} [bID:{b}]\"".format(p0=p0, s0=s0, b=b))
            print(ts, p1 * ping, " \"{p1} pings: {s1}\"\n".format(p1=p1, s1=s1))
    with open(beef, 'w') as f: # generate the new cache file
        for ts in sorted(ph1.keys()):
            y, m, d = re.split(r'-', ts)
            p = ph1[ts]
            v = p * ping
            c = sh1[ts]
            b = bh[ts]
            out = '{y} {m} {d}  {v} "{pings}: {c} [bID:{b}]"\n'.format(
                y=y, m=m, d=d, v=v, pings=util.splur(p, "ping"), c=c, b=b)
            f.write(out)
    nd = len(ph1)                 # number of datapoints
    if nd != nquo + nchg + nadd:  # sanity check
        print("\nERROR: total != nquo+nchg+nadd ({nd} != {nquo}+{nchg}+{nadd})\n".format(
            nd=nd, nquo=nquo, nchg=nchg, nadd=nadd))

    print("Datapts: {nd} (~{nquo} *{nchg} +{nadd} -{ndel}), ".format(
        nd=nd, nquo=nquo, nchg=nchg, nadd=nadd, ndel=ndel),
          "Pings: {np} (+{plus} -{minus}) ".format(np=np, plus=plus, minus=minus))
    if isinstance(crit, str):
        print("w/ tag", crit)
    elif isinstance(crit, list):
        print("w/ tags in {", ','.join(crit), "}")
    elif hasattr(crit, 'search'):
        print('matching', crit.pattern)
    elif callable(crit):
        print('satisfying lambda')
    else:
        print("(unknown-criterion: {crit})".format(crit=crit))
Example #21
            err.log("Must specify dependency file")
        deps_filename = args.file

        handler.read_dependency_file(deps_filename)
        handler.read_installed_deps_file()

        if args.check:
            handler.check_only = True
            if not handler.check_dependencies(args.install_group):
                exit(-1)
            exit(0)
        if args.query:
            log.log(args.query + " : version (" +
                    handler.deps_dict[args.query].version.to_string() +
                    ") -- installed at (" +
                    handler.get_prefix(strip(args.query)) + ")")
            exit(0)
        if args.install_dir:
            log.log(handler.get_prefix(strip(args.install_dir)))
            exit(0)
        if args.list:
            for k in handler.installed_deps.copy():
                log.log(k)
            exit(0)
        if args.remove:
            handler.remove_dependency(args.remove, True)
            exit(0)
        if args.remove_all:
            for k in handler.installed_deps.copy():
                handler.remove_dependency(k, False)
            exit(0)
Example #22
def push(repo, dest, force, revs):
    """push revisions starting at a specified head back to Subversion.
    """
    assert not revs, 'designated revisions for push remains unimplemented.'
    cmdutil.bailifchanged(repo)
    checkpush = getattr(repo, 'checkpush', None)
    if checkpush:
        try:
            # The checkpush function changed as of e10000369b47 (first
            # in 3.0) in mercurial
            from mercurial.exchange import pushoperation
            pushop = pushoperation(repo, dest, force, revs, False)
            checkpush(pushop)
        except (ImportError, TypeError):
            checkpush(force, revs)

    ui = repo.ui
    old_encoding = util.swap_out_encoding()

    try:
        hasobsolete = obsolete._enabled
    except:
        hasobsolete = False

    temporary_commits = []
    obsmarkers = []
    try:
        # TODO: implement --rev/#rev support
        # TODO: do credentials specified in the URL still work?
        svn = dest.svn
        meta = repo.svnmeta(svn.uuid, svn.subdir)

        # Strategy:
        # 1. Find all outgoing commits from this head
        if len(repo[None].parents()) != 1:
            ui.status('Cowardly refusing to push branch merge\n')
            return 0 # results in nonzero exit status, see hg's commands.py
        workingrev = repo[None].parents()[0]
        workingbranch = workingrev.branch()
        ui.status('searching for changes\n')
        hashes = meta.revmap.hashes()
        outgoing = util.outgoing_revisions(repo, hashes, workingrev.node())
        if not (outgoing and len(outgoing)):
            ui.status('no changes found\n')
            return 1 # so we get a sane exit status, see hg's commands.push

        tip_ctx = repo[outgoing[-1]].p1()
        svnbranch = tip_ctx.branch()
        modified_files = {}
        for i in range(len(outgoing) - 1, -1, -1):
            # 2. Pick the oldest changeset that needs to be pushed
            current_ctx = repo[outgoing[i]]
            original_ctx = current_ctx

            if len(current_ctx.parents()) != 1:
                ui.status('Found a branch merge, this needs discussion and '
                          'implementation.\n')
                # results in nonzero exit status, see hg's commands.py
                return 0

            # 3. Move the changeset to the tip of the branch if necessary
            conflicts = False
            for file in current_ctx.files():
                if file in modified_files:
                    conflicts = True
                    break

            if conflicts or current_ctx.branch() != svnbranch:
                util.swap_out_encoding(old_encoding)
                try:
                    def extrafn(ctx, extra):
                        extra['branch'] = ctx.branch()

                    ui.note('rebasing %s onto %s \n' % (current_ctx, tip_ctx))
                    hgrebase.rebase(ui, repo,
                                    dest=node.hex(tip_ctx.node()),
                                    rev=[node.hex(current_ctx.node())],
                                    extrafn=extrafn, keep=True)
                finally:
                    util.swap_out_encoding()

                # Don't trust the pre-rebase repo and context.
                repo = getlocalpeer(ui, {}, meta.path)
                meta = repo.svnmeta(svn.uuid, svn.subdir)
                hashes = meta.revmap.hashes()
                tip_ctx = repo[tip_ctx.node()]
                for c in tip_ctx.descendants():
                    rebasesrc = c.extra().get('rebase_source')
                    if rebasesrc and node.bin(rebasesrc) == current_ctx.node():
                        current_ctx = c
                        temporary_commits.append(c.node())
                        break

            # 4. Push the changeset to subversion
            tip_hash = hashes[tip_ctx.node()][0]
            try:
                ui.status('committing %s\n' % current_ctx)
                pushedrev = pushmod.commit(ui, repo, current_ctx, meta,
                                           tip_hash, svn)
            except pushmod.NoFilesException:
                ui.warn("Could not push revision %s because it had no changes "
                        "in svn.\n" % current_ctx)
                return

            # This hook is here purely for testing.  It allows us to
            # consistently trigger the race condition between
            # pushing and pulling here.  In particular, we use it to
            # trigger another revision landing between the time we
            # push a revision and pull it back.
            repo.hook('debug-hgsubversion-between-push-and-pull-for-tests')

            # 5. Pull the latest changesets from subversion, which will
            # include the one we just committed (and possibly others).
            r = pull(repo, dest, force=force, meta=meta)
            assert not r or r == 0

            # 6. Move our tip to the latest pulled tip
            for c in tip_ctx.descendants():
                if c.node() in hashes and c.branch() == svnbranch:
                    if meta.get_source_rev(ctx=c)[0] == pushedrev.revnum:
                        # This corresponds to the changeset we just pushed
                        if hasobsolete:
                            obsmarkers.append([(original_ctx, [c])])

                    tip_ctx = c

                    # Remember what files have been modified since the
                    # whole push started.
                    for file in c.files():
                        modified_files[file] = True

            # 7. Rebase any children of the commit we just pushed
            # that are not in the outgoing set
            for c in original_ctx.children():
                if not c.node() in hashes and not c.node() in outgoing:
                    util.swap_out_encoding(old_encoding)
                    try:
                        # Path changed as subdirectories were getting
                        # deleted during push.
                        saved_path = os.getcwd()
                        os.chdir(repo.root)

                        def extrafn(ctx, extra):
                            extra['branch'] = ctx.branch()

                        ui.status('rebasing non-outgoing %s onto %s\n' % (c, tip_ctx))
                        needs_rebase_set = "%s::" % node.hex(c.node())
                        hgrebase.rebase(ui, repo,
                                        dest=node.hex(tip_ctx.node()),
                                        rev=[needs_rebase_set],
                                        extrafn=extrafn,
                                        keep=not hasobsolete)
                    finally:
                        os.chdir(saved_path)
                        util.swap_out_encoding()


        util.swap_out_encoding(old_encoding)
        try:
            hg.update(repo, repo.branchtip(workingbranch))
        finally:
            util.swap_out_encoding()

        if hasobsolete:
            for marker in obsmarkers:
                obsolete.createmarkers(repo, marker)
                beforepush = marker[0][0]
                afterpush = marker[0][1][0]
                ui.note('marking %s as obsoleted by %s\n' %
                        (beforepush.hex(), afterpush.hex()))
        else:
            # strip the original changesets since the push was
            # successful and changeset obsolescence is unavailable
            util.strip(ui, repo, outgoing, "all")
    finally:
        try:
            # It's always safe to delete the temporary commits.
            # The originals are not deleted unless the push
            # completely succeeded.
            if temporary_commits:
                # If the repo is on a temporary commit, get off before
                # the strip.
                parent = repo[None].p1()
                if parent.node() in temporary_commits:
                    hg.update(repo, parent.p1().node())
                if hasobsolete:
                    relations = ((repo[n], ()) for n in temporary_commits)
                    obsolete.createmarkers(repo, relations)
                else:
                    util.strip(ui, repo, temporary_commits, backup=None)

        finally:
            util.swap_out_encoding(old_encoding)
    return 1 # so we get a sane exit status, see hg's commands.push