Example #1
0
def init(section):
    print color.success_blue('# ') + 'Fetching stories from Backlog and Queue...'
    progress.update(0, 2)
    backlog_stories = filter_not_accepted(section.backlog.fetch_stories())
    backlog_stories_sorted = effort_sorted(backlog_stories)
    progress.update(1, 2)
    queue_backlog = filter_backlog(section.queue.fetch_stories())
    progress.finish()
    
    print color.success_blue('# ') + 'Writing original story order to config file...'
    section.order = map(lambda story: (story.id, story.current_state), backlog_stories)
    section.write_order()
    progress.finish()
    
    def move_to_estimator(story, origin):
        # append an origin marker to the description (origin_key is a constant
        # defined elsewhere in this module) so the story's source is recorded
        story.description = getattr(story, 'description', '') + '\n' + origin_key + origin
        story.sync_to_project(section.estimator, safe=True)
    
    for story in backlog_stories_sorted:
        if story.story_type == 'release' and story.current_state == 'unscheduled':
            story.current_state = 'unstarted'
            continue
        
        if story.story_type != 'release' and story.current_state in ['unscheduled', 'unstarted']:
            story.current_state = 'started'
            if not hasattr(story, 'estimate') and story.is_estimatable():
                print color.warn('Warning: ') + 'Story %s has no estimate. Adding an estimate of one; this is needed to start the story in Estimator.' % story.name
                story.estimate = 1

    print color.success_blue('# ') + 'Moving stories from Backlog to Estimator...'
    progress.iter(backlog_stories_sorted, move_to_estimator, 'backlog')

    for story in queue_backlog: story.current_state = 'unscheduled'
    print color.success_blue('# ') + 'Moving stories from Queue to Estimator...'
    progress.iter(queue_backlog[::-1], move_to_estimator, 'queue')
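# Note: `progress` throughout these examples is a project-specific helper, not
# part of the standard library. A minimal sketch of an update(i, total) /
# finish() / iter() style helper consistent with the calls in Example #1
# (names and rendering are assumptions, purely illustrative):
import sys

def update(done, total):
    # redraw a simple "[done/total]" counter in place
    sys.stdout.write('\r[%d/%d]' % (done, total))
    sys.stdout.flush()

def finish():
    # terminate the in-place counter line
    sys.stdout.write('\n')

def iter(items, func, *args):
    # apply func to each item, ticking the counter as we go
    for i, item in enumerate(items):
        update(i + 1, len(items))
        func(item, *args)
    finish()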
Example #2
0
from math import ceil  # run() uses ceil below for the per-process chunk size

def run(netdef, tosave, modify, procs, thisProc, stims, param, repeats, sim_time,
        SaveSpikes, SaveVoltage, SaveConductance, SaveCurrent):
    net = netdef()

    if SaveVoltage:
        net.recordVoltage()

    repeats = int(repeats)
    # Randomseed was 200 for most figures
    # Changed to 200 for rat
    # Changed to 200 for anurans
    s = Simulation(net, randomseed=202,delay=25)
    s.verbose = False
    s.sim_time = sim_time
    s.dt = 0.050
    total = len(stims)*len(param)*repeats
    spp = ceil(float(total)/procs)
    start = thisProc*spp
    end = (thisProc+1)*spp
    count = 0
    for a in param: 
        s.set_amplitude(net.sim_amp)
        for d in stims*repeats:
            if count >= start and count < end:
                net = modify(net,a,d)
                progress.update(count-start,spp,thisProc)
                s.stim_dur = d 
                s.run()
                key = [a,d] 
                net.savecells(tosave, key, spikes=SaveSpikes,voltage=SaveVoltage,conductance=SaveConductance,current=SaveCurrent)
            count += 1
    progress.update(spp,spp,thisProc)

    r = [thisProc,net.savedparams,net.savedcells]
    return r
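# Example #2 statically partitions the len(stims)*len(param)*repeats runs
# across `procs` workers: worker thisProc only executes the indexes in
# [thisProc*spp, (thisProc+1)*spp). A self-contained sketch of that chunking
# arithmetic (names mirror the example; the min() clamp is made explicit here,
# while the example clamps implicitly via its count test):
from math import ceil

def chunk_bounds(total, procs, this_proc):
    # per-process slice size, rounded up so every index is covered
    spp = int(ceil(float(total) / procs))
    start = this_proc * spp
    end = min((this_proc + 1) * spp, total)
    return start, end

# e.g. 10 runs over 3 processes -> (0, 4), (4, 8), (8, 10).
# Also note that `stims*repeats` above is plain list repetition: every
# stimulus duration is simulated `repeats` times.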
Example #3
0
def do_search(api, db, keyword_query, geocode, from_id, to_id, next_id):
    #r = api.request('statuses/filter', {'locations': '112.5,-37.5,154.1,-12.8'})
    # next_id == -1 means a fresh run; in re-start mode the caller passes in the
    # next_id persisted by the previous run, so it must not be reset here
    cur_id = -1
    if from_id == -1:
        from_id = None
    if to_id == -1:
        to_id = 0
    count = 0
    pager = TwitterPager(api, 'search/tweets',
                         {'q': keyword_query, 'geocode': geocode, 'count': '100',
                          'max_id': str(from_id), 'since_id': str(to_id)})
    while True:
        try:
            for item in pager.get_iterator():
                if 'text' in item:
                    cur_id = int(item["id"])
                    # if next_id != -1 we run in re-start mode and keep it;
                    # otherwise record the first (newest) id of this iteration,
                    # which becomes the next iteration's to_id
                    if next_id == -1:
                        next_id = cur_id
                    if cur_id <= to_id:
                        break
                    info = get_dict_object_from_tweet(item)
                    if not info:
                        print "Error parsing the tweet, ignore it"
                        continue
                    # put the data in the db
                    db.put(info)
                    count += 1
                    if count % 1000 == 0:
                        print count
                    # persist the progress to ensure we can resume the harvester from here
                    progress.update(cur_id, to_id, next_id)
                elif 'message' in item:
                    # something needs to be fixed before re-connecting
                    raise Exception(item['message'])
            return count
        except TwitterAPI.TwitterError.TwitterRequestError as e:
            if e.status_code == 429:
                print "Too Many Requests, now sleeping..."
                sleep(60)
            else:
                raise
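# Example #3 pages backwards through search results with an id window: max_id
# caps the newest tweet to fetch and since_id marks where the previous run
# stopped, while next_id remembers the newest id seen so it can become the
# next run's since_id. A sketch of that resume invariant, independent of the
# Twitter API (function and variable names are assumptions):

def should_store(cur_id, to_id, next_id):
    # record the newest id of this run once; everything at or below to_id was
    # already harvested by the previous run
    if next_id == -1:
        next_id = cur_id
    return cur_id > to_id, next_id

# e.g. should_store(500, 100, -1) -> (True, 500): store the tweet and persist
# 500 as the since_id for the following run.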
Example #4
0
def run_session(section):
    if section.has_estimator_id():
        print color.success_blue('# ') + 'Loaded existing Estimator session'
    else:
        print color.success_blue('# ') + 'No existing Estimator session found, starting new session...'
        print color.success_blue('# ') + 'Making backup of Queue and Backlog...'
        backup.backup_project(section.queue)
        progress.update(1, 2)
        backup.backup_project(section.backlog)
        progress.finish()

        try:
            section.estimator = create_estimator(section)
            section.write_estimator_id()
            
            try:
                print color.success_blue('# ') + 'Sharing Estimator with all members in account...'
                share_estimator_with_account(section)
            except Exception:
                # sharing is best-effort; a failure here should not abort the session
                pass
        except RuntimeError:
            est_id = raw_input(color.err('ERROR: ') + 'Estimator project already exists, but was not found in the config file. Please enter id of the Estimator project: ')
            section.estimator = piv.Project.load(int(est_id))
            section.write_estimator_id()
        
        section.imported = False
        section.write_imported()
    
    if not section.has_imported():
        stories = section.estimator.fetch_stories()
        if len(stories) != 0:
            print color.success_blue('# ') + 'Estimator project has not imported all the stories from Queue and Backlog; continuing imports...'
            
        init(section)
        section.imported = True
        section.write_imported()
    
    print color.success_blue('# ') + 'Estimator project at: https://www.pivotaltracker.com/n/projects/' + str(section.estimator.id)
    
    query_user_input(section)

    print color.success_blue('# ') + 'Making backup of Estimator...'
    backup.backup_project(section.estimator)
    progress.finish()
    
    finalize(section)
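# Example #4 persists small pieces of state (the Estimator id, an `imported`
# flag) so an interrupted session can resume without redoing work. A minimal
# sketch of that config-backed idempotency pattern (the file layout and helper
# names here are assumptions, not the project's actual API):
import json
import os

def load_flag(path, key, default=None):
    # read a persisted flag, tolerating a missing config file
    if not os.path.exists(path):
        return default
    with open(path) as f:
        return json.load(f).get(key, default)

def store_flag(path, key, value):
    # merge one flag into the config file so a crash loses at most one step
    data = {}
    if os.path.exists(path):
        with open(path) as f:
            data = json.load(f)
    data[key] = value
    with open(path, 'w') as f:
        json.dump(data, f)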
Example #5
0
def processCommits(process_commits):
    global commits

    processed_commits = set()

    for commit_id in process_commits:
        try:
            gitobject = repository.fetch(commits[commit_id])
            if gitobject.type == "commit": processed_commits.add(commit_id)
        except gitutils.GitError:
            pass
        except KeyboardInterrupt:
            sys.exit(1)
        except:
            # anything else is unexpected; re-raise it
            raise

        progress.update()

    return processed_commits
Example #6
0
commits = {}
pending_commits = set()

cursor.execute("SELECT COUNT(*) FROM commits")

print

progress.start(cursor.fetchone()[0], prefix="Fetching commits ...")

cursor.execute("SELECT id, sha1 FROM commits")

for commit_id, commit_sha1 in cursor:
    commits[commit_id] = commit_sha1
    pending_commits.add(commit_id)

    progress.update()

progress.end(" %d commits." % len(commits))

print

cursor.execute("SELECT MAX(CHARACTER_LENGTH(name)) FROM repositories")

repository_name_length = cursor.fetchone()[0]

cursor.execute("SELECT id FROM repositories ORDER BY id ASC")

repositories = [repository_id for (repository_id,) in cursor]

def processCommits(process_commits):
    global commits
Example #7
0
                    if len(sha1) == 40 and ref.startswith("refs/heads/"):
                        refs[ref[11:]] = sha1
                except ValueError:
                    pass
    except IOError as error:
        if error.errno == errno.ENOENT: pass
        else: raise

    progress.start(len(branches), "Repository: %s" % repository.name)

    heads_path = os.path.join(repository.path, "refs", "heads")

    branches_in_db = set()

    for branch_id, branch_name, branch_type, branch_base_id, branch_sha1 in branches:
        progress.update()

        branches_in_db.add(branch_name)

        try:
            # prefer the loose ref file; fall back to the packed refs read above
            try: repository_sha1 = open(os.path.join(heads_path, branch_name)).read().strip()
            except IOError: repository_sha1 = refs.get(branch_name)

            if repository_sha1 != branch_sha1:
                progress.write("NOTE[%s]: %s differs (db:%s != repo:%s)" % (repository.name, branch_name, branch_sha1[:8], repository_sha1[:8]))

                if branch_type == "review":
                    head = getReviewHead(repository, getReview(branch_id))

                    if not head:
                        progress.write("  invalid review meta-data: r/%d" % getReview(branch_id))
Example #8
0
repeats = 15
param = [(i*2)+10 for i in range(11)]

total = len(stims)*len(param)*repeats
print "Running %d simulations..." % total
count = 0

for a in param:
    # the body of the original inner loop never used its loop variable; the
    # sweep just sets the IC soma size in both networks, so do that directly
    networks["AC"].cells["IC"]["cells"][0].sec["soma"].L = a
    networks["AC"].cells["IC"]["cells"][0].sec["soma"].diam = a
    networks["C"].cells["IC"]["cells"][0].sec["soma"].L = a
    networks["C"].cells["IC"]["cells"][0].sec["soma"].diam = a

    for d in stims*repeats:
        progress.update(count, total)
        count += 1
        for net in networks:
            s[net].stim_dur = d 
            s[net].run()
            key = [a,d] 
            networks[net].savecells([["IC","soma"],["IC","dendE"]], key, spikes=True, 
                                                                         conductance=False, 
                                                                         current=False, 
                                                                         voltage=True)

if True:  # plotting toggle; set to False to only save results to file
    ns.plot_mean_spikes(networks["C"], "IC-soma", "c_soma_size.dat")
    ns.plot_mean_spikes(networks["AC"], "IC-soma", "ac_soma_size.dat")
    ns.show()  # Comment out to just save the results to file
Example #9
0
def recommend(user_user_similarities, user_ids):
    """Predict ratings for the test set.

    Runs user-based collaborative filtering over the test set and returns the
    user_news_rating_predictions array.

    Args:
        user_user_similarities (numpy.ndarray): pairwise user similarities;
            user_user_similarities[i, j] is the similarity between user "i" and
            user "j", and the similarity of a user with itself is 1.
        user_ids (numpy.ndarray, vector): user ids (ascending) associated with
            the array.

    Returns:
        user_news_rating_predictions (numpy.ndarray): rating predictions;
            user_news_rating_predictions[i, j] is the predicted rating of user
            "i" for news "j".
        user_ids (numpy.ndarray, vector): user ids (ascending) associated with
            the array.
        news_ids (numpy.ndarray, vector): news ids (ascending) associated with
            the array.
    """

    # get user-news array
    user_news_array_of_train = numpy.load("user_news_array_of_train.npy")
    user_ids_of_train = numpy.load("user_ids_of_train.npy")
    news_ids_of_train = numpy.load("news_ids_of_train.npy")
    user_news_array_of_test = numpy.load("user_news_array_of_test.npy")
    user_ids_of_test = numpy.load("user_ids_of_test.npy")
    news_ids_of_test = numpy.load("news_ids_of_test.npy")
    user_num_of_train = len(user_ids_of_train)
    news_num_of_train = len(news_ids_of_train)
    user_num_of_test = len(user_ids_of_test)
    news_num_of_test = len(news_ids_of_test)

    # find k nearest neighbors of users
    print("Find k nearest neighbors of users started.")
    time_start = time.time()
    neighbor_size = min(20, user_num_of_train-1)  # TODO refine this parameter
    user_neighbors_indexes = numpy.zeros((user_num_of_test, neighbor_size), numpy.int16)
    user_ids_of_train_dict = {user_id: index for (index, user_id) in enumerate(user_ids_of_train)}
    user_index_from_test_to_train_dict = {user_index_in_test: user_ids_of_train_dict[user_id_of_test] for (user_index_in_test, user_id_of_test) in enumerate(user_ids_of_test)}  # dictionary of user index from test to train
    for i in range(user_num_of_test):
        sorted_indexes = numpy.argsort(-user_user_similarities[user_index_from_test_to_train_dict[i], :])
        user_neighbors_indexes[i, :] = sorted_indexes[1:neighbor_size+1]  # choose the first k in the sorted list, remove the first one which is oneself
        if i % 100 == 0:
            # print("%.1f%%" % (i / user_num_of_test * 100))
            progress.update(i / user_num_of_test)
    progress.update(1)
    time_end = time.time()
    print("Find k nearest neighbors of users ended. %f s cost." % (time_end - time_start))

    # predict ratings
    print("Predict ratings started.")
    time_start = time.time()
    news_ids_of_test_dict = {news_id: index for (index, news_id) in enumerate(news_ids_of_test)}
    news_index_from_train_to_test_dict = {news_index_in_train: news_ids_of_test_dict[news_id_of_train] for (news_index_in_train, news_id_of_train) in enumerate(news_ids_of_train)}  # dictionary of news index from train to test
    user_news_array_of_train_expanded = numpy.zeros((user_num_of_train, news_num_of_test), numpy.int8)  # int is faster than bool_
    user_news_array_of_train_expanded[:, [news_index_from_train_to_test_dict[i] for i in range(news_num_of_train)]] = user_news_array_of_train  # expand the column of user_news_array_of_train to the same size as user_news_array_of_test
    user_news_rating_predictions = numpy.zeros((user_num_of_test, news_num_of_test), numpy.float16)
    eps = numpy.finfo(float).eps
    for i in range(user_num_of_test):
        this_user_index_in_train = user_index_from_test_to_train_dict[i]
        similarities_sum = 0.0
        for user_neighbors_index in user_neighbors_indexes[i]:
            user_news_rating_predictions[i] += user_news_array_of_train_expanded[user_neighbors_index] * user_user_similarities[user_neighbors_index, this_user_index_in_train]
            similarities_sum += user_user_similarities[user_neighbors_index, this_user_index_in_train]
        user_news_rating_predictions[i] /= (similarities_sum + eps)
        if i % 100 == 0:
            # print("%.1f%%" % (i / user_num_of_test * 100))
            progress.update(i / user_num_of_test)
    progress.update(1)
    user_news_rating_predictions[user_news_array_of_train_expanded[[user_index_from_test_to_train_dict[i] for i in range(user_num_of_test)], :] == 1] = 0  # remove news one user has clicked
    time_end = time.time()
    print("Predict ratings ended. %f s cost." % (time_end - time_start))

    print("[NMF-User-based Collaborative Filtering] Recommend finished!")

    return user_news_rating_predictions, user_ids_of_test, news_ids_of_test
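# The per-user loop in recommend() computes a similarity-weighted average of
# the neighbors' click vectors. The same prediction can be written as a single
# vectorized step; a sketch on synthetic shapes (names are assumptions, not
# part of the original code):
import numpy

def predict_ratings(neighbor_clicks, neighbor_sims):
    # neighbor_clicks: (k, n_news) 0/1 matrix; neighbor_sims: (k,) weights
    eps = numpy.finfo(float).eps
    # weighted sum of the neighbor rows, normalised by the total similarity
    return neighbor_sims.dot(neighbor_clicks) / (neighbor_sims.sum() + eps)

# e.g. two neighbors over three news items:
# predict_ratings(numpy.array([[1, 0, 1], [0, 0, 1]]),
#                 numpy.array([0.9, 0.4]))
# -> array([0.692..., 0.0, 1.0])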
Example #10
0
#selector = ROOT.TTreeFormula("selector", str(tcut), chain)
nentries = chain.GetEntries()  # cache once; GetEntries() can be costly on a TChain
for i, entry in enumerate(chain):
    filename = entry.GetFile().GetName()
    if filename != oldfilename:
        #selector.UpdateFormulaLeaves()
        #if selector.EvalInstance() == False: continue
        oldfilename = filename
        if "LT" in filename:
            h2key = pattern.match(filename).group(1)
            if h2key not in lt_h2:
                lt_h2[h2key] = ROOT.TH2F(args.name + h2key,
                                         args.name + ' ' + h2key, ncat, 0,
                                         ncat, ncat, 0, ncat)
        else:
            h2key = None
    progress.update(100. * i / nentries)
    if entry.smearerCat[0] >= 0:
        x = cat1[entry.smearerCat[0]]
        y = cat2[entry.smearerCat[0]]
        h2.Fill(x, y)
        if h2key in lt_h2:
            lt_h2[h2key].Fill(x, y)
progress.end()

h2.Draw("colz")
c.SetLogz()
c.SaveAs("plots/smearerCat_" + h2.GetName() + "_" + region_name + ".png")
for key in lt_h2:
    lt_h2[key].Draw("colz")
    c.SaveAs("plots/smearerCat_" + lt_h2[key].GetName() + "_" + region_name +
             ".png")