Example #1
def getBoardRelated():
    if not getBoard():
        return False
    lstVersion = []
    lstSprint = []
    glob_dic.tips.set_value('sid', [{}])
    glob_dic.tips.set_value('issues', [{}])
    thr_s = []
    thr_v = []
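    # Download each board's sprints and versions concurrently, one thread per request;
    # the results collected in lstSprint/lstVersion are merged after the joins below.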
    for boardid in glob_dic.tips.get_value('board'):
        url, headers = prepare('getBoard','/{}/sprint'.format(str(boardid)))
        thrSprint = Thread(target = thread_download, args = (url,headers,lstSprint,True))
        thr_s.append(thrSprint)
        thrSprint.start()
        url, headers = prepare('getBoard','/{}/version'.format(str(boardid)))
        thrVersion = Thread(target = thread_download, args = (url,headers,lstVersion))
        thr_v.append(thrVersion)
        thrVersion.start()
    for t in thr_s:
        t.join()
    
    if lstSprint:
        if not goInto(lstSprint, 'sprint', 'name'):
            return False
    for t in thr_v:
        t.join()
    if not goInto(lstVersion, 'versions', 'name'):
        return False

    return True
Example #2
def streamsbysongovertime(data):
    """Streams by song over time"""
    songs = getsongs(data)
    periods = []
    streams = [[] for _ in songs]

    for period in data:
        count = [0] * len(songs)
        periods.append(month_abbr[period.month] + ' ' + period.year[2:])

        for sale in period.sales:
            if sale.method == 'Streaming':
                for i in range(len(songs)):
                    if sale.song == songs[i]:
                        count[i] += sale.units
                        break

        for i in range(len(count)):
            streams[i].append(count[i])

    plots = range(len(periods))
    prepare('line')
    stackplot(plots, streams, labels=songs)
    xlim(0, len(periods) - 1)
    xticks(plots, periods, rotation=90)
    legend()
    show()
Example #3
def process():
    logging.info("************************ process start ***************************************")
    all_data = ts.get_today_all()
    subset = all_data[['code', 'name', 'nmc']]
    stocks = [tuple(x) for x in subset.values]

    if utils.need_update_data():
        utils.prepare()
        data_fetcher.run(stocks)
        check_exit()

    strategies = {
        '海龟交易法则': turtle_trade.check_enter,  # turtle trading rules
        '放量上涨': enter.check_volume,  # rising on increased volume
        # '均线多头': keep_increasing.check,
        # '停机坪': parking_apron.check,
        # '回踩年线': backtrace_ma250.check,
    }

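    # On Mondays (weekday 0), additionally enable the '均线多头' (moving-average uptrend) strategy.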
    if datetime.datetime.now().weekday() == 0:
        strategies['均线多头'] = keep_increasing.check

    for strategy, strategy_func in strategies.items():
        check(stocks, strategy, strategy_func)
        time.sleep(2)

    statistics(all_data, stocks)
    logging.info("************************ process   end ***************************************")
Example #4
def main():

    args = read_args()
    logging.info("Reading the file. This may take a few mintues")
    
    f_in, file_size = read_file(args.file_in, args.size)
    
    f = DNAFountain(file_in = f_in, 
                    file_size = file_size, 
                    chunk_size = args.size , 
                    rs = args.rs, 
                    max_homopolymer = args.max_homopolymer,
                    gc = args.gc,
                    delta = args.delta, 
                    c_dist = args.c_dist,
                    np = args.rand_numpy,
                    alpha = args.alpha, 
                    stop = args.stop)

    logging.info("Upper bounds on packets for decoding is %d (x%f)  with %f probability\n", int(json.loads(f.PRNG.debug())['K_prime']), 
                                                                                           json.loads(f.PRNG.debug())['Z'],
                                                                                           json.loads(f.PRNG.debug())['delta'])
    if (args.out == '-'):
        out = sys.stdout

    else: 
        out = open (args.out, 'w')
        pbar = tqdm(total= f.final, desc = "Valid oligos")

    prepare(args.max_homopolymer)
    
    used_bc = dict()

    
    while f.good < f.final:
        d = f.droplet()
        # f.screen() validates the droplet and advances f.good, so call it exactly once per iteration.
        if f.screen(d):
            if not args.no_fasta:
                out.write(">packet {}_{}\n".format(f.good, d.degree))
            out.write("{}\n".format(d.to_human_readable_DNA()))

            if d.seed in used_bc:
                logging.error("Seed %d has been seen before\nDone", d.seed)
                sys.exit(1)

            used_bc[d.seed] = 1

            if (args.out != '-'):
                pbar.update()

    if (args.out != '-'):
        pbar.close()
    logging.info("Finished. Generated %d packets out of %d tries (%.3f)", f.good, f.tries, (f.good+0.0)/f.tries)

    out.close()
Example #5
def main_single(TRAIN, TEST, params, mname='BiLSTM_CRF'):
    # -------- log --------
    logpath = '/home/faust/PROJECTS/NEUTAG/CWS.log'
    logger = getlogger(logpath)

    # -------- tags --------
    tags = ['U', 'B', 'I', 'E']
    t2i = dict([(t, i) for i, t in enumerate(tags)])
    print(t2i)

    # -------- dict --------
    # dict match params
    dictpath = '/home/faust/PROJECTS/NEUTAG/data/dict'
    match_len = 5
    # _mode = 'board'
    # _dim = (match_len-1) * 2
    _mode = 'full'
    _dim = (match_len - 1) * 2 + 1
    # _mode = 'simple'
    # _dim = 3

    # -------- embedding --------
    dumppath = '/home/faust/PROJECTS/NEUTAG/data/gram2vec.pkl'
    c2i, embed_mat, embed_dim = pickle.load(open(dumppath, "rb"))

    # -------- traindata --------
    TRAINDATA = prepare(TRAIN, c2i=c2i, t2i=t2i, dictpath=dictpath, match_len=match_len, match_mode=_mode)
    TESTDATA = prepare(TEST, c2i=c2i, t2i=t2i, dictpath=dictpath, match_len=match_len, match_mode=_mode)

    # info = '/////////////////////-*- CWS_DR_ATT -*-/////////////////////'
    # logger.info(info)
    # print(info)
    #
    # tf.reset_default_graph()
    # from model.CWS_DR_ATT import CWS_DR
    # tbpath = '/files/faust/tf-data/NEUTAG/{}_'.format(_mode)
    # model = CWS_DR(ntags=len(t2i), embed_mat=embed_mat, embed_dim=embed_dim, match_dim=_dim)
    # train(model, mname='CWS_DR_ATT', params=params,
    #       traindata=TRAINDATA, c2i=c2i, t2i=t2i, valdata=TESTDATA, logger=logger, tbpath=tbpath)

    info = '/////////////////////-*- CWS_DC_ATT -*-/////////////////////'
    logger.info(info)
    print(info)

    tf.reset_default_graph()
    from model.CWS_DC_ATT import CWS_DC
    tbpath = '/files/faust/tf-data/NEUTAG/{}_'.format(_mode)
    model = CWS_DC(ntags=len(t2i), embed_mat=embed_mat, embed_dim=embed_dim, match_dim=_dim)
    train(model, mname='CWS_DC_ATT', params=params,
          traindata=TRAINDATA, c2i=c2i, t2i=t2i, valdata=TESTDATA, logger=logger, tbpath=tbpath)
Example #6
    def _init_dir_path(self):
        """
        init dir path
        """
        utils.prepare(self.log_dir)
        utils.prepare(self.log_dir, "actions")
        utils.prepare(self.log_dir, "rewards")
        utils.prepare(self.log_dir, "checkpoints")
Example #7
def streamsbystoreovertime(data):
    """Streams by store over time"""
    stores = getstores(data)
    periods = []
    streams = [[] for _ in stores]

    for period in data:
        count = [0] * len(stores)
        periods.append(month_abbr[period.month] + ' ' + period.year[2:])

        for sale in period.sales:
            if sale.method == 'Streaming':
                for i in range(len(stores)):
                    if sale.store == stores[i]:
                        count[i] += sale.units
                        break

        for i in range(len(count)):
            streams[i].append(count[i])

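    # Combine the Amazon storefronts: fold MP3 and Unlimited streams into the Cloud
    # series (renamed 'Amazon'), drop the duplicates, and shorten 'YouTube Music Key' to 'YouTube'.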
    cloud = 0
    mp3 = 0
    unlimited = 0

    for i in range(len(stores)):
        if stores[i] == 'Amazon Cloud':
            cloud = i
            stores[i] = 'Amazon'
        elif stores[i] == 'Amazon MP3':
            mp3 = i
        elif stores[i] == 'Amazon Unlimited':
            unlimited = i
        elif stores[i] == 'YouTube Music Key':
            stores[i] = 'YouTube'

    for i in range(len(streams[0])):
        streams[cloud][i] += streams[unlimited][i] + streams[mp3][i]

    for i in sorted([mp3, unlimited], reverse=True):
        del stores[i], streams[i]

    plots = range(len(periods))
    prepare('line')
    stackplot(plots, streams, labels=stores)
    xlim(0, len(periods) - 1)
    xticks(plots, periods, rotation=90)
    legend()
    show()
Example #8
def query_number(lst):
    issue = lst[0].upper()
    url, headers = prepare('query_number', '/{}'.format(issue))
    f, r = send_request(url, method.Get, headers, None, None)
    if not f:
        return False, r
    return True, getResponse([r])
Example #9
def getAssignee():
    lst = []
    thr_lst = []
    # Fetch user pages concurrently: one thread per offset (step 100, up to 10000).
    for i in range(0, 10000, 100):
        url, headers = prepare('getAssignee')
        thr = Thread(target=getUserHelper, args=(lst, url, headers, i))
        thr_lst.append(thr)
        thr.start()
    for t in thr_lst:
        t.join()
    if goInto(lst, 'assignee', 'key'):
        return goInto(lst, 'reporter', 'key')
    else: 
        return False
Example #10
def issue_display_info(issue):
    url, headers = prepare('query_number', '/{}'.format(issue.upper()))
    f, r = send_request(url, method.Get, headers, None, None)
    if not f:
        return False, r
    dic = r.get('fields')
    lst = [
        'status', 'issuetype', 'summary', 'reporter', 'priority', 'labels',
        'description', 'assignee', 'sprint'
    ]
    s = 'Here is the information for {}: '.format(issue)
    stringLst = [s]
    for i, field in enumerate(lst):
        obj = dic.get(field)
        if field == 'labels':
            try:
                global glob_labels
                glob_labels = obj
            except AttributeError:
                pass
        if isinstance(obj, dict):
            s = '{}.{}: {}'.format(i + 1, field, obj.get('name'))
        else:
            s = '{}.{}: {}'.format(i + 1, field, dic.get(field))
        stringLst.append(s)
    stringLst.append(
        'Please enter the corresponding numbers of the fields you want to edit, separated by commas: '
    )
    return True, stringLst
Example #11
def bango(request):
    """
    Pass the request through to Bango. There's one job:
    1) Parse the XML and insert the correct user and password
    """
    namespaces = [
        'com.bango.webservices.billingconfiguration',
        'com.bango.webservices.directbilling',
        'com.bango.webservices.mozillaexporter'
    ]

    new_request = prepare(request)
    # Alter the XML to include the username and password from the config.
    root = etree.fromstring(new_request['data'])

    tags = lambda name: ['{%s}%s' % (n, name) for n in namespaces]
    username, password = tags('username'), tags('password')
    changed_username, changed_password = False, False

    for element in root.iter():
        if element.tag in username:
            element.text = settings.BANGO_AUTH.get('USER', '')
            changed_username = True
        elif element.tag in password:
            element.text = settings.BANGO_AUTH.get('PASSWORD', '')
            changed_password = True
        # No point in processing the rest of the body if both
        # have been changed.
        if changed_username and changed_password:
            break

    new_request['data'] = etree.tostring(root)
    return send(new_request)
Example #13
    def __getitem__(self, i):
        img = cv.imread(self.img_paths[i])
        img = cv.cvtColor(img, cv.COLOR_BGR2RGB)

        boxes, scores = self.get_boxes_and_scores(i)
        gt_boxes, gt_labels = self._get_annotations(i)

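        # trainval: apply one random scale and a random horizontal flip.
        # test: return every scale/flip combination for multi-scale evaluation.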
        if self.split == "trainval":
            img, boxes, gt_boxes = prepare(
                img,
                boxes,
                random.choice(self.scales),
                random.choice([False, True]),
                gt_boxes,
                gt_labels,
            )
            target = self.get_target(gt_labels)
            return self.ids[i], img, boxes, scores, target
        elif self.split == "test":
            img_id = self.ids[i]
            orig_img = TRANSFORMS(img)
            scaled_imgs = []
            scaled_boxes = []

            for xflip in [True, False]:
                for max_dim in self.scales:
                    scaled_img, tmp_scaled_boxes, _ = prepare(
                        img,
                        boxes,
                        max_dim=max_dim,
                        xflip=xflip,
                        gt_boxes=[],
                        gt_labels=[],
                    )
                    scaled_imgs.append(scaled_img)
                    scaled_boxes.append(tmp_scaled_boxes)

            return (
                img_id,
                orig_img,
                boxes,
                scaled_imgs,
                scaled_boxes,
                scores,
                gt_boxes,
                gt_labels,
            )
Example #14
def valuebystore(data):
    """Value by store"""
    stores = getstores(data)

    for i in range(len(stores)):
        if stores[i] == 'Amazon MP3':
            del stores[i]
            break

    totals = [0] * len(stores)
    streams = [0] * len(stores)

    for period in data:
        for sale in period.sales:
            if sale.method == 'Streaming':
                for i in range(len(stores)):
                    if sale.store == stores[i]:
                        totals[i] += sale.total
                        streams[i] += sale.units
                        break

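    # Fold Amazon Unlimited revenue into Amazon Cloud (reported as 'Amazon') and drop
    # the duplicate entry; shorten 'YouTube Music Key' to 'YouTube'.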
    cloud = 0
    unlimited = 0

    for i in range(len(stores)):
        if stores[i] == 'Amazon Cloud':
            cloud = i
            stores[i] = 'Amazon'
        elif stores[i] == 'Amazon Unlimited':
            unlimited = i
        elif stores[i] == 'YouTube Music Key':
            stores[i] = 'YouTube'

    totals[cloud] += totals[unlimited]

    for i in sorted([unlimited], reverse=True):
        del stores[i], totals[i]

    plots = range(len(stores))
    values = [totals[i] / streams[i] for i in plots]
    sort = sorted(zip(values, stores), reverse=True)
    values = [value for value, store in sort]
    stores = [store for value, store in sort]
    prepare('bar')
    bar(plots, values)
    xticks(plots, stores)
    show()
Example #15
def issue_get_tansition(issue, dic):
    url, headers = prepare('issue', '/{}/transitions'.format(issue))
    f, r = send_request(url, method.Get, headers, None, None)
    if not f:
        return None
    for msg in r.get('transitions'):
        dic[msg.get('name')] = msg.get('id')
    return dic
Example #16
def issue_addComment(lst):
    issue = lst[0]
    url, headers = prepare('issue', '/{}/{}'.format(issue, 'comment'))
    data = json.dumps({"body": lst[1]})
    f, r = send_request(url, method.Post, headers, None, data)
    if not f:
        return False, r
    mylog.info(r)
    return True, ['Comment (ID: ' + r['id'] + ') added']
Example #17
def process():
    logging.info(
        "************************ process start ***************************************"
    )
    utils.prepare()
    data_fetcher.run()

    check_exit()

    stocks = utils.get_stocks()
    m_filter = check_enter(end_date=None)
    results = list(filter(m_filter, stocks))

    logging.info('选股结果:{0}'.format(results))  # '选股结果' = stock selection results
    notify.notify('选股结果:{0}'.format(results))
    logging.info(
        "************************ process   end ***************************************"
    )
Example #18
    def sent2corpora(self, sentence, synonym=False):
        sentence = prepare(sentence)
        corpora = [e for e in self.jb.segment_search(sentence) if self.swf.filter(e)]
        new_corpora = []
        for corpus in corpora:
            if synonym and corpus in self.jb.synonym:
                corpus = self.jb.synonym[corpus]
            new_corpora.append(corpus)
        return new_corpora
Example #19
def main(_):
    prepare(config)
    rng = set_random_seed(config.seed)

    sess_config = tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True)
    sess_config.gpu_options.allow_growth=True

    trainer = Trainer(config, rng)

    with tf.Session(config=sess_config) as sess:
        if config.train:
            trainer.train(sess)
        else:
            if not config.map:
                raise Exception("[!] You should specify `map` to synthesize a program")
            trainer.synthesize(sess, config.map)
Example #20
def streamsbycountry(data):
    """Streams by country"""
    countries = getcountries(data)
    streams = [0] * len(countries)

    for period in data:
        for sale in period.sales:
            if sale.method == 'Streaming':
                for i in range(len(countries)):
                    if sale.country == countries[i]:
                        streams[i] += sale.units
                        break

    sort = sorted(zip(streams, countries))
    streams = [count for count, country in sort]
    countries = [country for count, country in sort]
    prepare('pie')
    pie(streams, labels=countries, startangle=90)
    show()
Example #21
def getAll(sname, sid):
    

    url,headers = prepare('assign_sprint','/{}/issue'.format(sid))
    
    f,r = send_request(url, method.Get, headers, None, None)
    
    if f:
        for issue in r.get('issues'):
            glob_dic.tips.get_value('issues')[0][issue.get('key').upper()] = sname
Example #22
def freq_max_root(page, father, output):
	output.write("\n" + page)
	ls = utils.prepare(page)
	for leaf in ls:
		grandpa = leaf.parent.parent
		xpath_grandpa = utils.generate_xpath(grandpa)
		if father.get((xpath_grandpa, leaf)) == None:
			father[(xpath_grandpa, leaf)] = 1 
		else: 
			father[(xpath_grandpa, leaf)] += 1
	#print(father)
	return father
Example #23
def getVersion():
    getProject()
    lst = []
    for i, p in enumerate(glob_dic.tips.get_value('project')):
        print('Iteration %d' % i)
        url, headers = prepare('getVersion','/{}/versions'.format(p))
        
        f, r = send_request(url, method.Get, headers, None, None)
        if not f:
            return False
        lst += r
    return goInto(lst, 'versions', 'name')
Example #24
def issue_assign(lst):
    issue = lst[0]
    assignee = lst[1]
    url, headers = prepare('issue', '/{}/assignee'.format(issue))
    data = '{"name":"' + assignee + '"}'

    f, r = send_request(url, method.Put, headers, None, data)
    if not f:
        return False, r
    msg = '{} successfully assigned to {}'.format(issue, assignee)
    mylog.info(msg)
    return True, [msg]
Example #25
def process():
    logging.info(
        "************************ process start ***************************************"
    )
    try:
        all_data = ts.get_today_all()
        subset = all_data[['code', 'name', 'nmc']]
        subset.to_csv(settings.config['stocks_file'], index=None, header=True)
        stocks = [tuple(x) for x in subset.values]
        statistics(all_data, stocks)
    except urllib.error.URLError as e:
        subset = pd.read_csv(settings.config['stocks_file'])
        subset['code'] = subset['code'].astype(str)
        stocks = [tuple(x) for x in subset.values]

    if utils.need_update_data():
        utils.prepare()
        data_fetcher.run(stocks)
        check_exit()

    strategies = {
        '海龟交易法则': turtle_trade.check_enter,  # turtle trading rules
        '放量上涨': enter.check_volume,  # rising on increased volume
        '突破平台': breakthrough_platform.check,  # platform breakout
        '均线多头': keep_increasing.check,  # moving-average uptrend
        '无大幅回撤': low_backtrace_increase.check,  # no large drawdown
        '停机坪': parking_apron.check,  # consolidation ("parking apron")
        '回踩年线': backtrace_ma250.check,  # pullback to the yearly (250-day) moving average
    }

    if datetime.datetime.now().weekday() == 0:
        strategies['均线多头'] = keep_increasing.check

    for strategy, strategy_func in strategies.items():
        check(stocks, strategy, strategy_func)
        time.sleep(2)

    logging.info(
        "************************ process   end ***************************************"
    )
Example #26
def downloadsovertime(data):
    """Downloads over time"""
    periods = []
    downloads = []

    for period in data:
        count = 0
        periods.append(month_abbr[period.month] + ' ' + period.year[2:])

        for sale in period.sales:
            if sale.method == 'Download':
                count += sale.units

        downloads.append(count)

    plots = range(len(periods))
    prepare('line')
    plot(plots, downloads)
    xlim(0, len(periods) - 1)
    ylim(bottom=0)
    xticks(plots, periods, rotation=90)
    show()
Example #27
def login(lst):
    un = lst[0]
    pw = lst[1]
    url = prepare('login')[0]
    headers = {'Content-Type': 'application/json'}
    data = json.dumps({"username": un, "password": pw})
    try:
        r = requests.post(
            url,
            headers=headers,
            data=data,
            timeout=glob_dic.get_value('timeout'),
            verify=False)
        mylog.error(r.text)
        if r.status_code == 401:
            mylog.error("401 Unauthorized")
            return False, ['401 Unauthorized!', 'Please make sure the username and password are correct']
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError as err:
            mylog.error(err)
            try:
                errmsg = r.json()['errorMessages'][0]
                mylog.error(errmsg)
            except KeyError:
                pass
            except json.decoder.JSONDecodeError:
                pass
            return (
                False,
                ['Login failed! Please make sure that your username and password are correct!']
            )
        j = r.json()
        try:
            glob_dic.set_value(
                'cookie', j['session']['name'] + '=' + j['session']['value'])
        except KeyError:
            mylog.error('No session information from HTTP response\r\n' +
                        r.text)
            return (False, ['session info not found!'])
        f = open(glob_dic.get_value('cookie_path') + "cookie.txt", "w")
        f.write(glob_dic.get_value('cookie'))
        f.close()
        mylog.info("Successfully logged in as " + un)
        thr = Thread(target=download, args=(None, un))
        thr.start()
        return (True, ["Success"])
    except requests.exceptions.RequestException as err:
        mylog.error(err)
        return (False, ['Login failed due to an internet error!'])
Example #28
def issue_edit(lst):
    issue = lst[0]
    lst = lst[1:]
    url, headers = prepare('issue', '/{}'.format(issue))
    status = lst[0]
    if not issue_transit([issue, status]):
        return False, ['Error occurred during transition']
    lst = lst[1:]
    issuetype = {"name": lst[0].capitalize()}
    summary = lst[1]
    reporter = {"name": lst[2]}
    priority = {"name": lst[3].capitalize()}
    labels = lst[4].split(' ')
    description = lst[5]
    assignee = {"name": lst[6]}
    if labels != ['']:
        global glob_labels
        r_lst = [issue, 'remove']
        a_lst = [issue, 'add']
        for label in labels:
            if label in glob_labels:
                r_lst.append(label)
            else:
                a_lst.append(label)
        if not issue_edit_labels(r_lst):
            return False, ['error occurred while removing labels']
        if not issue_edit_labels(a_lst):
            return False, ['error occurred while adding labels']
    glob_labels = []
    fields = [issuetype, summary, reporter, priority, description, assignee]
    keys = [
        'issuetype', 'summary', 'reporter', 'priority', 'description',
        'assignee'
    ]
    dic = {}
    for i in range(0, len(fields)):
        if lst[i] != "":
            dic[keys[i]] = fields[i]
    data = json.dumps({"fields": dic})
    f, r = send_request(url, method.Put, headers, None, data)
    if not f:
        return False, r
    if lst[7]:
        f, r = issue_assign_sprint(issue, lst[7])
        if not f:
            return False, [
                'Problem occurred while assigning the issue to the target sprint: all other fields have been updated!'
            ] + r
    return True, ['Edit Success']
Example #29
def valuebycountry(data):
    """Value by country"""
    countries = getcountries(data)
    totals = [0] * len(countries)
    streams = [0] * len(countries)
    plots = range(len(countries))

    for period in data:
        for sale in period.sales:
            if sale.method == 'Streaming':
                for i in plots:
                    if sale.country == countries[i]:
                        totals[i] += sale.total
                        streams[i] += sale.units
                        break

    values = [totals[i] / streams[i] for i in plots]
    sort = sorted(zip(values, countries), reverse=True)
    values = [value for value, country in sort]
    countries = [country for value, country in sort]
    prepare('bar')
    bar(plots, values)
    xticks(plots, countries)
    show()
Example #30
def logout():
    url, headers = prepare('logout')
    try:
        r = requests.delete(url, headers = headers, timeout=5)
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            pass
    except requests.exceptions.RequestException:
        pass
    f = open(glob_dic.get_value('cookie_path') + "cookie.txt", "w")
    f.write('')
    f.close()
    glob_dic.set_value('cookie', '')
    mylog.info('Successfully logged out')
    return (True, ['Successfully logged out'])
Example #31
def issue_transit(lst):
    issue = lst[0]
    status = lst[1].title()
    url, headers = prepare(
        'issue', '/{}/transitions?expand=transitions.fields'.format(issue))
    data = {}
    dic = {}
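    # issue_get_tansition fills dic with transition name -> id; the id matching the requested status is then posted.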
    if not issue_get_tansition(issue, dic):
        return False, ['no transition is available']
    transition = {"id": dic.get(status)}
    data = json.dumps({"transition": transition})
    f, r = send_request(url, method.Post, headers, None, data)
    if not f:
        return False, ['Error occurred during transition'] + r
    msg = 'Status of {} has been changed to {}'.format(issue, status)
    mylog.info(msg)
    return True, [msg]
Example #32
def braintree(request):
    """
    Pass the request through to Braintree. There are two jobs to do:
    1) Add in the Braintree auth into the HTTP headers
    2) Ensure that requests will check the correct Braintree crt.
    """
    new_request = prepare(request)
    # Until https://github.com/mozilla/solitude-auth/pull/3 is merged.
    new_request['headers']['Content-Type'] = 'application/xml; charset=utf-8'
    # Add in the correct Braintree Authorization.
    new_request['headers']['Authorization'] = b"Basic " + encodebytes(
        settings.BRAINTREE_PUBLIC_KEY.encode('ascii') + b":" +
        settings.BRAINTREE_PRIVATE_KEY.encode('ascii')).strip()
    # Taken from http://bit.ly/1cBESdC and ensures that the
    # crt is passed through to the requests verify parameter.
    new_request['verify'] = (
        Environment.braintree_root() + '/ssl/api_braintreegateway_com.ca.crt')

    return send(new_request)
Example #33
def reference(request, reference_name):
    """
    Pass through the request to the reference implementation.
    We have to:
    * get the provider from the URL
    * sign the request with OAuth
    """
    if reference_name not in settings.ZIPPY_CONFIGURATION:
        raise ValueError('Unknown provider: {}'.format(reference_name))

    new_request = prepare(request)
    new_request['url'] = reference_url(request, new_request, reference_name)

    sign_request(
        None,
        settings.ZIPPY_CONFIGURATION[reference_name]['auth'],
        headers=new_request['headers'],
        method=new_request['method'].upper(),
        params={'oauth_token': 'not-implemented'},
        url=new_request['url'])
    return send(new_request)
Example #34
def braintree(request):
    new_request = prepare(request)
    new_request['headers']['Authorization'] = braintree_authorization()
    return send(new_request)
            if "chord" in f:
                chord_values.append(count/nsize)
                x_values1.append(nsize)
            else:
                best_values.append(count/nsize)
                x_values2.append(nsize)


    plt.figure().set_size_inches(6.5,5)
    plt.xlabel("#Nodes")
    plt.ylabel("Per-node Replication Maintenance Cost (KB)")

    from matplotlib.ticker import EngFormatter
    formatter = EngFormatter(places=0)
    plt.gca().xaxis.set_major_formatter(formatter)

    plt.yscale('log')
    plt.xlim(0,1000000)

    out_file = "intro_rep_ma.pdf"

    d1 = prepare(x_values1,chord_values)
    d2 = prepare(x_values2,best_values)

    d1['label'] = 'Neighbor Replication'
    d1['linestyle'] = 'dashed'
    d2['label'] = 'Most-Available Replication'

    plot(out_file,d1,d2)

            for i in monitor:
                monitor_d.append(get_num_dict(i))

            t_d = expand_num_dict(monitor_d[0]) 
            chord_monitor.append(float(sum(t_d))/len(t_d))
            t_d = expand_num_dict(monitor_d[1]) 
            vserver_monitor.append(float(sum(t_d))/len(t_d))

            x_values.append(next(get_numbers(f)))

    plt.figure().set_size_inches(6.5,5)
    plt.xlabel("#Nodes")
    plt.ylabel("#Monitored Nodes")

    from matplotlib.ticker import EngFormatter
    formatter = EngFormatter(places=0)
    plt.gca().xaxis.set_major_formatter(formatter)

    plt.xlim(0,1000000)

    out_file = "intro_mon_chord.pdf"

    d1 = prepare(x_values,chord_monitor)
    d2 = prepare(x_values,vserver_monitor)

    d1['label'] = 'Neighbor Replication'
    d1['linestyle'] = 'dashed'
    d2['label'] = "Virtual Servers"

    plot(out_file,d1,d2)
            loads_d = []
            for i in loads:
                loads_d.append(get_num_dict(i))

            chord_loads.append(get_50_percent(loads_d[0]))
            vserver_loads.append(get_50_percent(loads_d[1]))

            x_values.append(next(get_numbers(f)))

    plt.figure().set_size_inches(6.5,5)
    plt.xlabel("#Nodes")
    plt.ylabel("% of nodes storing 50% of data")

    from matplotlib.ticker import EngFormatter
    formatter = EngFormatter(places=0)
    plt.gca().xaxis.set_major_formatter(formatter)

    plt.ylim(0,0.5)
    plt.xlim(0,1000000)

    out_file = "intro_lb_chord.pdf"

    d1 = prepare(x_values,chord_loads)
    d2 = prepare(x_values,vserver_loads)

    d1['label'] = 'Neighbor Replication'
    d1['linestyle'] = 'dashed'
    d2['label'] = "Virtual Servers"

    plot(out_file,d1,d2)
                continue
            loads_d = []
            for i in loads:
                loads_d.append(get_num_dict(i))

            best_loads.append(get_50_percent(loads_d[0]))

            x_values2.append(next(get_numbers(f)))

    plt.figure().set_size_inches(6.5,5)
    plt.xlabel("#Nodes")
    plt.ylabel("% of nodes storing 50% of data")

    from matplotlib.ticker import EngFormatter
    formatter = EngFormatter(places=0)
    plt.gca().xaxis.set_major_formatter(formatter)

    plt.ylim(0,0.5)
    plt.xlim(0,1000000)

    out_file = "intro_lb_ma.pdf"

    d1 = prepare(x_values1,chord_loads)
    d2 = prepare(x_values2,best_loads)

    d1['label'] = 'Neighbor Replication'
    d1['linestyle'] = 'dashed'
    d2['label'] = 'Most-Available Replication'

    plot(out_file,d1,d2)