Example #1
def generate_non_unique_polygon_dict(datasets):
    # Group dataset ids by their reported polygon string.
    out = {}
    for d in (d for d in datasets if 'POLY' in d['location_rpt']):
        out.setdefault(d['location_rpt'], []).append(util.get_id(d))
    # Keep only polygons reported by more than one distinct id.
    return {k: ids for k, ids in out.items() if len(set(ids)) > 1}
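A quick illustration of the behavior, with util.get_id stubbed for the example (an assumption; the real helper is project-specific):

class util:  # stand-in for the project's util module, illustration only
    @staticmethod
    def get_id(d):
        return d['id']  # assumed: get_id yields a stable dataset id

datasets = [
    {'id': 'a', 'location_rpt': 'POLYGON ((0 0, 1 0, 1 1, 0 0))'},
    {'id': 'b', 'location_rpt': 'POLYGON ((0 0, 1 0, 1 1, 0 0))'},
    {'id': 'c', 'location_rpt': 'POLYGON ((2 2, 3 2, 3 3, 2 2))'},
]
# Only the polygon reported by two distinct ids survives:
# {'POLYGON ((0 0, 1 0, 1 1, 0 0))': ['a', 'b']}
print(generate_non_unique_polygon_dict(datasets))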
Example #2
def read_excel(filename, username):
    filename = './sample/contract/' + filename + ".xlsx"
    workbook = xlrd.open_workbook(filename)
    sheet = workbook.sheet_by_index(0)
    row = sheet.nrows
    col = sheet.ncols
    print("行数", row, "列数", col)
    contract_name = sheet.cell(1, 1).value
    print(contract_name)
    contract_id = util.get_id(username, contract_name)
    party_a = sheet.cell(2, 2).value
    print(party_a)
    sig_a = "lll"
    party_b = sheet.cell(3, 2).value
    print(party_b)
    sig_b = "lll"
    valid_time = "2019-08-14"
    object_desc = ''
    content = []
    for i in range(8, row):
        res = {}
        res['person'] = sheet.cell(i, 1).value
        res['premise'] = sheet.cell(i, 2).value
        res['res'] = sheet.cell(i, 3).value
        res['time'] = ""
        print(sheet.cell(i, 3).value)
        content.append(res)
    print(content)
    db.save_contract(username, contract_name, contract_id, party_a, sig_a,
                     party_b, sig_b, valid_time, object_desc,
                     json.dumps(content))
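Note that xlrd 2.x dropped .xlsx support, so this snippet assumes xlrd 1.x. An openpyxl equivalent of the cell reads might look like the following (openpyxl indexes rows and columns from 1, unlike xlrd's 0-based API):

from openpyxl import load_workbook

workbook = load_workbook('./sample/contract/' + filename + '.xlsx')
sheet = workbook.worksheets[0]
row_count, col_count = sheet.max_row, sheet.max_column
# xlrd's 0-based sheet.cell(1, 1) becomes the 1-based cell(row=2, column=2)
contract_name = sheet.cell(row=2, column=2).value
party_a = sheet.cell(row=3, column=3).value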
Example #3
def get_csv_field_to_func_map():
    '''
    Returns a map of an output csv field to a function that takes a single
    argument which is a user row (dict object).

    Most of the functions will simply pull the desired value straight out
    of the dict argument but this design is chosen to provide flexibility
    in performing post-processing on the values contained in the dict
    argument should the schema mappings change, while still separating
    the boilerplate work of dumping the final values out to a csv. The
    boilerplate work is left to util.write_simple_csv.
    '''
    return {
        '_id': lambda forum_entry: util.get_id(forum_entry),
        'original_id': lambda forum_entry: forum_entry['_id']['$oid'],
        'course_id': lambda forum_entry: forum_entry['course_id'],
        'author_id': lambda forum_entry: forum_entry['author_id'],
        'created_at': get_created_at_date,
        'updated_at': get_updated_at_date,
        # body fields can contain unicode not encodable by ascii,
        # so it must be encoded to utf-8 to avoid a UnicodeEncodeError.
        'body': lambda forum_entry: forum_entry['body'].encode('utf-8'),
        'type': lambda forum_entry: forum_entry['_type'],
        'title': get_title,
        'thread_type': get_thread_type,
        'comment_thread_id': get_comment_thread_id,
        'parent_id': get_parent_id
    }
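util.write_simple_csv itself is not shown in these examples, but a driver of roughly this shape could consume the map (a sketch; the name, signature, and writer options are assumptions):

import csv

def write_simple_csv(field_to_func_map, rows, output_path):
    # Apply every field's extractor function to each row and dump the results.
    with open(output_path, 'w') as output_file:
        writer = csv.DictWriter(output_file, fieldnames=list(field_to_func_map))
        writer.writeheader()
        for row in rows:
            writer.writerow({field: func(row)
                             for field, func in field_to_func_map.items()})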
Example #4
def process_text(event):
    """Process text and return a dictionary containing the appropriate response.
    Make sure that event is a text message event.
    """
    if event['type'] != 'message' or event['message']['type'] != 'text':
        return not_implemented_response()

    message = event['message']
    text = message['text']
    id = get_id(event['source'])

    response = {}

    user_state = get_user_state(id)
    if user_state == 'light':
        response = process_light(text, id, has_queried_date=True)
    elif user_state == 'distance':
        response = process_distance(text, id, has_queried_date=True)
    else:
        if "光" in text:
            response = process_light(text, id)
        elif "距離" in text:
            response = process_distance(text, id)
        else:
            response = not_implemented_response()

    return response
Example #5
def write_courses_csv(dictionary, output_path):
    '''
    Write the courses csv out to output_path,
    according to the dictionary representing the loaded
    {org}-{course}-{run}-course_structure-{site}-analytics.json file
    '''
    if not contains_course(dictionary):
        raise ValueError('loaded json file is missing a course entry')

    course_entry_key = get_course_entry_key(dictionary)
    course_entry = dictionary[course_entry_key]
    with open(output_path, 'w') as output_file:
        writer = csv.DictWriter(output_file,
                                delimiter=',',
                                fieldnames=get_csv_fields(),
                                quotechar='"',
                                escapechar='\\',
                                lineterminator='\n')

        writer.writerow({
            '_id': str(util.get_id(course_entry)),
            'original_id': get_original_id(dictionary),
            'name': get_name(course_entry),
            'year': get_year(course_entry),
            'org': get_org(course_entry_key),
            'instructor': get_instructor(),
            'description': get_description(),
            'start_date': get_start_date(course_entry),
            'end_date': get_end_date(course_entry),
            'course_url': get_course_url(),
            'image_url': get_image_url()
        })
Example #6
def check_avail(file_path):
    ID = util.get_id(os.path.basename(file_path))
    with open(config.AVAIL) as fr:
        avail_list = [line.strip('\n') for line in fr]
    return ID in avail_list
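If the availability file is large or check_avail is called often, loading the ids into a set makes each membership test O(1); a minimal variant under the same config.AVAIL assumption:

def check_avail(file_path):
    ID = util.get_id(os.path.basename(file_path))
    with open(config.AVAIL) as fr:
        avail_ids = {line.strip('\n') for line in fr}
    return ID in avail_ids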
Example #7
    def test_get_id(self):
        '''
        Simple check that ensures the hash function works and
        the hashed id is in legal format.
        '''
        course_entry = {
            'category': 'course',
            'field_a': 'value_1',
            'field_b': 'value_2'
        }
        course_entry_copy = course_entry.copy()
        self.assertEqual(util.get_id(course_entry),
                         util.get_id(course_entry_copy))
        course_entry_copy['field_b'] = 'value_3'
        self.assertNotEqual(util.get_id(course_entry),
                            util.get_id(course_entry_copy))
        self.assertEqual('2538887322753988521', str(util.get_id(course_entry)))
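The util.get_id implementation itself does not appear in these examples; one deterministic way to hash a dict like this (purely an illustrative assumption, and it would not reproduce the exact value pinned above) is to digest the sorted items:

import hashlib

def get_id(entry):
    # Equal dicts always produce the same id: hash the sorted key/value pairs.
    digest = hashlib.sha256(repr(sorted(entry.items())).encode('utf-8')).hexdigest()
    return int(digest[:16], 16)  # truncate to a 64-bit integer id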
Example #8
def saveContract():
    args = request.get_json()
    contract_id = util.get_id(args['contract_name'])
    jsondata = json.dumps(args['content'])
    GenerateDGA(jsondata, contract_id)
    res = db.save_contract(args['contract_name'], contract_id, args['Obligor'],
                           args['creditor'], jsondata)

    ConctracList = db.getConctracList()
    return render_template('contractList.html', contractList=ConctracList), 200
Example #9
def save():
    args = request.get_json()
    contract_id = util.get_id(args['username'], args['contract_name'])
    db.save_contract(args['username'], args['contract_name'], contract_id, args['party_a'], args['sig_a'],
        args['party_b'], args['sig_b'], args['valid_time'], args['object_desc'], json.dumps(args['content']))
    print(args['content'])
    #t = threading.Thread(target=create_task, args=(args['content'],contract_id))
    #t.start()
    #t.join()
    #create_task(json.dumps(args['content']),contract_id)
    return 'success'
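The commented-out threading code joins the worker right after starting it, which would block the request just like a direct call; if create_task is meant to run in the background, a fire-and-forget variant (same create_task and argument assumptions as in the comments) would be:

import threading

t = threading.Thread(target=create_task,
                     args=(json.dumps(args['content']), contract_id),
                     daemon=True)
t.start()  # return 'success' without waiting for the task to finish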
Example #10
def get_course_video_csv_field_to_func_map(dictionary):
    '''
    Returns a map of the course_video output csv field
    to a function that takes a single argument which is a string.

    Most of the functions will simply pull the desired value straight out
    of the dict argument but this design is chosen to provide flexibility
    in performing post-processing on the values contained in the dict
    argument should the schema mappings change, while still separating
    the boilerplate work of dumping the final values out to a csv. The
    boilerplate work is left to util.write_simple_csv.
    '''
    course_id = courses.get_original_id(dictionary)
    return {
        '_id': lambda video_id: str(util.get_id(video_id)),
        'course_id': lambda _: course_id,
        'video_id': lambda video_id: video_id
    }
Example #11
    def infer(self, model):
        model.eval()
        if self.count >= len(self.file_list):
            self.count = 0
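        # Every (32 / batch_size) iterations, move on to the next file; each
        # file apparently holds 32 examples consumed batch_size at a time.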
        if self.iter % (32 / self.batch_size) == 0:
            self.file = self.file_list[self.count]
            path = os.path.join(self.dir, self.file)
            self.input, self.target, self.idx2oov = util.get_id(
                path, input_len=self.max_len, target_len=self.summ_len)
            self.ind = 0
            self.count += 1
            with open('stats/e_count.txt', 'a') as handle:
                handle.write(str(self.count) + '\n')
        with torch.no_grad():
            src = num_to_var(
                self.input[self.ind *
                           self.batch_size:self.ind * self.batch_size +
                           self.batch_size, :]).to(self.device)  #8,400
            decoder_inp = num_to_var(
                self.target[self.ind *
                            self.batch_size:self.ind * self.batch_size +
                            self.batch_size, :]).to(self.device)
        self.ind += 1

        prob, loss = model.evaluate(src,
                                    decoder_inp,
                                    trg_len=self.summ_len + 1)
        #        loss=calc_loss(prob,decoder_inp[:,1:],self.optim)
        pred = prob.argmax(-1)

        real_summ = util.list_to_summ(
            decoder_inp[-1, 1:].cpu().numpy().tolist(), self.idx2oov)
        summ = util.list_to_summ(pred[-1, :].cpu().numpy().tolist(),
                                 self.idx2oov)

        with open('stats/e_loss.txt', 'a') as handle:
            handle.write(str(loss.item()) + '\n')

        self.iter += 1

        return real_summ, summ, loss.item(), self.file
Example #12
def get_videos_csv_field_to_func_map(dictionary):
    '''
    Returns a map of the videos output csv field
    to a function that takes a single argument which is a string.

    Most of the functions will simply pull the desired value straight out
    of the dict argument but this design is chosen to provide flexibility
    in performing post-processing on the values contained in the dict
    argument should the schema mappings change, while still separating
    the boilerplate work of dumping the final values out to a csv. The
    boilerplate work is left to util.write_simple_csv.
    '''
    video_id_to_section_map = get_video_id_to_section_map(dictionary)
    return {
        '_id': lambda video_id: str(util.get_id(video_id)),
        'original_id': lambda video_id: video_id,
        'name':
        lambda video_id: dictionary[video_id]['metadata']['display_name'],
        'section': lambda video_id: video_id_to_section_map[video_id],
        'description': lambda _: '',
        'url': lambda video_id: get_url(dictionary, video_id)
    }
Example #13
# @Date: 20160225

import sys
sys.path.append("..")
import util

__author__ = "jupiterguo"


''' func called from util
def active_code(id, length=10):
    # id + L + random code
    prefix = hex(int(id))[2:] + 'L'
    length = length - len(prefix)
    
    # letters for a-zA-Z, digits for 0-9
    chars = string.ascii_letters + string.digits
    return prefix + ''.join([random.choice(chars) for i in range(length)])

def get_id(code):
    # Hex to Dec
    return str(int(code.upper(), 16))
'''

if __name__ == "__main__":
    for i in range(98, 502, 2):
        code = util.active_code(i)
        id = util.get_id(code)
        print(code, id)

Example #14
with open('stats/count.txt', 'r') as handle:
    count_lst = handle.read().strip().split('\n')

iter = 0
batch_size = 8

count = len(count_lst) % len(names)
eval = eval1.Infer()
for iter in range(1800003):
    #    break
    if count >= len(names):
        count = 0
    if iter % (32 / batch_size) == 0:
        file = names[count]
        input, target, idx2oov = util.get_id(file,
                                             input_len=100,
                                             target_len=25)
        ind = 0
        with open('stats/count.txt', 'a') as handle:
            handle.write(str(count) + '\n')
        count += 1
        l = [j for j in range(int(32 / batch_size))]
    inp = num_to_var(input[ind * batch_size:ind * batch_size +
                           batch_size, :]).to(device)  #8,400
    decoder_inp = num_to_var(target[ind * batch_size:ind * batch_size +
                                    batch_size, :]).to(device)

    prob, loss = model(inp, decoder_inp, device)
    loss = update(prob, decoder_inp[:, 1:], get_token(decoder_inp[:, 1:]),
                  loss)
    dec_optimizer.step()


Example #15
iterations, print_every = 75000, 1

#iterations,count=1,0
for iter in range(iterations):
    cov_loss, loss = 0, 0
    if count >= len(file_list):
        count = 0
    if iter % (8 / args['batch']) == 0:
        file = file_list[count]
        path = os.path.join(dir, file)
        input, target, idx2oov = util.get_id(path, input_len, target_len)
        with open('count.txt', 'a') as output:
            output.write(str(count))
            output.write('\n')
        count += 1
        ind = 0

    l = [j for j in range(int(8 / args['batch']))]
    inp = num_to_var(input[ind * args['batch']:ind * args['batch'] +
                           args['batch'], :]).to(device)  #8,400
    decoder_inp = num_to_var(target[ind * args['batch']:ind * args['batch'] +
                                    args['batch'], :]).to(device)

    preds_summ, preds, p_final, attn = model.genrate(enc, dec, inp,
                                                     decoder_inp, args,
                                                     input_len, target_len)
Example #16
def _name_model(name: str) -> str:
    return "{}-{}".format(name, get_id())