Example 1
 def post(self, request, app_name, env_name, app_path):
     action = request.data['action']
     if action == 'rename':
         env_path = _get_existent_env_path(app_path, env_name)
         new_env_name = request.data['name']
         check_name(new_env_name)
         new_env_path = _get_absent_env_path(app_path, new_env_name)
         stop_patsaks(get_id(request.dev_name, app_name, env_name))
         write_file(new_env_path, new_env_name)
         schema_prefix = get_id(request.dev_name, app_name) + ':'
         execute_sql(
             'SELECT ak.rename_schema(%s, %s)',
             (schema_prefix + env_name.lower(),
              schema_prefix + new_env_name.lower()))
         os.remove(env_path)
         return HttpResponse()
     if action == 'eval':
         request.lock.release()
         request.lock = None
         if env_name == _RELEASE_ENV_NAME:
             env_name = None
         response = send_to_ecilop(
             'EVAL ' + get_id(request.dev_name, app_name, env_name),
             request.data['expr'])
         assert response
         status = response[0]
         result = response[1:]
         assert status in ('E', 'F', 'S')
         if status == 'E':
             raise Error(result, status=NOT_FOUND)
         return {'ok': status == 'S', 'result': result}
     raise Error('Unknown action: "%s"' % action)
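These handlers never show get_id itself, but the way its result is used here (and the get_id(...) + ':' prefixes in later examples) suggests a colon-separated host id. A minimal sketch under that assumption, hypothetical rather than the project's actual helper:

def get_id(dev_name, app_name, env_name=None):
    # Hypothetical reconstruction: join the parts with ':' so that
    # get_id(dev, app) + ':' prefixes every environment-specific id.
    parts = [dev_name, app_name]
    if env_name is not None:
        parts.append(env_name)
    return ':'.join(parts)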
Example 2
 def comment_string(self, domain):
     s = "\n"
     if self.original_url:
         if "vine.co" in self.original_url:
             s += vine_warning
         s += "* [Original (%s)](%s)" % (domain, self.original_url)
     if self.gfycat_url:
         gfy_id = get_id(self.gfycat_url)
         urls = self.gfycat_urls(gfy_id)
         s += "\n\n"
         s += "* [Gfycat](%s) | [mp4](%s) - [webm](%s) - [gif](%s)" % (
             self.gfycat_url, urls[0], urls[1], urls[2])
     if self.offsided_url:
         s += "\n\n"
         s += "* [Offsided](%s) | " % self.offsided_url
         for mediaType, url in self.offsided_urls(get_id(self.offsided_url)):
             s += "[%s](%s) - " % (mediaType, url)
         s = s[0:-2]  # Shave off the last "- "
     if self.imgur_url:
         s += "\n\n"
         s += "* [Imgur](%s) | " % self.imgur_url
         for mediaType, url in self.imgur_urls(get_id(self.imgur_url)):
             s += "[%s](%s) - " % (mediaType, url)
         s = s[0:-2]  # Shave off the last "- "
     if self.streamable_url:
         s += "\n\n"
         s += "* [Streamable](%s) | " % self.streamable_url
         for mediaType, url in self.streamable_urls(get_id(self.streamable_url)):
             s += "[%s](%s) - " % (mediaType, url)
         s = s[0:-2]  # Shave off the last "- "
     s += "\n"
     return s
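For orientation, with only the original link and a Gfycat mirror set, the method above produces Markdown along these lines (URLs are made up for illustration):

* [Original (example.com)](http://example.com/clip)

* [Gfycat](https://gfycat.com/SomeClip) | [mp4](https://giant.gfycat.com/SomeClip.mp4) - [webm](https://giant.gfycat.com/SomeClip.webm) - [gif](https://giant.gfycat.com/SomeClip.gif)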
Example 3
 def comment_string(self):
     s = "\n"
     if self.original_url:
         if "vine.co" in self.original_url:
             s += vine_warning
         s += "* [Original](%s)" % self.original_url
     if self.gfycat_url:
         gfy_id = get_id(self.gfycat_url)
         urls = self.gfycat_urls(gfy_id)
         s += "\n\n"
         s += "* [Gfycat](%s) | [mp4](%s) - [webm](%s) - [gif](%s)" % (
             self.gfycat_url, urls[0], urls[1], urls[2])
     if self.mediacrush_url:
         s += "\n\n"
         mc_id = get_id(self.mediacrush_url)
         s += "* [Mediacrush](%s) | " % self.mediacrush_url
         s += "[mp4](%s)" % self.mc_url("mp4", mc_id)
         s += " - [webm](%s)" % self.mc_url("webm", mc_id)
         if "gfycat" not in self.original_url:
             s += " - [gif](%s)" % self.mc_url("gif", mc_id)
         s += " - [ogg](%s)" % self.mc_url("ogv", mc_id)
     if self.fitbamob_url:
         s += "\n\n"
         s += "* [Fitbamob](%s)" % self.fitbamob_url
         # TODO Re-enable this when possible
         # fit_id = get_id(self.fitbamob_url)
         # urls = self.fitbamob_urls(fit_id)
         # s += "* [Fitbamob](%s) | [mp4](%s) - [webm](%s) - [gif](%s)" % (
         #     self.fitbamob_url, urls[0], urls[1], urls[2])
     if self.imgur_url:
         s += "\n\n"
         s += "* [Imgur](%s) (gif only)" % self.imgur_url
     s += "\n"
     return s
Example 4
 def delete(self, request, app_name, env_name, app_path):
     env_path = _get_existent_env_path(app_path, env_name)
     stop_patsaks(get_id(request.dev_name, app_name, env_name))
     os.remove(env_path)
     execute_sql(
         'SELECT ak.drop_schema(%s)',
         (get_id(request.dev_name, app_name, env_name),))
     return HttpResponse()
Example 5
def encode_fs_directory(g, basedir, subject_id, hostname=None,
                        n_items=100000):
    """ Convert a FreeSurfer directory to a PROV graph
    """
    # directory collection/catalog
    collection_hash = get_id()
    fsdir_collection = g.collection(collection_hash)
    fsdir_collection.add_extra_attributes({prov.PROV['type']: fs['subject_directory'],
                                           fs['subject_id']: subject_id})
    directory_id = g.entity(get_id())
    if hostname is None:
        hostname = getfqdn()
    url = "file://%s%s" % (hostname, os.path.abspath(basedir))
    directory_id.add_extra_attributes({prov.PROV['location']: prov.URIRef(url)})
    g.wasDerivedFrom(fsdir_collection, directory_id)

    a0 = g.activity(get_id(), startTime=dt.isoformat(dt.utcnow()))
    user_agent = g.agent(get_id(),
                         {prov.PROV["type"]: prov.PROV["Person"],
                          prov.PROV["label"]: pwd.getpwuid(os.geteuid()).pw_name,
                          foaf["name"]: pwd.getpwuid(os.geteuid()).pw_name})
    g.wasAssociatedWith(a0, user_agent, None, None,
                        {prov.PROV["Role"]: nidm["LoggedInUser"]})
    g.wasGeneratedBy(fsdir_collection, a0)

    i = 0
    for dirpath, dirnames, filenames in os.walk(os.path.realpath(basedir)):
        for filename in sorted(filenames):
            if filename.startswith('.'):
                continue
            i += 1
            if i > n_items:
                break
            file2encode = os.path.realpath(os.path.join(dirpath, filename))
            if not os.path.isfile(file2encode):
                print "%s not a file" % file2encode
                continue
            ignore_key_found = False
            for key in ignore_list:
                if key in file2encode:
                    ignore_key_found = True
                    break
            if ignore_key_found:
                continue
            try:
                entity = create_entity(g, subject_id, file2encode, hostname)
                g.hadMember(fsdir_collection, entity.get_identifier())
            except IOError, e:
                print e
Example 6
File: main.py Project: bworrell/ipc
def _evaluate(root, hash):
    hash_observables = utils.get_observables_with_hashes(root)

    for o in hash_observables:
        id_ = utils.get_id(o)
        props = utils.get_properties(o)
        hashes = utils.get_hashes(o)

        is_match = any(utils.matches(x, hash) for x in hashes)
        is_singleton = len(props) == 1  # TODO: Does the object only contain a Hashes element?
        is_negated = bool(o.attrib.get('negate', False))

        if is_match and is_singleton:
            result = RESULT_MATCH
        elif is_match and not is_singleton:
            result = RESULT_PARTIAL
        else:
            result = RESULT_UNMATCHED

        if is_negated:
            # Negation inverts a definite result: a match becomes
            # unmatched and vice versa; a partial match is left as-is.
            if result == RESULT_MATCH:
                result = RESULT_UNMATCHED
            elif result == RESULT_UNMATCHED:
                result = RESULT_MATCH

        OBSERVABLE_TO_RESULT[id_] = result
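A self-contained sanity check of the corrected negation handling (the constants are stand-ins for the module's own):

RESULT_MATCH, RESULT_PARTIAL, RESULT_UNMATCHED = 'match', 'partial', 'unmatched'

def apply_negation(result, is_negated):
    # Mirrors the branch above: negation swaps a full match and an
    # unmatched result, and leaves a partial match untouched.
    if not is_negated:
        return result
    if result == RESULT_MATCH:
        return RESULT_UNMATCHED
    if result == RESULT_UNMATCHED:
        return RESULT_MATCH
    return result

assert apply_negation(RESULT_MATCH, True) == RESULT_UNMATCHED
assert apply_negation(RESULT_UNMATCHED, True) == RESULT_MATCH
assert apply_negation(RESULT_PARTIAL, True) == RESULT_PARTIAL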
Example 7
 def post(self, request, app_name, app_path):
     env_name = request.data['name']
     check_name(env_name)
     env_path = _get_absent_env_path(app_path, env_name)
     execute_sql(
         CREATE_SCHEMA_SQL, (get_id(request.dev_name, app_name, env_name),))
     write_file(env_path, env_name)
     return HttpResponse(status=CREATED)
Example 8
 def __init__(self, **kw):
     self.id = kw.get('id', utils.get_id())
     self.etag = kw.get('etag', "ETAG-%s" % self.id)
     self.title = kw.get('title', '')
     self.updated = kw.get('updated', utils.now())
     self.selfLink = kw.get('selfLink', "self_link_url_not_implemented")
     self.tasks = {}
     self.kind = "tasks#taskList"
Example 9
 def parse_transfer_history(self):
     transfers = self.tree.xpath("//tr[@class='zeile-transfer']")
     try:
         for transfer in transfers:
             date = transfer.xpath('td[@class="zentriert hide-for-small"]')[1]\
                             .text.strip()
             origin = transfer.xpath('*/a[@class="vereinprofil_tooltip"]')[2]\
                             .get('href')
             origin_id = get_id(origin)
             dest = transfer.xpath('*/a[@class="vereinprofil_tooltip"]')[5]\
                             .get('href')
             dest_id = get_id(dest)
             value = transfer.xpath('td[@class="zelle-abloese"]')[0].text\
                             .strip().encode('ascii', 'ignore')
             self.transfer_history.append(
                 Transfer(date, origin_id, dest_id, value)
             )
     except Exception as e:
         print "could not load transfers for " + self.name
Example 10
 def delete(self, request, app_name, app_path):
     app_id = get_id(request.dev_name, app_name)
     stop_patsaks(app_id)
     domains = json.loads(read_file(app_path.domains).lower())
     _purge_domains(domains)
     for domain in domains:
         os.remove(ROOT.domains[domain])
     try:
         os.remove(ROOT.devs[request.dev_name].libs[app_name])
     except OSError, error:
         assert error.errno == errno.ENOENT
Example 11
 def put(self, request, app_name, app_path, path):
     stop_patsaks(get_id(request.dev_name, app_name) + ':')
     parts = _check_path(path)
     try:
         write_file(app_path.code + '/' + path, request.raw_post_data)
     except IOError, error:
         raise (
             Error('"%s" is a folder.' % path)
             if error.errno == errno.EISDIR else
             Error('The folder "%s" doesn\'t exist.' % '/'.join(parts[:-1]),
                   status=NOT_FOUND))
Example 12
def populate_teams():
    for i in xrange(100):
        filename = data_path + "team" + str(i) + ".p"
        with open(filename, "r") as file:
            team = pickle.load(file)
            team.name = team.full_name
            team_ids[get_id(team.url_profile)] = team.name
            models.Team.objects.update_or_create(
                name=team.name, full_name=team.full_name,
                country=team.country, badge=team.badge)
            print "Team " + repr(team.name) + " was added to the database"
            populate_manager(team)
            populate_players(team)
Example 13
 def post(self, request, app_name, app_path):
     command, args = parse_git_command(request.data['command'])
     host_id = get_id(request.dev_name, app_name)
     if command in ('commit', 'merge', 'pull', 'rebase', 'reset', 'revert'):
         _purge_domains(json.loads(read_file(app_path.domains)))
     else:
         host_id += ':'
     stop_patsaks(host_id)
     return HttpResponse(
         _make_git_runner(request, app_name).run(command, *args),
         'text/plain; charset=utf-8')
Example 14
 def print_action(self, desc):
     info = '[%s]' % utils.get_colored_text('lpurple', utils.get_id())
     if self.drone:
         info += ' [level=%s]' % utils.get_colored_text('yellow', str(self.drone.level))
         if self.drone.leader:
             info += ' [leader=%s]' % utils.get_colored_text('yellow', self.drone.leader)
         if self.drone.starvation:
             info += ' [%s]' % utils.get_colored_text('lblue', 'starvation')
         info += ' [health=%s]' % utils.get_colored_text('lred', str(self.drone.inventory.get('nourriture', 0)))
         if self.real_fork_timer:
             info += ' [birth t=-%s]' % utils.get_colored_text('lgreen', str(self.real_fork_timer))
     print('%s %s' % (info, desc))
Example 15
 def post(self, request, app_name, app_path):
     _purge_domains(json.loads(read_file(app_path.domains)))
     stop_patsaks(get_id(request.dev_name, app_name))
     git_runner = _make_git_runner(request, app_name)
     git_runner.run('add', '-A')
     args = ['commit', '-qm', request.data['message']]
     if request.data.get('amend'):
         args += ('--amend', '--reset-author')
     output = git_runner.run(*args)
     if output:
         raise Error(output)
     return HttpResponse()
Example 16
 def post(self, request, app_name, app_path):
     stop_patsaks(get_id(request.dev_name, app_name) + ':')
     prefix = app_path.code + '/'
     action = request.data['action']
     if action == 'mkdir':
         path = request.data['path']
         _check_path(path)
         try:
             os.makedirs(prefix + path)
         except OSError, error:
             assert error.errno == errno.EEXIST
             raise Error('The entry "%s" already exists.' % path,
                         'Please choose another name.')
         return HttpResponse(status=CREATED)
Example 17
 def __init__(self, **kw):
     self.kind = "tasks#task"
     self.id = kw.get('id', utils.get_id())
     self.etag = kw.get('etag', "ETAG-%s" % self.id)
     self.title = kw.get('title', '')
     self.updated = kw.get('updated', utils.now())
     self.selfLink = kw.get('selfLink', "self_link_not_implemented")
     self.parent = kw.get('parent', '')
     self.position = kw.get('position', 'z' * 26)
     self.notes = kw.get('notes', '')
     self.status = kw.get('status', '')
     self.due = kw.get('due', '')
     self.completed = kw.get('completed', '')
     self.deleted = kw.get('deleted', '')
     self.hidden = kw.get('hidden', '')
     self.links = [] # Not Impl.
Example 18
    def process_block(self):
        block, self.block = self.block, []
        self.previous_score = (0,0)

        if block:
            try:
                date, city, location = self.process_date_location(block[0])
            except:
                import pdb; pdb.set_trace()
                print(block[0])

            team1, team2, team1_score, team2_score = self.process_score(block[1])
            attendance, referee, linesmen = self.process_attendance_ref(block[2])
            
            g = {
                'gid': get_id(),
                'competition': self.competition,
                'season': self.season,
                'date': date,

                'team1': team1,
                'team2': team2,
                'team1_score': team1_score,
                'team2_score': team2_score,
                #'team1_result': team1_result,
                #'team2_result': team2_result,
                #'home_team': home_team,
                #'neutral': neutral,

                'location': location,
                'referee': referee,
                #'linesmen': linesmen,
                'attendance': attendance,
                'minigame': False,
                'forfeit': False,
                'sources': self.sources[:],
                }
            self.current_game = g
            self.games.append(g)

            self.appearances.extend(self.process_lineup(block[3]))
            self.appearances.extend(self.process_lineup(block[4]))

        if len(block) > 5:
            self.process_remaining_lines(block[5:])
Example 19
def to_graph(subject_specific_dir):
    # location of FreeSurfer $SUBJECTS_DIR
    basedir = os.path.abspath(subject_specific_dir)
    subject_id = basedir.rstrip(os.path.sep).split(os.path.sep)[-1]

    # create the PROV bundle that will hold the converted graph
    graph = prov.ProvBundle(identifier=get_id())
    graph.add_namespace(foaf)
    graph.add_namespace(dcterms)
    graph.add_namespace(fs)
    graph.add_namespace(nidm)
    graph.add_namespace(niiri)
    graph.add_namespace(obo)
    graph.add_namespace(nif)
    graph.add_namespace(crypto)

    graph = encode_fs_directory(graph, basedir, subject_id)
    return graph
Example 20
def create_entity(graph, fs_subject_id, filepath, hostname):
    """ Create a PROV entity for a file in a FreeSurfer directory
    """
    # identify FreeSurfer terms based on directory and file names
    _, filename = os.path.split(filepath)
    relpath = filepath.split(fs_subject_id)[1].lstrip(os.path.sep)
    fstypes = relpath.split('/')[:-1]
    additional_types = relpath.split('/')[-1].split('.')

    file_md5_hash = hash_infile(filepath, crypto=hashlib.md5)
    file_sha512_hash = hash_infile(filepath, crypto=hashlib.sha512)
    if file_md5_hash is None:
        print('Empty file: %s' % filepath)

    #url = "file://%s%s" % (hostname, filepath)
    url = filepath
    url_get = prov.URIRef("http://localhost/file?file=%s" % filepath)
    url_get = prov.URIRef("file://%s" % filepath)
    obj_attr = [(prov.PROV["label"], filename),
                (fs["relative_path"], "%s" % relpath),
                (prov.PROV["location"], url_get),
                (crypto["md5"], "%s" % file_md5_hash),
                (crypto["sha"], "%s" % file_sha512_hash)
                ]

    for key in fstypes:
        obj_attr.append((nidm["tag"], key))
    for key in additional_types:
        obj_attr.append((nidm["tag"], key))

    for key, uris in fs_file_map:
        if key in filename:
            if key.strip('.') not in fstypes + additional_types:
                obj_attr.append((nidm["tag"], key.strip('.')))
            for uri in uris:
                if isinstance(uri, tuple):
                    obj_attr.append((uri[0], uri[1]))
                else:
                    obj_attr.append((prov.PROV["type"], uri))
    id = get_id()
    return graph.entity(id, obj_attr)
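The loop over fs_file_map implies it is a sequence of (filename_key, uris) pairs in which each uri is either a bare type or a (predicate, value) tuple. An illustrative entry only, with plain strings standing in for the namespace objects used above:

fs_file_map = [
    # bare entries become (PROV type, uri); tuples are appended verbatim
    ('aseg.stats', ['SegmentationStatistics', ('nidm:tag', 'statistics')]),
]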
Example 21
    def put(self, request, app_name, app_path):
        domains_lower = set(json.loads(read_file(app_path.domains).lower()))
        new_domains = []
        new_domains_lower = set()
        has_akshell = False
        with ROOT.locks.domains.acquire_exclusive():
            for new_domain in request.data:
                new_domain_lower = new_domain.lower()
                if new_domain_lower in new_domains_lower:
                    continue
                match = _DOMAIN_RE.match(new_domain_lower)
                if not match:
                    raise Error(
                        '"%s" is not a valid domain name.' % new_domain, '''\
Domain name must consist of two or three parts separated by dots. \
Each part must consist of Latin letters, digits, and hyphens; \
it must not start or end with a hyphen.''')
                if new_domain_lower.endswith(_AKSHELL_SUFFIX):
                    if has_akshell:
                        raise Error(
                            'Only one akshell.com subdomain is provided.')
                    has_akshell = True
                if new_domain_lower not in domains_lower:
                    _check_domain_is_free(new_domain_lower)
                if match.group(1) and match.group(2) not in domains_lower:
                    _check_domain_is_free(match.group(2))
                new_domains.append(new_domain)
                new_domains_lower.add(new_domain_lower)
            app_id = get_id(request.dev_name, app_name)
            stop_patsaks(app_id)
            for new_domain_lower in new_domains_lower.difference(domains_lower):
                write_file(ROOT.domains[new_domain_lower], app_id)
            old_domains_lower = domains_lower.difference(new_domains_lower)
            for old_domain_lower in old_domains_lower:
                os.remove(ROOT.domains[old_domain_lower])
        _purge_domains(old_domains_lower)
        write_file(app_path.domains, json.dumps(new_domains))
        return HttpResponse()
Example 22
def main(gid=None,
         dataset=None,
         dataset_root=None,
         which=None,
         exp_dir=None,
         verbose=False):
    """
    Configs
    """
    GPU_ID = 0  # gpu id or 'None'
    BATCH_SIZE = 32  # batch size when extracting query and gallery features
    IMG_SIZE = (256, 128)
    DATASET = 'market1501'  # market1501, duke
    WHICH = 'last'  # which model to load
    EXP_DIR = './exp/dmml/market1501'
    NORMALIZE_FEATURE = True  # whether to normalize features in evaluation
    NUM_WORKERS = 8

    if gid is not None:
        GPU_ID = gid
    if dataset is not None:
        DATASET = dataset
    if which is not None:
        WHICH = which
    if exp_dir is not None:
        EXP_DIR = exp_dir
    """
    Datasets
    """
    if dataset_root is None:
        # change dataset directories here to your own if needed
        if DATASET == 'market1501':
            dataset_root = '<DATASET_ROOT_MARKET>'
        elif DATASET == 'duke':
            dataset_root = '<DATASET_ROOT_DUKE>'
        else:
            raise NotImplementedError

    print('Generating dataset...')
    eval_transform = transforms.Compose([
        transforms.Resize(IMG_SIZE, interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    if DATASET == 'market1501':
        datasets = {
            x: Market1501(dataset_root, transform=eval_transform, split=x)
            for x in ['gallery', 'query']
        }
        num_classes = 751
    elif DATASET == 'duke':
        datasets = {
            x: DukeMTMC_reID(dataset_root, transform=eval_transform, split=x)
            for x in ['gallery', 'query']
        }
        num_classes = 702

    dataloaders = {
        x: torch.utils.data.DataLoader(datasets[x],
                                       batch_size=BATCH_SIZE,
                                       shuffle=False,
                                       num_workers=NUM_WORKERS)
        for x in ['gallery', 'query']
    }
    print('Done.')
    """
    Model
    """
    print('Restoring model...')

    ### You may need to modify the arguments of the model according to your training settings.

    model = resnet_model(remove_downsample=True)
    # model = resnet_model(num_classes=num_classes, include_top=False, remove_downsample=False)

    model.load_state_dict(torch.load('{}/model_{}.pth'.format(EXP_DIR, WHICH)))
    if GPU_ID is not None:
        model.cuda(GPU_ID)
    model.eval()
    print('Done.')
    """
    Test
    """
    print('Getting image ID...')
    gallery_cam, gallery_label = get_id(datasets['gallery'].imgs,
                                        dataset=DATASET)
    query_cam, query_label = get_id(datasets['query'].imgs, dataset=DATASET)
    print('Done.')

    # Extract feature
    print('Extracting gallery feature...')
    gallery_feature, g_images = extract_feature(
        model,
        dataloaders['gallery'],
        normalize_feature=NORMALIZE_FEATURE,
        gid=GPU_ID,
        verbose=verbose)
    print('Done.')
    print('Extracting query feature...')
    query_feature, q_images = extract_feature(
        model,
        dataloaders['query'],
        normalize_feature=NORMALIZE_FEATURE,
        gid=GPU_ID,
        verbose=verbose)
    print('Done.')

    query_cam = np.array(query_cam)
    query_label = np.array(query_label)
    gallery_cam = np.array(gallery_cam)
    gallery_label = np.array(gallery_label)

    # Evaluate
    print('Evaluating...')
    CMC = torch.IntTensor(len(gallery_label)).zero_()
    ap = 0.0
    for i in range(len(query_label)):
        ap_tmp, CMC_tmp = evaluate(query_feature[i], query_label[i],
                                   query_cam[i], gallery_feature,
                                   gallery_label, gallery_cam)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp

    CMC = CMC.float()
    CMC = CMC / len(query_label)  # average CMC
    print('Done.')
    print('Rank-1: {:.6f} Rank-5: {:.6f} Rank-10: {:.6f} mAP: {:.6f}'.format(
        CMC[0].item(), CMC[4].item(), CMC[9].item(), ap / len(query_label)))

    return ap / len(query_label), CMC
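The CMC accumulation assumes each CMC_tmp from evaluate() is an indicator vector that is 1 from the rank of the first correct gallery match onward; averaging over queries then gives Rank-k accuracy. A toy illustration under that assumption:

import torch

# Two queries over a gallery of 5: first match at rank 2 and rank 1.
cmc = torch.IntTensor([0, 1, 1, 1, 1]) + torch.IntTensor([1, 1, 1, 1, 1])
cmc = cmc.float() / 2
print(cmc[0].item())  # Rank-1 accuracy: 0.5
print(cmc[1].item())  # Rank-2 accuracy: 1.0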
Example 23
import string
import sys
import urllib

import utils

utils.init()

ids = set()
gold = open('dat/semantic_gold.txt', 'w')
for line in open('dat/WikiSimi3000_1.csv'):
    tokens = line.split('\t')
    page_id1 = utils.get_id(urllib.unquote(tokens[0].strip()))
    page_id2 = utils.get_id(urllib.unquote(tokens[1].strip()))
    score = int(tokens[2])
    if page_id1 < 0 or page_id2 < 0:
        sys.stderr.write('no id for line %s\n' % `line`)
    else:
        gold.write('%s\t%s\t%s\n' % (page_id1, page_id2, score))
        ids.add(page_id1)
        ids.add(page_id2)
gold.close()

gold_ids = open('dat/semantic_gold_ids.txt', 'w')
for id in ids:
    gold_ids.write('%s\n' % id)
gold_ids.close()
Example 24
        for x in ['gallery', 'query']
    }
    dset_loaders = {
        x: torch.utils.data.DataLoader(image_dsets[x],
                                       batch_size=args.batch_size,
                                       shuffle=False,
                                       num_workers=args.num_workers)
        for x in ['gallery', 'query']
    }

class_names = image_dsets['query'].classes
use_gpu = torch.cuda.is_available()

gallery_path = image_dsets['gallery'].imgs
query_path = image_dsets['query'].imgs
gallery_cam, gallery_label = get_id(gallery_path)
query_cam, query_label = get_id(query_path)

if args.multi:
    mquery_path = image_dsets['multi-query'].imgs
    mquery_cam, mquery_label = get_id(mquery_path)

#####################################################################
#
# Load the trained model
#
#####################################################################
print('[i] loading the trained model...')
if args.PCB:
    model_structure = PCB(751)
else:
Example 25
"""
Construct input graph as an example. this code will be removed from the
production version, but shows an example of what the graph of the required
input should look like from a provenance model standpoint
"""
import os
from uuid import uuid1
import hashlib
from socket import getfqdn
from utils import hash_infile, foaf, niiri, nif, crypto, get_id
from utils import prov as pm

T1s = [os.path.abspath('SAD_024.nii.gz')]
subject = 'SAD_024'

ingraph = pm.ProvBundle(identifier=get_id())
ingraph.add_namespace(foaf)
ingraph.add_namespace(niiri)
ingraph.add_namespace(nif)
ingraph.add_namespace(crypto)

agent = ingraph.agent(get_id(),
                      {pm.PROV["type"]: pm.PROV["Person"],
                       foaf["name"]: subject}
                      )
t1_collection = ingraph.collection(get_id())
ingraph.wasAttributedTo(t1_collection, agent)
for t1path in T1s:
    file_md5_hash = hash_infile(t1path, crypto=hashlib.md5)
    file_sha512_hash = hash_infile(t1path, crypto=hashlib.sha512)
    url = "file://%s%s" % (getfqdn(), t1path)
Example 26
 def __init__(self, protocol, x, y, team):
     self.actions = \
     {
         'go_ahead':
         [
             {'action': 'see',                                       'score_f': lambda: 1}
         ],
         'turn_right':
         [
             {'action': 'see',                                       'score_f': lambda: 1}
         ],
         'turn_left':
         [
             {'action': 'see',                                       'score_f': lambda: 1}
         ],
         'see':
         [
             {'action': 'spell',                                     'score_f': self.spell_score},
             {'action': 'grab_object',   'param': 'nourriture',      'score_f': lambda: self.grab_score('nourriture')},
             {'action': 'go_ahead',                                  'score_f': lambda: self.move_score('go_ahead')},
             {'action': 'turn_right',                                'score_f': lambda: self.move_score('turn_right')},
             {'action': 'turn_left',                                 'score_f': lambda: self.move_score('turn_left')},
             {'action': 'drop_object',   'param': 'linemate',        'score_f': lambda: self.drop_score('linemate')},
             {'action': 'drop_object',   'param': 'deraumere',       'score_f': lambda: self.drop_score('deraumere')},
             {'action': 'drop_object',   'param': 'sibur',           'score_f': lambda: self.drop_score('sibur')},
             {'action': 'drop_object',   'param': 'mendiane',        'score_f': lambda: self.drop_score('mendiane')},
             {'action': 'drop_object',   'param': 'phiras',          'score_f': lambda: self.drop_score('phiras')},
             {'action': 'drop_object',   'param': 'thystame',        'score_f': lambda: self.drop_score('thystame')},
             {'action': 'grab_object',   'param': 'linemate',        'score_f': lambda: self.grab_score('linemate')},
             {'action': 'grab_object',   'param': 'deraumere',       'score_f': lambda: self.grab_score('deraumere')},
             {'action': 'grab_object',   'param': 'sibur',           'score_f': lambda: self.grab_score('sibur')},
             {'action': 'grab_object',   'param': 'mendiane',        'score_f': lambda: self.grab_score('mendiane')},
             {'action': 'grab_object',   'param': 'phiras',          'score_f': lambda: self.grab_score('phiras')},
             {'action': 'grab_object',   'param': 'thystame',        'score_f': lambda: self.grab_score('thystame')},
             {'action': 'cry',           'param': self.census_msg,   'score_f': lambda: self.unit_score('cry')},
             {'action': 'get_inventory',                             'score_f': lambda: self.unit_score('get_inventory')},
             {'action': 'fork',                                      'score_f': lambda: self.unit_score('fork')},
             {'action': 'kick',                                      'score_f': self.kick_score},
         ],
         'get_inventory':
         [
             {'action': 'see',                                       'score_f': lambda: self.unit_score('see')},
         ],
         'fork':
         [
             {'action': 'get_inventory',                             'score_f': lambda: self.unit_score('get_inventory')}
         ],
         'grab_object':
         [
             {'action': 'get_inventory',                             'score_f': lambda: self.unit_score('get_inventory')}
         ],
         'drop_object':
         [
             {'action': 'get_inventory',                             'score_f': lambda: self.unit_score('get_inventory')}
         ],
         'spell':
         [
             {'action': 'get_inventory',                             'score_f': lambda: self.unit_score('get_inventory')}
         ],
         'cry':
         [
             {'action': 'get_inventory',                             'score_f': lambda: self.unit_score('get_inventory')},
             {'action': 'cry',           'param': self.census_msg,   'score_f': lambda: self.unit_score('cry')}
         ],
         'kick':
         [
             {'action': 'see',                                       'score_f': lambda: self.unit_score('see')},
         ]
     }
     (self.protocol, self.x, self.y, self.team) = (protocol, x, y, team)
     self.id = utils.get_id()
     self.level = 1
     self.inventory = {}
     self.last_action = 'get_inventory'
     self.view = [[[]]]
     self.starvation = True
     self.action_count = 0
     self.census = {}
     self.unit_actions = {'cry': 0, 'see': 0, 'fork': 0, 'get_inventory': 0, 'expulse': 0}
     self.volatile_path = []
     self.leader = None
Example 27
    for line in req.text.splitlines():
        if line.startswith('window.initReactApplication'):
            json_line = line[line.index('{'):line.rindex('}') + 1]
            market_data = json.loads(json_line)
            csrf_hash = market_data['options']['csrf_hash']
            break

    sizes = '104-110/56/52,110-116/60/54,116-122/64/57,86-92/52/51,92-98/56/51'.split(
        ',')
    action_list = []

    property_response = get_request(
        f'[{{"action":"addProperty","title":"Размер","type":"text"}}]',
        hash=csrf_hash,
        group_id=group_id)
    prop_id = get_id(property_response)
    sizes_dict = dict()
    for size in sizes:

        action = f'[{{"action": "addVariant", "property_id": {str(int(prop_id))}, "title": "{size}"}}]'
        resp_json = get_request(action, hash=csrf_hash, group_id=group_id)
        item_id = get_id(resp_json)
        sizes_dict[size] = item_id
        if item_id == 0:
            print(f"ERROR in album too much sizes")
            break
        time.sleep(0.3)

    item_id = '5089951'
    for size in sizes:
        action_list.append({"action": "cloneItem", "item_id": item_id})
Example 28
def parse_stats(fs_stat_file, entity_uri):
    """Convert stats file to a nidm object
    """

    header, tableinfo, measures = read_stats(fs_stat_file)
    g = prov.ProvBundle(identifier=get_id())
    
    # Set the default _namespace name
    #g.set_default_namespace(fs.get_uri())
    g.add_namespace(foaf)
    g.add_namespace(dcterms)
    g.add_namespace(fs)
    g.add_namespace(nidm)
    g.add_namespace(niiri)

    a0 = g.activity(get_id(), startTime=dt.isoformat(dt.utcnow()))
    user_agent = g.agent(get_id(),
                         {prov.PROV["type"]: prov.PROV["Person"],
                          prov.PROV["label"]: pwd.getpwuid(os.geteuid()).pw_name,
                          foaf["name"]: pwd.getpwuid(os.geteuid()).pw_name})
    g.wasAssociatedWith(a0, user_agent, None, None,
                        {prov.PROV["Role"]: nidm["LoggedInUser"]})
    stat_collection = g.collection(get_id())
    stat_collection.add_extra_attributes({prov.PROV['type']: nidm['FreeSurferStatsCollection']})
    # header elements
    statheader_collection = g.entity(get_id())
    attributes = {prov.PROV['type']: fs['stat_header']}
    for key, value in header.items():
        attributes[fs[key]] = value
    statheader_collection.add_extra_attributes(attributes)
    # measures
    struct_info = {}
    measure_list = []
    measure_graph = rdflib.ConjunctiveGraph()
    measure_graph.namespace_manager.bind('fs', fs.get_uri())
    measure_graph.namespace_manager.bind('nidm', nidm.get_uri())
    unknown_units = set(('unitless', 'NA'))
    for measure in measures:
        obj_attr = []
        struct_uri = fs[measure['structure']]
        if measure['source'] == 'Header':
            measure_name = measure['name']
            if measure_name not in measure_list:
                measure_list.append(measure_name)
                measure_uri = fs[measure_name].rdf_representation()
                measure_graph.add((measure_uri,
                                   rdflib.RDF['type'],
                                   fs['Measure'].rdf_representation()))
                measure_graph.add((measure_uri,
                                   rdflib.RDFS['label'],
                                   rdflib.Literal(measure['description'])))
                measure_graph.add((measure_uri,
                                   nidm['units'].rdf_representation(),
                                   rdflib.Literal(measure['units'])))
            obj_attr.append((nidm["AnatomicalAnnotation"], struct_uri))
            if str(measure['units']) in unknown_units:
                valref = prov.Literal(int(measure['value']), prov.XSD['integer'])
            else:
                valref = prov.Literal(float(measure['value']), prov.XSD['float'])
            obj_attr.append((fs[measure_name], valref))
        elif measure['source'] == 'Table':
            obj_attr.append((nidm["AnatomicalAnnotation"], struct_uri))
            for column_info in measure['items']:
                measure_name = column_info['name']
                if column_info['units'] in unknown_units and \
                   '.' not in column_info['value']:
                    valref = prov.Literal(int(column_info['value']),
                                          prov.XSD['integer'])
                else:
                    valref = prov.Literal(float(column_info['value']),
                                          prov.XSD['float'])
                obj_attr.append((fs[measure_name], valref))
                if measure_name not in measure_list:
                    measure_list.append(measure_name)
                    measure_uri = fs[measure_name].rdf_representation()
                    measure_graph.add((measure_uri,
                                       rdflib.RDF['type'],
                                       fs['Measure'].rdf_representation()))
                    measure_graph.add((measure_uri,
                                       rdflib.RDFS['label'],
                                       rdflib.Literal(column_info['description'])))
                    measure_graph.add((measure_uri,
                                       nidm['units'].rdf_representation(),
                                       rdflib.Literal(column_info['units'])))
        id = get_id()
        if struct_uri in struct_info:
            euri = struct_info[struct_uri]
            euri.add_extra_attributes(obj_attr)
        else:
            euri = g.entity(id, obj_attr)
            struct_info[struct_uri] = euri
        g.hadMember(stat_collection, id)
    g.hadMember(stat_collection, statheader_collection)
    g.derivation(stat_collection, entity_uri)
    g.wasGeneratedBy(stat_collection, a0)
    return g, measure_graph
Example 29
 def test_getid(self):
     self.assertEqual(t.get_id('aa'), 'aa')
     self.assertEqual(t.get_id('aa@bb'), 'bb')
     self.assertEqual(t.get_id('aa@bb@cc'), 'cc')
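The assertions pin the contract down: this get_id returns the text after the last '@', or the input unchanged when there is none. A one-line sketch that satisfies them, hypothetical rather than the module's actual code:

def get_id(value):
    # 'aa' -> 'aa', 'aa@bb' -> 'bb', 'aa@bb@cc' -> 'cc'
    return value.rsplit('@', 1)[-1]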
Example 31
    def consume_row(self, row):
        fields = row.split(self.delimiter)

        if len(fields) == 11:
            competition, season, team, month, day, opponent, location, score, flags, goals, attendance = [e.strip() for e in fields]
            players = []
        elif len(fields) == 12:
            competition, season, team, month, day, opponent, location, score, flags, goals, attendance, players = [e.strip() for e in fields]
        else:
            import pdb; pdb.set_trace()

        # Skipping minigame for now.
        if day in ('M', 'SO', 'OT', 'SO-M'):
            return {}

        # Not played.
        if score == 'np':
            return {}
  
        # Process day before month.
        if day.strip():
            try:
                day = int(day)
            except:
                import pdb; pdb.set_trace()

            # Adjust month if we fall into a new month.
            if self.day is not None:
                if day < self.day:
                    self.month += 1

            self.day = day

        if month.strip():
            self.month = int(month)

        self.year = int(season)

        try:
            d = datetime.datetime(self.year, self.month, self.day)
        except:
            import pdb; pdb.set_trace()

        team_score, opponent_score = [int(e) for e in score.split(',')]

        competition, stage = competition_map[competition]

        team = get_full_name(team, season)
        opponent = get_full_name(opponent, season)
            
        if location == 'h':
            home_team = team
            home_score = team_score
            away_team = opponent
            away_score = opponent_score
        else:
            home_team = opponent
            home_score = opponent_score
            away_team = team
            away_score = team_score

        if not attendance.strip():
            attendance = None
        else:
            try:
                attendance = int(attendance)
            except:
                import pdb; pdb.set_trace()


        shootout_winner = None
        if flags.strip() == '*':
            if home_score > away_score:
                shootout_winner = home_team
            elif home_score < away_score:
                shootout_winner = away_team
            else:
                import pdb; pdb.set_trace()

            home_score = away_score = min(home_score, away_score)

        gid = get_id()

        game_data = {
            'gid': gid,
            'competition': competition,
            'stage': stage,
            'season': season,
            'date': d,
            'team1': home_team,
            'team2': away_team,
            'team1_score': home_score,
            'team2_score': away_score,
            'home_team': home_team,
            'attendance': attendance,
            'shootout_winner': shootout_winner,
            'source': 'NASL - A Complete Record of the North American Soccer League',

            }

        goal_list = []


        for goal in goals.split(','):
            m = re.match("(.*?) (\d)", goal)
            if m:
                goal, count = m.groups()
                count = int(count)
            elif goal.strip():
                count = 1
            else:
                # Empty results.
                count = 0


            for e in range(count):
                goal_list.append({
                        'gid': gid,
                        'team': team,
                        'season': season,
                        'competition': competition,
                        'date': d,
                        'goal': goal,
                        'minute': None,
                        'assists': []
                        })

        appearance_list = []

        if '*' in players:
            off = None
        else:
            off = 90


        # No duplicates
        #if len(players) != len(set(players)):
        #    import pdb; pdb.set_trace()

        for appearance_code in players:
            if appearance_code == '*':
                appearance_list[-1]['on'] = None
            else:
                roster = self.rosters[(season, team)]
                try:
                    name = player_from_abbreviation(appearance_code, roster)
                except:
                    print(season, team)
                    print(d)
                    print(appearance_code)
                    raise

                appearance_list.append({
                        'gid': gid,
                        'name': name,
                        'on': 0,
                        'off': off,
                        'team': team,
                        'competition': competition,
                        'date': d,
                        'season': season,
                        #'goals_for': goals_for,
                        #'goals_against': goals_against,
                        #'order': None,
                        })
            

        return game_data, goal_list, appearance_list
Example 32
    parser.add_argument('--lr', default=0.1, type=float)
    parser.add_argument('--mom', default=0, type=float)
    parser.add_argument('--wd', default=0, type=float)

    # OTHER
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--double', action='store_true', default=False)
    parser.add_argument('--no_cuda', action='store_true', default=False)
    parser.add_argument('--load_opt', action='store_true', default=False)

    args = parser.parse_args()

    print(args)

    # initial setup
    args.path = get_id(args.path)
    if args.double:
        torch.set_default_tensor_type('torch.DoubleTensor')
    args.use_cuda = not args.no_cuda and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    torch.manual_seed(args.seed)

    train_loader, tr_loader_eval, te_loader_eval, num_classes = get_data(args.dataset, args.path, args.bs_train, args.bs_eval, args.data_size)

    # get/load the model, optimizer, and crit
    if args.load_model != '':
        state = torch.load(args.load_model) # gives the state_dict and opt
        args.model = args.load_model.split("/")[-1].split("_")[0] # this is by our saving convention
        model_class = getattr(models, args.model)
        net = model_class(num_classes=num_classes).to(args.device)
        net.load_state_dict(state['weights'])
Example 33
    def consume_row(self, row):
        if not row.strip():
            return {}

        fields = row.strip().split('\t')

        # What is field # 10?
        if len(fields) == 10:
            print("Ten fields for some reason?")
            print(row)
            fields = fields[:9]

        if len(fields) == 9:
            team, season, competition, month, day, opponent, location, score, goals = fields
        elif len(fields) == 8:
            team, season, competition, month, day, opponent, location, score = fields
            goals = ''

        else:
            # A couple of games without scores (forfeits). (len = 7)
            print(fields)
            return {}

        
        # Figure out what year a game was played in. (dates are only partially entered for convenience)
        sx = season  # clean up the season string a bit
        for e in 'Fall', 'Spring', 'Playoffs', 'First Half', 'Second Half':
            sx = sx.replace(e, '')

        if '-' in season:
            try:
                start_year, end_year = [int(e) for e in sx.split('-')]
            except:
                import pdb; pdb.set_trace()
        else:
            start_year = end_year = int(re.match('^(\d+).*$', sx).groups()[0])

        round = ''
        if 'Half' in season:
            season, round = season.split(' ', 1)

        # Skipping minigames for now.
        if day in ('M', 'SO', 'OT', 'SO-M'):
            return {}

        # Not played.
        if score == 'np':
            return {}

        
        # Process day before month. (huh? why?)
        if day.strip():
            self.day = int(day)

        if month.strip():
            self.month = int(month)

            if self.month >= 8:
                self.year = start_year
            else:
                self.year = end_year

        d = datetime.datetime(self.year, self.month, self.day)

        if score in ('forfeit loss', 'forfeit win', 'awarded', ''):
            return {}

        try:
            team_score, opponent_score = [int(e) for e in score.split(',')]
        except:
            import pdb; pdb.set_trace()

        competition = competition_map.get(competition, competition)


        if 'Playoffs' in season:
            season = season.replace('Playoffs', '').strip()
            #competition = "%s Playoffs" % competition
            stage = 'playoffs'
        else:
            stage = ''


        if competition in ('US Open Cup', 'AFA Cup', 'American Cup'):
            return {}

        team = get_full_name_stats(team, season)
        opponent = get_full_name_stats(opponent, season)
            
        if location == 'h':
            home_team = team
            home_score = team_score
            away_team = opponent
            away_score = opponent_score
        else:
            home_team = opponent
            home_score = opponent_score
            away_team = team
            away_score = team_score

        gid = get_id()

        game_data =  {
            'gid': gid,
            'competition': competition,
            'season': season,
            'date': d,
            'team1': home_team,
            'team2': away_team,
            'team1_score': home_score,
            'team2_score': away_score,
            'home_team': home_team,
            'sources': ['American Soccer League (1921-1931)',],
            'round': round,
            'stage': stage,
            }

        goal_list = []


        for goal in goals.split(','):
            m = re.match("(.*?) (\d)", goal)
            if m:
                goal, count = m.groups()
                count = int(count)
            elif goal.strip():
                count = 1
            else:
                # Empty results.
                count = 0


            for e in range(count):
                goal_list.append({
                        'gid': gid,
                        'team': team,
                        'season': season,
                        'competition': competition,
                        'date': d,
                        'goal': goal,
                        'minute': None,
                        'assists': []
                        })

        return game_data, goal_list
Example 34
    def parse(self, response):
        '''
        file = open("a.html", 'w')
        file.write(response._body)
        file.close()
        return
        '''
        ret_items = []
        next_url = self.get_next_page(response)
        if next_url:
            ret_items.append(Request(url=next_url,dont_filter=True,callback=self.parse))
        log.msg('url ' + response._url, level = log.DEBUG)
        #return ret_items 
        hxs = HtmlXPathSelector(response)
        h_div_array = hxs.select('//body/div[@class="area"]/div[@class="dealbox"]/div')
        i = 0
        for h_div in h_div_array:
            xpath_list = [
                ['login_url', 'div/p/a/@href', 'string', None],
                ['img_url', 'div/p/a/img/@src', 'string', None],
                ['origin_img_url', 'div/p/a/img/@data-original', 'string', None],
                ['title', 'div/h2/a/text()', 'string', None],
                ['current_price_yuan_str', 'div/h4/span/em/text()', 'string', None],
                ['current_price_fen_str', 'div/h4/span/em/em/text()', 'string', ""],
                ['origin_price', 'div/h4/span/i/text()', 'get_float_str_to_fen', None],
                ['start_time_str', 'div/h5/span/text()', 'string', None],
            ]
            attr_dict = get_attr(xpath_list, h_div)
            if not attr_dict:
                continue
            start_time = get_datetime(attr_dict['start_time_str'])
            if start_time < utils.get_default_start_time():
                log.msg('skip too old time ', level = log.DEBUG)
                continue

            current_price = get_float_str_to_fen(attr_dict['current_price_yuan_str'] + attr_dict['current_price_fen_str'])
            i = i + 1
            log.msg('opentab count ' + str(i), level = log.DEBUG)
            if i % 5 == 0:
                self.recreate_driver()
                log.msg('recreate_driver i ' + str(i), level = log.DEBUG)
            if not utils.get_url_by_browser(self.driver, attr_dict['login_url']):
                continue
            log.msg('after get_url_by_browser ', level = log.DEBUG)
            #a_obj = self.driver.find_element_by_xpath('//body/div[@id="dialog_out_weldeal"]/div[@class="diginfo"]/div[@class="weloutdialog"]/div[@id="ppLogin"]/form[@name="loginform"]/ul/li[@class="reg"]/a')
            a_obj = utils.find_element_by_xpath(self.driver, '//body/div[@id="dialog_out_weldeal"]/div[@class="diginfo"]/div[@class="weloutdialog"]/div[@id="ppLogin"]/form[@name="loginform"]/ul/li[@class="reg"]/a')
            log.msg('after find_element_by_xpath ', level = log.DEBUG)
            if not a_obj:
                log.msg('failed to get url from login_url ' + attr_dict['login_url'], level = log.WARNING)
                continue
            a_obj.click()
            log.msg('after click ', level = log.DEBUG)
            #time.sleep(2)

            #url = self.driver.current_url
            origin_url = utils.get_current_url(self.driver)
            if not origin_url:
                continue
            if origin_url.find('http://s.click.taobao.com') != -1 :
                log.msg('skip invalid url ' + origin_url, level = log.DEBUG)
                continue
            url = origin_url.split('&')[0]
            #url = 'http://www.example.com'
            log.msg('after current_url ' + url, level = log.DEBUG)

            pic_url, item_url, baoyou, cid = utils.get_taobao_item_info(utils.get_id(url))
            if not item_url:
                log.msg('failed to get item info url ' + url, level = log.DEBUG)
                continue

            origin_category_name, category_name = self.cg.get_cid_name(cid)
            log.msg('origin_category_name ' + origin_category_name + ' category_name ' + category_name + ' title ' + attr_dict['title'] + ' url ' + url, level = log.DEBUG)
            
            discount = get_discount(current_price, attr_dict['origin_price'])
            self.log("discount " + str(discount), level = log.DEBUG)

            if attr_dict.has_key('origin_img_url') and attr_dict['origin_img_url'][-4:] == '.jpg':
                img_url = attr_dict['origin_img_url']
            elif attr_dict.has_key('img_url') and attr_dict['img_url'][-4:] == '.jpg':
                img_url = attr_dict['img_url']
            else:
                log.msg('skip invalid img_url ' + attr_dict['img_url'], level = log.WARNING)
                continue

            prod = Zhe800BaoyouItem()
            prod['link'] = url
            prod['id'] = hashlib.md5(prod['link']).hexdigest().upper()
            prod['title'] = attr_dict['title']
            prod['img'] = img_url
            prod['ori_price'] = attr_dict['origin_price']
            prod['cur_price'] = current_price
            prod['discount'] = discount
            prod['stat'] = utils.BEGIN
            prod['sale'] = UNKNOWN_NUM
            prod['sale_percent'] = UNKNOWN_NUM
            prod['display_time_begin'] = start_time
            prod['display_time_end'] = utils.get_default_end_time()
            #prod['display_time_end'] = start_time
            #prod['actual_time_begin'] = start_time
            #prod['actual_time_end'] = start_time
            prod['limit'] = UNLIMITED_NUM
            prod['source'] = self.display_name
            prod['origin_category_name'] = origin_category_name
            prod['category_name'] = category_name
            ret_items.append(prod)
            if debug :
                break
        return ret_items
Example 35
def process_submission(submission):
    new_mirror = MirroredObject(submission.id, submission.url)

    already_gfycat = False
    already_imgur = False

    url_to_process = submission.url

    if submission.domain == "vine.co":
        url_to_process = retrieve_vine_video_url(url_to_process)
    elif submission.domain == "v.cdn.vine.co":
        url_to_process = retrieve_vine_cdn_url(url_to_process)
    elif submission.domain == "gfycat.com":
        already_gfycat = True
        new_mirror.gfycat_url = url_to_process
        url_to_process = get_gfycat_info(get_id(url_to_process))['mp4Url']
    elif submission.domain == "mediacru.sh":
        new_mirror.mediacrush_url = url_to_process
        url_to_process = "https://cdn.mediacru.sh/%s.mp4" % get_id(url_to_process)
    elif submission.domain == "fitbamob.com":
        new_mirror.fitbamob_url = url_to_process
        url_to_process = get_fitbamob_info(get_id(url_to_process))['mp4_url']

    if submission.domain == "giant.gfycat.com":
        # Just get the gfycat url
        url_to_process = url_to_process.replace("giant.", "")
        new_mirror.gfycat_url = url_to_process
        already_gfycat = True

    if submission.domain == "imgur.com" and extension(url_to_process) == ".gif":
        new_mirror.imgur_url = url_to_process
        already_imgur = True

    # Get converting
    log("--Beginning conversion, url to convert is " + url_to_process)
    if not already_gfycat:
        gfy_url = gfycat_convert(url_to_process)
        if gfy_url:
            new_mirror.gfycat_url = gfy_url
            log("--Gfy url is " + new_mirror.gfycat_url)
        else:
            cache_submission(submission)
            return

    if submission.domain != "mediacru.sh":
        # TODO check file size limit (50 mb)
        new_mirror.mediacrush_url = mediacrush_convert(url_to_process)
        log("--MC url is " + new_mirror.mediacrush_url)

    if submission.domain != "fitbamob.com":
        fitba_url = fitbamob_convert(submission.title, url_to_process)
        if fitba_url:
            new_mirror.fitbamob_url = fitba_url
            log("--Fitbamob url is " + new_mirror.fitbamob_url)

    # TODO Re-enable this once "animated = false" issue resolved
    # if not already_imgur:
    # # TODO need to check 10mb file size limit
    #     new_mirror.imgur_url = imgur_upload(submission.title, url_to_process)
    #     log("--Imgur url is " + new_mirror.imgur_url)

    comment_string = comment_intro + new_mirror.comment_string() + comment_info
    add_comment(submission, comment_string)
    cache_submission(submission)
    if not already_gfycat:
        # Take some time to avoid rate limiting. Annoying but necessary
        log('-Waiting 60 seconds', Color.CYAN)
        time.sleep(60)
Example 36
def _stop_dev_patsaks(dev_name):
    for app_name in os.listdir(ROOT.devs[dev_name]):
        stop_patsaks(get_id(dev_name, app_name))
Example 37
 def fire(self, target_unit):
     #TODO: use a method to add the bullets instead
     self.game.bullets[utils.get_id()] = Bullet(self.bullet_speed, [self.get_map_center()[0], self.get_map_center()[1]], target_unit, self.bullet_color, self.bullet_size, self.sfx, self.damage)
     self.regen = 0
Example 38

if __name__ == "__main__":
    db = Database()

    db.make_query(
        '''
        INSERT into user (username)
        VALUES ('test')
        '''
    )

    import datetime
    from utils import get_id

    db.insert_data(
        table='post',
        username='******',
        title='rapidly losing the will to live over here',
        content='please somebody just kill me now',
        date_posted=datetime.datetime.now(),
        date_published=None,
        post_id=get_id()
    )

    # print(db.make_query(
    #     '''
    #     SELECT * from user
    #     '''
    # ))
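Here get_id supplies a fresh primary key for the post row. The helper itself is not shown; a plausible sketch, assuming UUID-based ids (hypothetical):

import uuid

def get_id():
    # Stand-in: an opaque, collision-resistant row id.
    return uuid.uuid4().hex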
Example 39
    parser.add_argument('--stiffness', default=1, type=float)
    parser.add_argument('--epochs', default=3, type=int)
    parser.add_argument('--data_size', default=0, type=int)
    parser.add_argument('--bs_train', default=50, type=int)
    parser.add_argument('--bs_eval', default=50, type=int)
    parser.add_argument('--lr', default=0.1, type=float)
    parser.add_argument('--mom', default=0, type=float)
    parser.add_argument('--wd', default=0, type=float)
    parser.add_argument('--dataset', default='cifar10', type=str)
    parser.add_argument('--data_path', default='~/data', type=str)
    parser.add_argument('--log_path', default='./log', type=str)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--no_cuda', action='store_true', default=False)

    args = parser.parse_args()
    args.path = get_id(args.data_path)
    args.use_cuda = not args.no_cuda and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    torch.manual_seed(args.seed)

    # training setup
    train_loader, tr_loader_eval, te_loader_eval, num_classes = get_data(
        args.dataset, args.data_path, args.bs_train, args.bs_eval,
        args.data_size)
    crit = nn.CrossEntropyLoss().to(args.device)

    net1 = load_net(args.net1_path, num_classes, args.device)
    net2 = load_net(args.net2_path, num_classes, args.device)

    if args.interp_method == 'string':