Example No. 1
def validate(val_loader, model, criterion, args_, profile=False):
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # switch to evaluate mode
    model.eval()
    durations = []
    if profile:
        start()

    with torch.no_grad():
        for i, (images, target) in enumerate(val_loader):
            torch.cuda.synchronize()
            start_time = time.time()

            if args_.gpu is not None:
                images = images.cuda(args_.gpu, non_blocking=True)
            target = target.cuda(args_.gpu, non_blocking=True)

            # compute output
            output = model(images)

            torch.cuda.synchronize()
            durations.append(time.time() - start_time)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0].item(), images.size(0))
            top5.update(acc5[0].item(), images.size(0))
    if profile:
        stop()

    return top1.avg, top5.avg, durations
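
This validation loop assumes the usual AverageMeter and accuracy helpers from the PyTorch ImageNet example, plus start()/stop() profiler hooks defined elsewhere. A minimal sketch of the two metric helpers, with their behaviour inferred from how they are called above:

import torch

class AverageMeter:
    """Running average of a metric (assumed helper, ImageNet-example style)."""
    def __init__(self, name, fmt=':f'):
        self.name, self.fmt = name, fmt
        self.val = self.sum = self.count = self.avg = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    """Top-k accuracy in percent for a batch, one tensor per requested k."""
    with torch.no_grad():
        maxk = max(topk)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        return [correct[:k].reshape(-1).float().sum(0, keepdim=True)
                .mul_(100.0 / target.size(0)) for k in topk]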
Example No. 2
def solve(startingNumbers, nth=2020):
    utils.start()
    i = 0
    spokenNumbers = {}
    spokenNumbersBefore = {}

    while i < len(startingNumbers):
        number = startingNumbers[i]
        i += 1
        if number in spokenNumbers:
            spokenNumbersBefore[number] = spokenNumbers[number]
        spokenNumbers[number] = i

    while i != nth:
        i += 1

        if number in spokenNumbersBefore:
            number = spokenNumbers[number] - spokenNumbersBefore[number]
        else:
            number = 0

        if number in spokenNumbers:
            spokenNumbersBefore[number] = spokenNumbers[number]

        spokenNumbers[number] = i

    print("%d (%.2fms)" % (number, utils.stop()))
    return number
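
utils.start() and utils.stop() are an assumed timing pair not shown on this page; a sketch consistent with how they are used (stop() returning elapsed milliseconds), plus a hedged usage note based on the puzzle's sample input:

import time

_t0 = None

def start():
    """Begin timing (assumed helper)."""
    global _t0
    _t0 = time.perf_counter()

def stop():
    """Return milliseconds elapsed since start() (assumed helper)."""
    return (time.perf_counter() - _t0) * 1000.0

# Usage: solve([0, 3, 6]) should yield 436 for the default nth=2020,
# matching the memory-game example from the puzzle statement.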
Example No. 3
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
    """
    To be called from the current cluster leader.
    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being remounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
        create_pool(service, pool)

    if not rbd_exists(service, pool, rbd_img):
        utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    #      otherwise this hook will blow away existing data if its executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if utils.running(svc):
                utils.juju_log('INFO',
                               'Stopping service %s prior to migrating '
                               'data' % svc)
                utils.stop(svc)

        place_data_on_ceph(service, blk_device, mount_point, fstype)

        for svc in system_services:
            utils.start(svc)
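
A hedged example of how the cluster leader might call this helper; all argument values below are purely illustrative, and the lower-level helpers (pool_exists, create_rbd_image, map_block_storage, ...) are assumed to come from the surrounding charm code:

# Illustrative call only -- names, sizes and paths are hypothetical.
ensure_ceph_storage(service='nova-compute',
                    pool='nova',
                    rbd_img='nova-rbd',
                    sizemb=10240,
                    mount_point='/var/lib/nova/instances',
                    blk_device='/dev/rbd/nova/nova-rbd',
                    fstype='ext4',
                    system_services=['nova-compute'])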
Example No. 4
def such_as_np(s, np_sent):
    '''
    Given an NP-chunked sentence, try to extract concept pairs from a
    'such as' pattern.

    Returns (X, Y) as sets: X holds the broader concepts, Y the example
    concepts listed after 'such as'.
    '''
    X = set()
    Y = set()
    if re.findall(r'\bsuch\b\s\bas\b', s):
        # extract the such as pattern
        # logging.info(s)

        semi_pairs = relextract.tree2semi_rel(np_sent)
        reldicts = relextract.semi_rel2reldict(semi_pairs)
        # find the first such as
        logging.info(np_sent)
        # pprint(semi_pairs)
        # pprint(reldicts)
        # logging.info(len(reldicts))
        if len(reldicts) > 0:
            try:
                while 'such as' not in reldicts[0]['untagged_filler']:
                    reldicts.pop(0)


                X.add(reldicts[0]['subjsym'])
                Y.add(reldicts[0]['objsym'])

                reldicts.pop(0)

                # find the sub concept
                for reldict in reldicts:
                    if reldict['untagged_filler'] not in [',', 'and', 'or']:
                        Y.add(reldict['subjsym'])
                        break
                    Y.add(reldict['subjsym'])
                    Y.add(reldict['objsym'])
            except Exception as e:
                logging.error(e)
                logging.error(reldicts)
                logging.error('Original sentence: '+s)
        stop()
    return (X, Y)
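
such_as_np expects np_sent to be an NP-chunked tree that nltk.sem.relextract can consume. A minimal sketch of producing that input with NLTK's RegexpParser (the chunk grammar is an assumption; any NP chunker returning a Tree should do):

import nltk

def np_chunk(sentence):
    """Tokenise, POS-tag and NP-chunk a raw sentence (assumed front end)."""
    grammar = 'NP: {<DT>?<JJ>*<NN.*>+}'   # simple, illustrative NP grammar
    chunker = nltk.RegexpParser(grammar)
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    return chunker.parse(tagged)

s = 'They study green plants such as trees, ferns and mosses.'
X, Y = such_as_np(s, np_chunk(s))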
Example No. 5
def crawler(page_num, category, whole_data=None):

    t1 = time()
    page = BASE_PAGE.format(category, page_num)
    soup = get_soup(page)
    runtime = time() - t1
    timeout = False

    if runtime > 25:
        one_page = None
        last_page = None
        timeout = True

        return one_page, last_page, timeout

    titles = soup.select('.title')
    tags = soup.select('.tags')
    days = soup.select('.date')
    thumbs = soup.select('.img-responsive')
    urls = soup.select('.cover a')

    one_page = []

    for _title, _tag, _url, _thumb, _day in zip(titles, tags, urls, thumbs,
                                                days):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        tag = parse_tag(_tag)
        url = parse_url(_url)
        day = parse_day(_day)

        obj = {
            'corp': CORP,
            'thumb': thumb,
            'title': title,
            'day': day,
            'url': url,
            'category': category,
            'tag': tag
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page, timeout

        one_page.append(obj)

    last_page = False if one_page else True
    print(last_page, ': ', page_num)  # delete

    return one_page, last_page, timeout
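
This crawler, like the ones that follow, relies on shared helpers that are not shown on this page: get_soup() fetches and parses a listing page, and stop(obj, whole_data) signals that the scrape has reached articles that were already collected. A minimal sketch inferred from the call sites (treat the exact behaviour as an assumption):

import requests
from bs4 import BeautifulSoup

def get_soup(page_url):
    """Fetch a page and return a parsed BeautifulSoup tree (assumed helper)."""
    response = requests.get(page_url, timeout=30)
    return BeautifulSoup(response.content, 'html.parser')

def stop(obj, whole_data):
    """Return True when obj appears in previously saved data, so the crawl
    can stop early. whole_data is assumed to be a list of article dicts."""
    if not whole_data:
        return False
    return any(saved.get('url') == obj['url'] for saved in whole_data)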
Example No. 6
def crawler(page_num, whole_data=None):

    page = BASE_PAGE.format(page_num)
    soup = get_soup(page)

    titles = soup.select('.all_news .title')
    days = soup.select('.date')
    times = soup.select('.time')
    thumbs = soup.select('.all_news img')
    urls = soup.select('.all_news .title')
    cats = soup.select('.all_news a')

    one_page = []

    for _title, _url, _thumb, _day, _time, _cat in zip(titles, urls, thumbs,
                                                       days, times, cats):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        url = parse_url(_url)
        day = parse_day(_day)
        time = parse_time(_time)
        cat = parse_cat(_cat)

        if exception(title, cat):
            continue

        obj = {
            'corp': CORP,
            'thumb': thumb,
            'title': title,
            'day': day,
            'time': time,
            'url': url,
            'category': cat
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(CORP, ': ', title)

        one_page.append(obj)

    last_page = False if one_page else True

    return one_page, last_page
Example No. 7
def crawler(page_num, whole_data=None):
    page = main_page.format(page_num)
    soup = get_soup(page)

    titles = soup.select('.tt')
    thumbs = soup.find_all('div', class_='txt_wrap')
    urls = soup.find_all('div', class_='txt_wrap')
    tags = soup.find_all('div', class_='info')

    one_page = []

    for _title, _tag, _url, _thumb in zip(titles, tags, urls, thumbs):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        tag = parse_tag(_tag)
        url = parse_url(_url)

        soup = get_soup(url)
        cat = parse_cat(soup)
        day, time = parse_day_time(soup)

        if not cat:
            continue

        obj = {
            'corp': corp,
            'thumb': thumb,
            'time': time,
            'title': title,
            'day': day,
            'url': url,
            'tags': tag,
            'category': cat,
            'cat1': cat[0],
            'cat2': cat[1]
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(corp, ': ', title)
        one_page.append(obj)
    last_page = False if one_page else True

    return one_page, last_page
Example No. 8
def crawler(page_num, category, ended_categories, whole_data=None):

    page = BASE_PAGE.format(category, page_num)
    soup = get_soup(page)

    titles = soup.select('.post_header_title h5 a')
    days = soup.select('.post_info_date')
    thumbs = soup.find_all(class_='post_header')
    urls = soup.select('.post_header_title h5 a')
    cats = soup.select('.post_info_cat')[1:]

    one_page = []
    title_list = []

    for _title, _url, _thumb, _day, _cat in zip(titles, urls, thumbs, days,
                                                cats):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        url = parse_url(_url)
        day = parse_day(_day)
        cat = parse_cat(_cat)

        title_list.append(title)
        if duplication(cat, ended_categories):
            continue

        obj = {
            'corp': CORP,
            'thumb': thumb,
            'title': title,
            'day': day,
            'url': url,
            'category': cat
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(CORP, ': ', title)

        if obj:
            one_page.append(obj)
    last_page = False if title_list else True

    return one_page, last_page
Example No. 9
def irobotnews(page_number, update=False, whole_data=None):
    page = main_page.format(page_number)
    html = requests.get(page)
    soup = bs(html.content, 'html.parser')

    titles = soup.select('.ArtList_Title a ')
    urls = soup.select('.ArtList_Title a ')
    days = soup.select('.View_SmFont')
    thumbnails = soup.select('.ArtList_Title')
    categories = soup.select('.ArtList_Title .FontKor')
    corp = soup.title.text

    one_page = []
    last_page = False

    for _title, _day, _thumb, _cat, _url in zip(titles, days, thumbnails,
                                                categories, urls):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        day = parse_day(_day)
        url = parse_url(_url)
        cat = parse_cat(_cat)
        time = parse_time(url)

        obj = {
            'corp': corp,
            'thumb': thumb,
            'time': time,
            'title': title,
            'day': day,
            'url': url,
            'category': cat
        }

        if stop(obj, whole_data):
            last_page = True
            return last_page, one_page

        print(corp, ': ', title)
        one_page.append(obj)

    last_page = True if not one_page else False

    return last_page, one_page
Example No. 10
def crawler(page_num, whole_data=None):
    obj[page_key] = page_num
    my_obj = obj
    page = requests.post(BASE_PAGE, data=my_obj, headers=headers)
    page = page.text.encode('utf8')
    page = page.decode('unicode_escape')

    soup = bs(page, 'html.parser')

    urls = soup.find_all('h3', {'class': 'entry-title'})
    titles = soup.find_all('a', {'class': 'td-image-wrap'})
    datetimes = soup.find_all('time', {'class': 'entry-date'})
    thumbs = soup.find_all('span', {'class': 'entry-thumb'})
    cats = soup.find_all('a', {'class': 'td-post-category'})

    one_page = []

    for _title, _url, _thumb, _cat, _datetime in zip(titles, urls, thumbs,
                                                     cats, datetimes):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        url = parse_url(_url)
        cat = parse_cat(_cat)
        day, time = parse_day_time(_datetime)

        obj_ = {
            'corp': CORP,
            'thumb': thumb,
            'title': title,
            'day': day,
            'time': time,
            'url': url,
            'category': cat
        }

        if stop(obj_, whole_data):  # check the freshly built record, not the POST payload
            last_page = True
            return one_page, last_page

        print(CORP, ': ', title)
        one_page.append(obj_)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 11
def bikorea(page_num, update, whole_data=None):
    page = main_page.format(page_num)

    html = requests.get(page)
    soup = bs(html.content, 'html.parser')

    corp = soup.head.title.text
    thumbs = soup.select('.ArtList_Title')
    cats = soup.select('.ArtList_Title a')
    titles = soup.select('.ArtList_Title a')
    days = soup.select('.FontEng')
    urls = soup.select('.ArtList_Title a')

    one_page = []

    for _title, _day, _url, _thumb, _cat in zip(titles, days, urls, thumbs,
                                                cats):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        day = parse_day(_day)
        url = parse_url(_url)
        cat = parse_cat(_cat)
        time = parse_time(url)

        obj = {
            'corp': corp,
            'thumb': thumb,
            'time': time,
            'title': title,
            'day': day,
            'url': url,
            'category': cat
        }

        if update and stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(corp, ': ', title)
        one_page.append(obj)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 12
def crawler(page_num, whole_data=None):
    page = main_page.format(page_num)

    html = requests.get(page)
    soup = bs(html.content, 'html.parser')

    titles = soup.select('.post-wrap img')
    thumbs = soup.select('.post-wrap img')
    cats = soup.find_all('span', class_="cat-title")
    urls = soup.select('.image-link')
    datetimes = soup.find_all('time')

    one_page = []

    for _title, _cat, _url, _thumb, _datetime in zip(titles, cats, urls,
                                                     thumbs, datetimes):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        cat = parse_cat(_cat)
        url = parse_url(_url)
        day, time = parse_day_time(_datetime)

        obj = {
            'corp': corp,
            'thumb': thumb,
            'time': time,
            'title': title,
            'day': day,
            'url': url,
            'category': cat
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(corp, ': ', title)
        one_page.append(obj)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 13
def crawler(page_num, whole_data=None):
    page = BASE_PAGE.format(page_num)

    html = requests.get(page)
    soup = bs(html.content, 'html.parser')

    titles = soup.find_all('span', {'class': 'cate'})
    urls = soup.find_all('span', {'class': 'cate'})
    cats = soup.find_all('span', {'class': 'cate'})
    thumbs = soup.find_all('span', {'class': 'cate'})

    one_page = []

    for _title, _url, _thumb, _cat in zip(titles, urls, thumbs, cats):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        url = parse_url(_url)
        cat = parse_cat(_cat)
        day, time = parse_day_time(url)

        obj = {
            'corp': CORP,
            'thumb': thumb,
            'title': title,
            'day': day,
            'time': time,
            'url': url,
            'category': cat
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(CORP, ': ', title)

        one_page.append(obj)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 14
def hellodd(page_num, whole_data=None):
    page = main_page.format(page_num)

    html = requests.get(page)
    soup = bs(html.content, 'html.parser')
    content = soup.find('section', {'id': 'section-list'})

    titles_urls = content.find_all('h4', {'class': 'titles'})
    time_category = content.find_all('span', {'class': 'byline'})
    thumbs = content.find_all('li')

    one_page = []

    for _titles_urls, _time_category, _thumb in zip(titles_urls, time_category,
                                                    thumbs):

        thumb = parse_thumb(_thumb)
        title, url = parse_titles_urls(_titles_urls)
        time, day, cat = parse_time_category(_time_category)

        obj = {
            'corp': corp,
            'thumb': thumb,
            'title': title,
            'day': day,
            'url': url,
            'category': cat
        }

        if '[인사]' in title or cat == '인사동정':
            continue

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(corp, ': ', title)
        one_page.append(obj)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 15
def crawler(page_num, whole_data=None):
    page = main_page.format(page_num)

    html = requests.get(page)
    soup = bs(html.content, 'html.parser')

    corp = soup.head.title.text
    titles = soup.select('.mt-0')
    thumbs = soup.select('.media img')
    days = soup.find_all('time')
    urls = soup.find_all('a', class_='media')

    one_page = []

    for _title, _day, _url, _thumb in zip(titles, days, urls, thumbs):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        day = parse_day(_day)
        url = parse_url(_url)

        obj = {
            'corp': corp,
            'thumb': thumb,
            'title': title,
            'day': day,
            'url': url
        }

        if stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(corp, ': ', title)

        one_page.append(obj)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 16
def crawler(page_num, update, whole_data=None):
    page = BASE_PAGE.format(page_num)

    html = requests.get(page)
    soup = bs(html.content, 'html.parser')
    
    titles = soup.find_all('a', {'id':'title'})
    urls = soup.find_all('a', {'id':'title'})
    thumbs = soup.find_all('div', {'id':'image'})
    cats = soup.find_all('div', {'id':'tag_container'})
    days = soup.find_all('div', {'id':'writer'})

    one_page = []
    
    for _title, _day, _url, _thumb, _cat in zip(titles, days, urls, thumbs, cats):

        thumb = parse_thumb(_thumb)
        title = parse_title(_title)
        day = parse_day(_day)
        url = parse_url(_url)
        cat = parse_cat(_cat)
        
        obj = {'corp': CORP,
               'thumb': thumb,
               'title': title,
               'day': day,
               'url': url,
               'category': cat}

        if update and stop(obj, whole_data):
            last_page = True
            return one_page, last_page

        print(CORP, ': ', title)
        one_page.append(obj)
    last_page = True if not one_page else False

    return one_page, last_page
Example No. 17
parser.add_argument(
    'target',
    help='path or URL of a photo of the faces to be found')

parser.add_argument(
    'candidate',
    help='path or URL of a photo to find expected target faces')

args = parser.parse_args()

# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------
target_url = args.target if is_url(args.target) else get_abspath(args.target)  # Get the photo of target faces
candidate_url = args.candidate if is_url(args.candidate) else get_abspath(args.candidate)  # Get the photo to be checked

if os.path.isdir(target_url) or os.path.isdir(candidate_url):
    stop("Only one photo allowed!")

# ----------------------------------------------------------------------
# Prepare Face API client
# ----------------------------------------------------------------------

# Request subscription key and endpoint from user.
subscription_key, endpoint = get_private()

credentials = CognitiveServicesCredentials(subscription_key)  # Set credentials
client = FaceClient(endpoint, credentials)  # Setup Azure face API client


# ----------------------------------------------------------------------
# Detect faces
# ----------------------------------------------------------------------
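
The snippet ends where detection would begin. A hedged sketch of that next step using the FaceClient created above: detect_with_url, detect_with_stream and verify_face_to_face are real calls in the azure-cognitiveservices-vision-face SDK, but the surrounding control flow is assumed.

# Sketch only -- detect a face in each photo, then verify they match.
def detect(client, photo):
    if is_url(photo):
        return client.face.detect_with_url(url=photo)
    with open(photo, 'rb') as image:
        return client.face.detect_with_stream(image)

target_faces = detect(client, target_url)
candidate_faces = detect(client, candidate_url)

if not target_faces or not candidate_faces:
    stop("No face detected in one of the photos!")

result = client.face.verify_face_to_face(target_faces[0].face_id,
                                         candidate_faces[0].face_id)
print("Identical: {}, confidence: {:.2f}".format(result.is_identical,
                                                 result.confidence))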
Example No. 18
os.makedirs(TEMP_PATH, exist_ok=True)

data = {}
if os.path.exists(ENCODE_FILE):  # Load known faces data if available
    data = load_data(ENCODE_FILE)

# ----------------------------------------------------------------------
# Determine the person's name to match
# ----------------------------------------------------------------------

use_database = False
name = None
if args.name is not None:  # Use the name provided
    name = args.name
elif args.batch:  # Stop if in batch mode
    stop("No name provided!")
else:  # No name provided
    if not args.capture:
        if data != {}:  # Recognise faces of known persons in database
            use_database = True
            name = None
        else:
            name = ask_for_input("\nPlease give a person's name to recognise (For example, Satya)")
    else:
        name = ask_for_input("\nPlease give the person's name to capture")

# ----------------------------------------------------------------------
# Generate face database or load existing one
# ----------------------------------------------------------------------

if not use_database and name in data and args.data is None:  # Face data is available for the name