Example #1
def get_people(terminal: Terminal):
    people = []
    known_folders = os.listdir(OUTPUT_CSVS_FRIENDS_DIR)
    try:
        progress_bar = ProgressBar(
            'Loading knowns from .csv files', max=len(known_folders))
        for folder in known_folders:
            knowns_file = '%s/%s/known_people.csv' % (
                OUTPUT_CSVS_FRIENDS_DIR, folder)
            if os.path.exists(knowns_file):
                with open(knowns_file, 'r') as f:
                    reader = csv.DictReader(f)
                    for individual in reader:
                        people.append(individual)
            progress_bar.next()
        print()
        with open('%s/known_people.csv' % OUTPUT_CSVS_DIR, 'r') as f:
            # Read the rows up front: counting them by iterating the
            # DictReader would exhaust it before the loop below runs.
            rows = list(csv.DictReader(f))
            progress_bar_knowns = ProgressBar(
                'Loading knowns from .csv files', max=len(rows))
            for individual in rows:
                people.append(individual)
                progress_bar_knowns.next()
    except FileNotFoundError as ex:
        terminal.error(str(ex))
    return uniquify(people)
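
These snippets all share the same small API. A minimal sketch of the basic pattern, assuming ProgressBar comes from the `progress` package (e.g. `from progress.bar import Bar as ProgressBar`; the projects above may define their own subclass):

from progress.bar import Bar as ProgressBar

items = list(range(200))
bar = ProgressBar('Processing', max=len(items))
for item in items:
    pass  # do the real work here
    bar.next()
bar.finish()
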
Example #2
def saveAverageImage(kitti_base, pos_labels, shape, fname, avg_num=None):
    num_images = float(len(pos_labels))
    # check for None before taking the min, otherwise min(None, ...) raises
    if avg_num is None:
        avg_num = num_images
    else:
        avg_num = min(avg_num, num_images)

    # avg_img = np.zeros((shape[0],shape[1],3), np.float32)
    avg_img = np.zeros(shape, np.float32)
    progressbar = ProgressBar('Averaging ' + fname, max=len(pos_labels))
    num = 0
    for label in pos_labels:
        if num >= avg_num:
            break
        num += 1
        progressbar.next()
        sample = getCroppedSampleFromLabel(kitti_base, label)
        # sample = np.float32(sample)

        resized = resizeSample(sample, shape, label)

        resized = auto_canny(resized)
        resized = np.float32(resized)

        avg_img = cv2.add(avg_img, resized / float(avg_num))
    progressbar.finish()

    cv2.imwrite(fname, avg_img)
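
auto_canny is not an OpenCV function; Examples #2 and #4 presumably use the widely copied median-based wrapper around cv2.Canny, roughly:

import cv2
import numpy as np

def auto_canny(image, sigma=0.33):
    # pick Canny thresholds around the median pixel intensity
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)
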
Example #3
def compete(strategies, num_rounds=1000):
    """Create and run No Thanks competition."""

    # Play 3, 4, and 5 player games
    game_sizes = [3, 4, 5]

    # A dictionary of scores
    results = {}
    for num_players in game_sizes:
        results[num_players] = {}
        for strategy in strategies:
            results[num_players][strategy] = 0

    for num_players in game_sizes:
        for _ in ProgressBar(
                'Playing {}-player games'.format(num_players)).iter(
                    range(num_rounds)):
            selected_strategies = choices(strategies, k=num_players)
            players = [import_module(s).Player() for s in selected_strategies]
            winners, _ = nothanks.Game(players).run()

            for strategy, player in zip(selected_strategies, players):
                results[num_players][strategy] -= 1 / num_players / num_rounds
                if id(player) in winners:
                    results[num_players][strategy] += 1 / len(
                        winners) / num_rounds

    results = pd.DataFrame(results)
    results['combined'] = results.sum(axis=1)
    results.loc['total', :] = results.sum(axis=0)
    return results
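
Example #3 drives the bar through .iter() instead of calling next() by hand. A minimal sketch of that idiom, again assuming the `progress` package, whose iter() wraps an iterable, sets max from len() when available, and advances the bar once per item:

from progress.bar import Bar as ProgressBar

total = 0
for n in ProgressBar('Summing').iter(range(1000)):
    total += n
print(total)
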
Example #4
def average_image(pos_region_generator, shape, avg_num=None):
    pos_regions = list(pos_region_generator)

    num_images = float(len(pos_regions))
    if avg_num is None:
        avg_num = num_images
    else:
        avg_num = min(avg_num, num_images)

    window_dims = (shape[1], shape[0])

    # avg_img = np.zeros((shape[0],shape[1],3), np.float32)
    avg_img = np.zeros(shape, np.float32)
    progressbar = ProgressBar('Averaging ', max=avg_num)
    num = 0
    for reg in pos_regions:
        if num >= avg_num:
            break
        num += 1
        progressbar.next()

        resized = reg.load_cropped_resized_sample(window_dims)

        resized = auto_canny(resized)
        resized = np.float32(resized)

        avg_img = cv2.add(avg_img, resized / float(avg_num))
    progressbar.finish()

    return avg_img
Example #5
def save_regions(reg_gen, num_regions, window_dims, save_dir):
    progressbar = ProgressBar('Saving regions', max=num_regions)
    index = 0
    for img_region in itertools.islice(reg_gen, 0, num_regions):
        fname = os.path.join(save_dir, '{:06d}.png'.format(index))
        index += 1
        sample = img_region.load_cropped_resized_sample(window_dims)
        cv2.imwrite(fname, sample)
        progressbar.next()
    progressbar.finish()
Example #6
    def train(self, points, labels, Niter=1e6, eta=0.05):

        # weights, biases, number of layers, number of classes
        W, b, L, C = self.W, self.b, len(self.b), len(self.b[-1])
        # declare arrays
        a, δ, D = [None] * L, [None] * L, [None] * L
        # value of cost function at each iteration
        costvals = []
        # how often (in iterations) to record the cost; `costskip` is assumed
        # to be defined elsewhere in the original, so a default is set here
        costskip = 1000

        # let the random sequence (indices) of points selected for stochastic gradient descent be
        seq = np.random.randint(low=0, high=len(points), size=int(Niter))

        # loop over the seq with a progress bar
        for itr in ProgressBar("Training").iter(range(len(seq))):

            # the random training point
            k = seq[itr]
            x, y = points[k], int(labels[k])
            # 1-of-K representation for label
            yy = [int(y == c) for c in range(C)]

            # --- forward pass ---
            a[0] = x
            # For l = 1,...,L-1
            for l in range(1, L):
                # a[l] = σ( z[l] ) =  σ( W[l] a[l-1] + b[l] )
                a[l] = sigmoid(W[l] @ a[l - 1] + b[l])
                # D[l] = diag( σ'(z[l]) ) = diag( a[l]*(1-a[l]) )
                D[l] = np.diag(a[l] * (1 - a[l]))

            # --- backward pass ---
            # δ[L] = D[L]( a[L] − y(x{k}) )
            δ[L - 1] = D[L - 1] @ (a[L - 1] - yy)
            # For l = L-2,...,1
            for l in reversed(range(1, L - 1)):
                # δ[l] = D[l] * ( W[l+1]^T * δ[l+1] )
                δ[l] = D[l] @ (W[l + 1].T @ δ[l + 1])

            # --- gradient step ---
            for l in range(1, L):
                # W[l] <= W[l] - η δ[l] a[l-1]^T
                self.W[l] = W[l] - eta * np.outer(δ[l], a[l - 1])
                # b[l] <= b[l] - η δ[l]
                self.b[l] = b[l] - eta * δ[l]

            # --- cost function (calc every kth iteration) ---
            if itr % costskip == 0:
                currcost = self.total_cost(points, labels)
                costvals.append([itr, currcost])

        # return cost values
        return np.array(costvals)
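
Example #6 relies on a `sigmoid` helper that is not shown; a definition consistent with the derivative used above (D[l] = diag(a[l] * (1 - a[l]))) would be:

import numpy as np

def sigmoid(z):
    # elementwise logistic; its derivative is sigmoid(z) * (1 - sigmoid(z))
    return 1.0 / (1.0 + np.exp(-z))
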
Example #7
    def __run_list(self, net, dataloader, output_dir):
        pbar = ProgressBar('Processing', max=len(dataloader), width=48)
        for _, batch_data in enumerate(dataloader):
            imgs_input, codenames, sizes = batch_data
            imgs_recon = self.__infer_step(imgs_input, net)
            for idx in range(0, imgs_recon.shape[0]):
                orig_img_size = sizes[idx].numpy()
                recon_img = cv2.cvtColor(imgs_recon[idx], cv2.COLOR_RGB2BGR)
                recon_img = utils.cropping_center(recon_img, orig_img_size)
                cv2.imwrite('%s/%s.jpg' % (output_dir, codenames[idx]), recon_img)
            pbar.next()
        pbar.finish()
        return
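
utils.cropping_center in Example #7 is a project helper not included in the snippet. A plausible stand-in, assuming crop_hw is a (height, width) pair no larger than the image:

import numpy as np

def cropping_center(img, crop_hw):
    crop_h, crop_w = int(crop_hw[0]), int(crop_hw[1])
    h, w = img.shape[:2]
    y0 = (h - crop_h) // 2
    x0 = (w - crop_w) // 2
    return img[y0:y0 + crop_h, x0:x0 + crop_w]
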
Example #8
def fetch_photos(terminal):
    terminal.write('-' * 44)
    knowns = []
    try:
        knowns = facebook_api.get_knowns()
    except IOError as ex:
        token_not_generated(terminal, ex)
    progress_bar = ProgressBar('Saving knowns photos', max=len(knowns))
    for known in knowns:
        picture = facebook_api.get_profile_picture(known['id'])
        known['picture'] = picture
        progress_bar.next()
    save_list_of_dicts('%s/known_people.csv' %
                       OUTPUT_CSVS_DIR, knowns, ['id', 'name', 'picture'])
    terminal.info('%s knowns found' % len(knowns))
Example #9
def find_label_clusters(kitti_base,
                        kittiLabels,
                        shape,
                        num_clusters,
                        descriptors=None):
    if descriptors is None:
        progressbar = ProgressBar('Computing descriptors',
                                  max=len(kittiLabels))
        descriptors = []
        for label in kittiLabels:
            progressbar.next()
            img = getCroppedSampleFromLabel(kitti_base, label)
            # img = cv2.resize(img, (shape[1], shape[0]), interpolation=cv2.INTER_AREA)
            img = resizeSample(img, shape, label)
            hist = get_hog(img)
            descriptors.append(hist)
        progressbar.finish()
    else:
        print('find_label_clusters,', 'Using supplied descriptors.')
        print(len(kittiLabels), len(descriptors))
        assert (len(kittiLabels) == len(descriptors))

    # X = np.random.randint(25,50,(25,2))
    # Y = np.random.randint(60,85,(25,2))
    # Z = np.vstack((X,Y))

    # convert to np.float32
    Z = np.float32(descriptors)

    # define criteria and apply kmeans()
    K = num_clusters
    print('find_label_clusters,', 'kmeans:', K)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    attempts = 10
    ret, label, center = cv2.kmeans(Z, K, None, criteria, attempts,
                                    cv2.KMEANS_RANDOM_CENTERS)
    # ret,label,center=cv2.kmeans(Z,2,criteria,attempts,cv2.KMEANS_PP_CENTERS)

    print('ret:', ret)
    # print 'label:', label
    # print 'center:', center

    # # Now separate the data, Note the flatten()
    # A = Z[label.ravel()==0]
    # B = Z[label.ravel()==1]

    clusters = partition(kittiLabels, label)
    return clusters
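
The `partition` call at the end of Example #9 is not defined in the snippet; judging from how it is used, it groups the original labels by their k-means cluster index. A plausible minimal version:

def partition(items, cluster_indices):
    # cluster_indices is the (N, 1) label array returned by cv2.kmeans
    clusters = {}
    for item, idx in zip(items, cluster_indices.ravel()):
        clusters.setdefault(int(idx), []).append(item)
    return [clusters[k] for k in sorted(clusters)]
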
Example #10
def fetch_emails(terminal):
    terminal.write('-' * 44)
    knowns = []
    emails = []
    try:
        knowns = facebook_api.get_knowns()
    except IOError as ex:
        token_not_generated(terminal, ex)
    progress_bar = ProgressBar('Fetching emails', max=len(knowns))
    for known in knowns:
        known_data = facebook_api.get_profile_data(known['id'])
        known['email'] = known_data.get('email', None)
        if known['email'] is not None:
            emails.append(known['email'])
        progress_bar.next()
    save_list_of_dicts('%s/known_emails.csv' %
                       OUTPUT_CSVS_DIR, knowns, ['id', 'name', 'email'])
    terminal.info('%s/%s emails found' % (len(emails), len(knowns)))
Example #11
def fetch_all(terminal):
    terminal.write('-' * 44)
    knowns = []
    try:
        knowns = facebook_api.get_knowns()
    except IOError as ex:
        token_not_generated(terminal, ex)
    progress_bar = ProgressBar('Fetching basic data', max=len(knowns))
    for known in knowns:
        known_data = facebook_api.get_profile_data(known['id'])
        picture = facebook_api.get_profile_picture(known['id'])
        known['picture'] = picture.name
        known['phone'] = known_data.get('mobile_phone', None)
        known['email'] = known_data.get('email', None)
        known['username'] = known_data.get('username', None)
        progress_bar.next()
    file_name = '%s/known_all.csv' % OUTPUT_CSVS_DIR
    fields = ['id', 'username', 'name', 'email', 'phone', 'picture']
    save_list_of_dicts(file_name, knowns, fields)
    terminal.info('%s knowns found' % len(knowns))
Example #12
def compute_hog_descriptors(hog, image_regions, window_dims, label):
    # hog = get_hog_object(window_dims)

    progressbar = ProgressBar('Computing descriptors', max=len(image_regions))
    reg_descriptors = []
    for reg in image_regions:
        progressbar.next()
        img = reg.load_cropped_resized_sample(window_dims)

        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        winStride = (8, 8)
        padding = (0, 0)
        locations = []  # (10, 10)# ((10,20),)
        hist = hog.compute(grey, winStride, padding, locations)

        reg_desc = utils.RegionDescriptor(reg, hist, label)
        reg_descriptors.append(reg_desc)
    progressbar.finish()
    return reg_descriptors
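
The commented-out get_hog_object(window_dims) in Example #12 presumably builds an OpenCV HOGDescriptor sized to the detection window; a minimal version under that assumption (window dimensions must be multiples of the 8x8 cell size):

import cv2

def get_hog_object(window_dims):
    win_size = (int(window_dims[0]), int(window_dims[1]))  # (width, height)
    block_size = (16, 16)
    block_stride = (8, 8)
    cell_size = (8, 8)
    nbins = 9
    return cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, nbins)
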
Example #13
    def start(self, download_dir: str):
        """Starts the download process.

        Arguments:
            download_dir {str} -- Download directory
        """

        self.logger.info(
            f"Attempting to download {self.comic.title}, {len(self.chapters)} chapter(s).")

        download_dir = path.normpath(download_dir)
        comic_dir = path.join(
            download_dir, clean_filename(self.comic.title))

        self.comic_dir = comic_dir

        self.logger.debug(f"Trying to create folders: {comic_dir}")
        create_folders(comic_dir)

        total_pages = 0
        for chapter in self.chapters:
            total_pages += len(chapter.get_pages())

        with ProgressBar(f"Downloading {self.comic.title}", max=total_pages) as bar:
            start_time = time.time()

            if self.number_of_threads > 1:
                self.logger.debug("Starting multithreaded download...")
                self._multi_threaded_download(download_dir, bar=bar)
            else:
                self.logger.debug("Starting singlethreaded download...")
                self._single_threaded_download(download_dir, bar=bar)

            end_time = time.time()

            print()
            self.logger.info(
                f"Download complete! Time elapsed: {end_time - start_time}")

            self._convert()
            self.logger.info(f"Operation done!")
Example #14
def save_list_of_dicts(file_name: str, l: list, fields: list):
    exists = os.path.exists(file_name)
    mode = 'w+'
    if exists:
        mode = 'a'
    with open(file_name, mode) as f:
        writer = csv.DictWriter(f, fieldnames=fields)
        if not exists:
            writer.writeheader()
        progress_bar = ProgressBar('Saving %s' %
                                   relative_path(file_name), max=len(l))
        for el in l:
            row = {}
            for field in fields:
                # a missing field raises KeyError for the caller to handle
                row[field] = el[field]
            if row.keys():
                writer.writerow(row)
            progress_bar.next()
    print()
    return f
Example #15
    def _run_epoch(self,
                   model,
                   dataloader,
                   optimize=False,
                   save_activations=False,
                   reweight=None,
                   bit_pretrained=False,
                   adv_metrics=False):
        """Runs the model on a given dataloader.
        
        Note:
            The latter item in the returned tuple is what is necessary to run 
            GEORGECluster.train and GEORGECluster.evaluate.
        
        Args:
            model(nn.Module): A PyTorch model.
            dataloader(DataLoader): The dataloader. The dataset within must
                subclass GEORGEDataset.
            optimize(bool, optional): If True, the model is trained on self.criterion.
            save_activations(bool, optional): If True, saves the activations in
                `outputs`. Default is False.
            bit_pretrained(bool, optional): If True, assumes bit_pretrained and does not evaluate
                performance metrics
                
        Returns:
            metrics(Dict[str, Any]) A dictionary object that stores the metrics defined
                in self.config['metric_types'].
            outputs(Dict[str, Any]) A dictionary object that stores artifacts necessary
                for model analysis, including labels, activations, and predictions.
        """
        dataset = dataloader.dataset
        self._check_dataset(dataset)
        type_to_num_classes = {
            label_type: dataset.get_num_classes(label_type)
            for label_type in LABEL_TYPES
            if label_type in dataset.Y_dict.keys()
        }
        outputs = {
            'metrics': None,
            'activations': [],
            'superclass': [],
            'subclass': [],
            'true_subclass': [],
            'alt_subclass': [],
            'targets': [],
            'probs': [],
            'preds': [],
            'losses': [],
            'reweight': [],
        }
        activations_handle = self._init_activations_hook(
            model, outputs['activations'])
        if optimize:
            progress_prefix = 'Training'
            model.train()
        else:
            progress_prefix = 'Evaluation'
            model.eval()

        per_class_meters = self._init_per_class_meters(type_to_num_classes)
        metric_meters = {
            k: AverageMeter()
            for k in ['loss', 'acc', 'loss_rw', 'acc_rw']
        }

        progress = self.config['show_progress']
        if progress:
            bar = ProgressBar(progress_prefix, max=len(dataloader), width=50)

        for batch_idx, (inputs, targets) in enumerate(dataloader):
            batch_size = len(inputs)
            if self.use_cuda:
                inputs, targets = move_to_device([inputs, targets],
                                                 device=self.device)

            type_to_labels = {}
            for label_type in type_to_num_classes.keys():
                type_to_labels[label_type] = targets[label_type]
                outputs[label_type].append(targets[label_type])

            if optimize and not bit_pretrained:
                logits = model(inputs)
                loss_targets = targets['superclass']
                co = self.criterion(logits, loss_targets, targets['subclass'])
                loss, (losses, corrects), _ = co
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            else:
                with torch.no_grad():
                    logits = model(inputs)
                    loss_targets = targets['superclass']
                    if bit_pretrained:
                        if progress:
                            bar.suffix = PROGRESS_BAR_SUFFIX.format(
                                batch=batch_idx + 1,
                                size=len(dataloader),
                                total=format_timedelta(bar.elapsed_td),
                                eta=format_timedelta(bar.eta_td),
                                **{k: 0
                                   for k in prog_metric_names})
                            bar.next()
                        continue
                    co = self.criterion(logits, loss_targets,
                                        targets['subclass'])
                    loss, (losses, corrects), _ = co

            if not save_activations:
                outputs['activations'].pop()  # delete activations

            reweight_vec = None if reweight is None else reweight[
                targets['true_subclass']]

            metrics = self._compute_progress_metrics(losses,
                                                     corrects,
                                                     type_to_labels,
                                                     type_to_num_classes,
                                                     per_class_meters,
                                                     reweight=reweight_vec)
            acc, preds = compute_accuracy(logits.data,
                                          loss_targets.data,
                                          return_preds=True)

            outputs['probs'].append(
                F.softmax(logits, dim=1).detach().cpu()[:, 1])
            outputs['preds'].append(preds)
            outputs['losses'].append(losses.detach().cpu())
            outputs['targets'].append(loss_targets.detach().cpu())
            if reweight_vec is not None:
                outputs['reweight'].append(reweight_vec.cpu())

            self._update_metrics(metric_meters, acc, loss, losses, corrects,
                                 batch_size, reweight_vec)

            PROGRESS_BAR_STR = PROGRESS_BAR_SUFFIX

            if self.compute_auroc:
                sub_map = dataloader.dataset.get_class_map('subclass')
                assert (set(sub_map.keys()) == {0,
                                                1})  # must be a binary problem
                targets_cat, probs_cat = torch.cat(
                    outputs['targets']), torch.cat(outputs['probs'])
                auroc = compute_roc_auc(targets_cat, probs_cat)
                metrics['auroc'] = auroc
                has_alt_subclass = 'alt_subclass' in dataloader.dataset.Y_dict
                for key in ['subclass', 'true_subclass'
                            ] + ['alt_subclass'] * has_alt_subclass:
                    sub_map = dataloader.dataset.get_class_map(key)
                    neg_subclasses = sub_map[0]
                    pos_subclasses = sub_map[1]
                    if len(neg_subclasses) == len(pos_subclasses) == 1:
                        # only one subclass in each superclass
                        rob_auroc = auroc
                    else:
                        subclass_labels = torch.cat(outputs[key])
                        paired_aurocs = []
                        for neg_subclass in neg_subclasses:
                            for pos_subclass in pos_subclasses:
                                inds = ((subclass_labels == neg_subclass) |
                                        (subclass_labels
                                         == pos_subclass)).cpu()
                                subset_pair_auroc = compute_roc_auc(
                                    targets_cat[inds], probs_cat[inds])
                                paired_aurocs.append(subset_pair_auroc)
                        rob_auroc = min(paired_aurocs)
                    metrics[f'{key}_rob_auroc'] = rob_auroc
                if not has_alt_subclass:
                    metrics['alt_subclass_rob_auroc'] = auroc
                PROGRESS_BAR_STR += ' | AUROC: {auroc:.4f} | R AUROC: {subclass_rob_auroc:.4f} | ' \
                                    'TR AUROC: {true_subclass_rob_auroc:.4f} | AR AUROC: {alt_subclass_rob_auroc:.4f}'

            if progress:
                bar.suffix = PROGRESS_BAR_STR.format(
                    batch=batch_idx + 1,
                    size=len(dataloader),
                    total=format_timedelta(bar.elapsed_td),
                    eta=format_timedelta(bar.eta_td),
                    **{
                        **metrics,
                        **{k: v.avg
                           for k, v in metric_meters.items()}
                    })
                bar.next()
        if progress:
            bar.finish()
        if activations_handle:
            activations_handle.remove()

        for k, v in outputs.items():
            if type(v) == list and len(v) > 0:
                outputs[k] = concatenate_iterable(v)

        if bit_pretrained:
            return outputs['metrics'], outputs

        outputs['metrics'] = metrics
        outputs['metrics'].update(
            {k: float(v.avg)
             for k, v in metric_meters.items()})
        outputs['metrics'].update(self._compute_aggregate_metrics(outputs))
        self._print_output_metrics(outputs)

        if adv_metrics:
            scaa = np.mean([
                ga.avg * 100
                for ga in np.array(per_class_meters[f'per_true_subclass_accs'])
            ])
            self.logger.info(
                f'All accs: {[ga.avg * 100 for ga in np.array(per_class_meters[f"per_true_subclass_accs"])]}'
            )
            self.logger.info(f'SCAA: {scaa:.3f}')
            ap = sklearn.metrics.average_precision_score(
                outputs['targets'],
                outputs['probs'],
                sample_weight=outputs['reweight']
                if reweight_vec is not None else None)
            self.logger.info(f'MaP: {ap:.4f}')

        return outputs['metrics'], outputs
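
The _run_epoch example above updates bar.suffix with freshly formatted metrics before each next() call, so the rendered line always shows current numbers. A stripped-down sketch of that idiom, assuming the `progress` package:

from progress.bar import Bar as ProgressBar

steps = 100
bar = ProgressBar('Evaluation', max=steps, width=50)
for step in range(steps):
    loss = 1.0 / (step + 1)  # placeholder metric for the sketch
    bar.suffix = 'batch {}/{} | loss {:.4f}'.format(step + 1, steps, loss)
    bar.next()
bar.finish()
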
Example #16
    def process(self,
                cpu: interface.Processor,
                selector: Tuple[str, str] = None,
                source: tsdb.Database = None,
                fieldmapper: FieldMapper = None,
                gzip: bool = False,
                buffer_size: int = 1000) -> None:
        """
        Process each item in a [incr tsdb()] test suite.

        The output rows will be flushed to disk when the number of new
        rows in a table is *buffer_size*.

        Args:
            cpu (:class:`~delphin.interface.Processor`): processor
                interface (e.g., :class:`~delphin.ace.ACEParser`)
            selector: a pair of (table_name, column_name) that specify
                the table and column used for processor input (e.g.,
                `('item', 'i-input')`)
            source (:class:`~delphin.tsdb.Database`): test suite from
                which inputs are taken; if `None`, use the current
                test suite
            fieldmapper (:class:`FieldMapper`): object for
                mapping response fields to [incr tsdb()] fields; if
                `None`, use a default mapper for the standard schema
            gzip: if `True`, compress non-empty tables with gzip
            buffer_size (int): number of output rows to hold in memory
                before flushing to disk; ignored if the test suite is all
                in-memory; if `None`, do not flush to disk
        Examples:
            >>> ts.process(ace_parser)
            >>> ts.process(ace_generator, 'result:mrs', source=ts2)
        """
        if selector is None:
            assert isinstance(cpu.task, str)
            input_table, input_column = _default_task_selectors[cpu.task]
        else:
            input_table, input_column = selector
        if (input_table not in self.schema
            or all(f.name != input_column
                   for f in self.schema[input_table])):
            raise ITSDBError('invalid table or column: {!s}, {!s}'
                             .format(input_table, input_column))
        if source is None:
            source = self
        if fieldmapper is None:
            fieldmapper = FieldMapper(source=source)
        index = tsdb.make_field_index(source.schema[input_table])

        affected = set(fieldmapper.affected_tables).intersection(self.schema)
        for name in affected:
            self[name].clear()

        key_names = [f.name for f in source.schema[input_table] if f.is_key]

        bar = None
        if not logger.isEnabledFor(logging.INFO):
            with tsdb.open(source.path, input_table) as fh:
                total = sum(1 for _ in fh)
            if total > 0:
                bar = ProgressBar('Processing', max=total)

        for row in source[input_table]:
            datum = row[index[input_column]]
            keys = [row[index[name]] for name in key_names]
            keys_dict = dict(zip(key_names, keys))
            response = cpu.process_item(datum, keys=keys_dict)

            logger.info(
                'Processed item {:>16}  {:>8} results'
                .format(tsdb.join(keys), len(response['results']))
            )
            if bar:
                bar.next()

            for tablename, data in fieldmapper.map(response):
                _add_row(self, tablename, data, buffer_size)

        for tablename, data in fieldmapper.cleanup():
            _add_row(self, tablename, data, buffer_size)

        if bar:
            bar.finish()

        tsdb.write_database(self, self.path, gzip=gzip)
Example #17
    def detect_objects_in_image(self,
                                img,
                                greyscale=False,
                                resize=True,
                                return_detection_img=True,
                                progress=True):
        h, w = img.shape[:2]
        scaled_img_dims = (w, h)
        if resize:
            max_w = 1024
            # max_w = 200
            if img.shape[0] > max_w:
                # print 'resize:', img_path, img.shape
                # img = cv2.resize(img, dsize=None, fx=0.5, fy=0.5)
                h, w = img.shape[:2]
                aspect = w / float(h)
                new_h = int(max_w / aspect)
                img = cv2.resize(img, dsize=(max_w, new_h))
                scaled_img_dims = (max_w, new_h)

        print('img.shape:', img.shape)

        def get_win_gen(only_rects=False):
            return detectutils.sliding_window_generator(
                img,
                window_dims=self.window_dims,
                scale_factor=1.1,
                strides=(8, 8),
                # scale_factor=1.2,
                # strides=(16, 16),
                only_rects=only_rects)

        win_gen = get_win_gen()
        num_windows = sum(1 for _ in get_win_gen(only_rects=True))
        batch_size = 100
        # num_batches = int(np.ceil(num_windows/float(batch_size)))

        detected_cars = []

        progressbar = None
        if progress:
            progressbar = ProgressBar('Processing windows:',
                                      max=num_windows,
                                      suffix='%(index)d/%(max)d - %(eta)ds')
        while True:
            samples_windows = list(itertools.islice(win_gen, 0, batch_size))
            if len(samples_windows) == 0:
                break

            # Unzip the list of tuples into two lists:
            samples, windows = zip(*samples_windows)

            # Perform the required colour conversion and preprocessing:
            samples = [self.prepare_sample(sample) for sample in samples]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns*depth]
            samples = np.stack(samples)
            samples = samples.reshape(
                samples.shape[0],
                samples.shape[1] * samples.shape[2] * samples.shape[3])

            feed = {self.x: samples, self.keep_prob: 1.0}
            label_probs = self.sess.run(tf.nn.softmax(self.logits),
                                        feed_dict=feed)

            if progress:
                progressbar.next(batch_size)

            for probs, window in zip(label_probs, windows):
                pos_prob, neg_prob = probs
                if pos_prob > neg_prob:
                    detected_cars.append(window.opencv_bbox)

        if progress:
            progressbar.finish()

        if len(detected_cars) > 0:
            detected_cars = np.stack(detected_cars)
        else:
            detected_cars = np.array([])

        if return_detection_img:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            draw_detections(img, detected_cars)
            return detected_cars.tolist(), img
        else:
            return detected_cars.tolist(), scaled_img_dims
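
Example #17 advances the bar a whole batch at a time with next(batch_size) and uses a %-style suffix template that the bar interpolates against its own attributes. A minimal sketch of both, assuming the `progress` package:

from progress.bar import Bar as ProgressBar

total, batch_size = 1000, 100
bar = ProgressBar('Processing windows:', max=total,
                  suffix='%(index)d/%(max)d - %(eta)ds')
for start in range(0, total, batch_size):
    pass  # process items[start:start + batch_size] here
    bar.next(batch_size)
bar.finish()
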
Example #18
 def train(self, num_games, print_progress=True):
     # Create a set of identical players, each referencing this game tree
     players = [
         Player(self,
                num_players=self.num_players,
                starting_coins=self.starting_coins,
                low_card=self.low_card,
                high_card=self.high_card,
                discard=self.discard) for _ in range(self.num_players)
     ]
     # Simulate self-play
     if print_progress:
         iterable = ProgressBar('Training').iter(range(num_games))
     else:
         iterable = range(num_games)
     for _ in iterable:
         winners, _ = nothanks.Game(players,
                                    starting_coins=self.starting_coins,
                                    low_card=self.low_card,
                                    high_card=self.high_card,
                                    discard=self.discard).run()
         # Update the shared game tree based on game results.
         for player in players:
             payoff = -1 / self.num_players
             if id(player) in winners:
                 payoff += 1 / len(winners)
             for state_hash, action in player.history.items():
                 node = self.tree.nodes[state_hash]
                 state = node['state']
                 # If the player has no coins, there is no decision to make.
                 if not node['can_pass']:
                     continue
                 # If the expected return from the alternate decision is
                 # higher than the return from this game, the player regrets
                 # not having chosen the alternate decision.
                 take_state = deepcopy(state)
                 take_state.take()
                 take_edge = self.tree.edges[(state_hash,
                                              take_state.prehash())]
                 pass_state = deepcopy(state)
                 pass_state.pass_turn()
                 pass_edge = self.tree.edges[(state_hash,
                                              pass_state.prehash())]
                 if action:  # took card and pot when in this state
                     alternate = pass_state
                     alt_payoff = self.get_expected_payoff_pass(state)
                 else:  # passed when in this state
                     alternate = take_state
                     alt_payoff = self.get_expected_payoff_take(state)
                 regret = alt_payoff - payoff
                 # If we have never seen this state before, assign defaults.
                 if node['visits'] == 0:
                     logger.debug(
                         'LOG: State {} was visited for the first time this game.'
                         .format(state_hash))
                 # Then update node and edge values
                 node['visits'] += 1
                 # Add new regret for the action we DIDN'T take
                 node['regret'][not action] += regret
                 # Update strategy based on new cumulative regret
                 effective_regret = [max(0, r) for r in node['regret']]
                 total_regret = sum(effective_regret)
                 if total_regret == 0:
                     take_edge['weight'] = 1 / 2
                     pass_edge['weight'] = 1 / 2
                 else:
                     take_edge[
                         'weight'] = effective_regret[True] / total_regret
                     pass_edge[
                         'weight'] = effective_regret[False] / total_regret
                 # Adjust running average strategy
                 take_edge['avg_weight'] *= (node['visits'] -
                                             1) / node['visits']
                 take_edge[
                     'avg_weight'] += take_edge['weight'] / node['visits']
                 pass_edge['avg_weight'] *= (node['visits'] -
                                             1) / node['visits']
                 pass_edge[
                     'avg_weight'] += pass_edge['weight'] / node['visits']