Beispiel #1
0
    def start(self):
        """Drive the full compile → capture → analyze → report pipeline.

        When javac was invoked only for its version, delegate to javac
        (or, under Buck, print the infer key to stderr). Otherwise run
        each phase in order, honoring COMPILE/CAPTURE-only modes with an
        early os.EX_OK, and return the collected stats dict.
        """
        if self.javac.args.version:
            if not self.args.buck:
                return self.javac.run()
            print(utils.infer_key(self.args.analyzer), file=sys.stderr)
            return None

        start_time = time.time()

        self.compile()
        if self.args.analyzer == COMPILE:
            return os.EX_OK

        self.run_infer_frontend()
        self.timing["capture"] = utils.elapsed_time(start_time)
        if self.args.analyzer == CAPTURE:
            return os.EX_OK

        self.analyze_and_report()
        self.close()
        self.timing["total"] = utils.elapsed_time(start_time)
        self.save_stats()

        print("\nAnalyzed %s in %s" % (
            utils.get_plural("procedure", self.stats["int"]["procedures"]),
            utils.get_plural("file", self.stats["int"]["files"])))

        return self.stats
Beispiel #2
0
    def start(self):
        """Capture, analyze and report, returning the stats dict.

        Version-only invocations delegate to javac (or print the infer
        key under Buck). A failed capture yields an empty dict.
        """
        if self.javac.args.version:
            if self.args.buck:
                print(utils.infer_key(self.args.analyzer), file=sys.stderr)
                return None
            return self.javac.run()

        start_time = time.time()
        if self.capture() != os.EX_OK:
            # Nothing was captured, so there is nothing to analyze.
            return {}

        self.timing['capture'] = utils.elapsed_time(start_time)
        self.analyze_and_report()
        self.close()
        self.timing['total'] = utils.elapsed_time(start_time)
        self.save_stats()

        # Compile/capture-only runs produce no analysis summary to print.
        if self.args.analyzer not in [COMPILE, CAPTURE]:
            procs_str = utils.get_plural('procedure',
                                         self.stats['int']['procedures'])
            files_str = utils.get_plural('file', self.stats['int']['files'])
            print('\nAnalyzed %s in %s' % (procs_str, files_str))
        return self.stats
Beispiel #3
0
    def train(self, num_epochs, scheduler, info_freq, log_dir):
        """Train multimodal PyTorch model.

        Runs the training loop and prints a timing summary plus the best
        validation C-indices; a Ctrl-C prints a partial summary instead.
        """
        t0 = time.time()

        try:
            self._run_training_loop(num_epochs, scheduler, info_freq, log_dir)
        except KeyboardInterrupt:
            # Graceful abort: report how long we trained and the best
            # values recorded so far.
            h, m, s = utils.elapsed_time(t0)
            print()
            print('>>> Keyboard interrupt! <<<')
            print(f'(trained for {h}h {m}m {s}s)')
            print()
            print('Best validation concordance values:')
            for metric, value in self.best_perf.items():
                print(f'     {round(value, 4)} ({metric})')
        else:
            h, m, s = utils.elapsed_time(t0)
            print()
            summary = '>>>>> Training completed in'
            if h > 0:
                summary += f' {h}h'
            if m > 0:
                summary += f' {m}m'
            print(summary + f' {s}s')
            print('>>>>> Best validation C-indices:')
            for metric, value in self.best_perf.items():
                print(f'     {value} ({metric})')
Beispiel #4
0
    def start(self):
        """Run capture, then (on success) analysis, reporting and stats.

        Returns javac's result for version-only runs (or prints the infer
        key under Buck), the stats dict on success, or {} when capture
        fails.
        """
        if self.javac.args.version:
            if not self.args.buck:
                return self.javac.run()
            print(utils.infer_key(self.args.analyzer), file=sys.stderr)
            return None

        t0 = time.time()
        if self.capture() != os.EX_OK:
            return {}

        self.timing['capture'] = utils.elapsed_time(t0)
        self.analyze_and_report()
        self.close()
        self.timing['total'] = utils.elapsed_time(t0)
        self.save_stats()

        if self.args.analyzer not in [COMPILE, CAPTURE]:
            summary = '\nAnalyzed %s in %s' % (
                utils.get_plural('procedure', self.stats['int']['procedures']),
                utils.get_plural('file', self.stats['int']['files']))
            print(summary)
        return self.stats
Beispiel #5
0
 def analyze_and_report(self):
     """Run the analysis phase and then error reporting, timing each.

     Skipped entirely in compile/capture-only modes; reporting only
     happens when the analysis exits cleanly.
     """
     if self.args.analyzer in [COMPILE, CAPTURE]:
         return
     analysis_start = time.time()
     if self.analyze() != os.EX_OK:
         return
     self.timing["analysis"] = utils.elapsed_time(analysis_start)
     reporting_start = time.time()
     self.report_errors()
     self.timing["reporting"] = utils.elapsed_time(reporting_start)
Beispiel #6
0
 def analyze_and_report(self):
     """Time and run analysis; on success, time and run error reporting."""
     if self.args.analyzer in [COMPILE, CAPTURE]:
         return  # compile/capture-only runs have nothing to analyze
     analysis_start = time.time()
     if self.analyze() == os.EX_OK:
         self.timing['analysis'] = utils.elapsed_time(analysis_start)
         reporting_start = time.time()
         self.report_errors()
         self.timing['reporting'] = utils.elapsed_time(reporting_start)
Beispiel #7
0
 def analyze_and_report(self):
     """Run analysis; when it succeeds, run and time error reporting."""
     if self.args.analyzer in [COMPILE, CAPTURE]:
         return
     if self.analyze() != os.EX_OK:
         return
     reporting_start = time.time()
     self.report_errors()
     self.timing['reporting'] = utils.elapsed_time(reporting_start)
Beispiel #8
0
def wastar_search(start_v,
                  end_v,
                  neighbors_fn,
                  cost_fn=unit_cost_fn,
                  heuristic_fn=zero_heuristic_fn,
                  w=1,
                  max_cost=INF,
                  max_time=INF):
    """Weighted A* from start_v to end_v.

    Priority is g + w*h; entries above max_cost are never queued and the
    search aborts once max_time wall seconds elapse. Returns the path via
    retrace_path, or None when no goal is reached.
    """
    # TODO: lazy wastar to get different paths
    def priority(g, h):
        return g + w * h

    start_time = time.time()
    visited = {start_v: Node(0, None)}
    queue = [(priority(0, heuristic_fn(start_v)), 0, start_v)]
    while queue and (elapsed_time(start_time) < max_time):
        _, g, v = heappop(queue)
        if visited[v].g < g:
            continue  # stale queue entry superseded by a cheaper path
        if v == end_v:
            return retrace_path(visited, v)
        for nbr in neighbors_fn(v):
            nbr_g = g + cost_fn(v, nbr)
            if (nbr not in visited) or (nbr_g < visited[nbr].g):
                visited[nbr] = Node(nbr_g, v)
                nbr_priority = priority(nbr_g, heuristic_fn(nbr))
                if nbr_priority < max_cost:
                    heappush(queue, (nbr_priority, nbr_g, nbr))
    return None
Beispiel #9
0
 def start(self):
     """Run javac capture and, when it succeeds, analysis and reporting.

     Returns the stats dict on success, {} when capture fails, or
     delegates to javac when only the version was requested (printing
     the infer key instead under Buck).
     """
     if self.javac.args.version:
         if self.args.buck:
             print(utils.infer_key(self.args.analyzer), file=sys.stderr)
             return None
         return self.javac.run()

     start_time = time.time()
     if self.capture() != os.EX_OK:
         return {}
     self.timing['capture'] = utils.elapsed_time(start_time)
     self.analyze_and_report()
     self.close()
     self.timing['total'] = utils.elapsed_time(start_time)
     self.save_stats()
     return self.stats
Beispiel #10
0
 def start(self):
     """Capture then analyze/report, returning the stats dict ({} on a
     failed capture). Version-only invocations are handed to javac, or
     print the infer key to stderr when running under Buck.
     """
     if self.javac.args.version:
         if not self.args.buck:
             return self.javac.run()
         print(utils.infer_key(self.args.analyzer), file=sys.stderr)
         return None

     t0 = time.time()
     if self.capture() != os.EX_OK:
         return {}
     self.timing["capture"] = utils.elapsed_time(t0)
     self.analyze_and_report()
     self.close()
     self.timing["total"] = utils.elapsed_time(t0)
     self.save_stats()
     return self.stats
Beispiel #11
0
def load_data(elec_index, args):
    """Load train/test frequency-feature datasets for the requested label.

    Splits subjects into train/test (stratified where possible), loads the
    per-split data via load_freq_data and optionally reports timing.

    Parameters:
        elec_index: electrode index/indices forwarded to load_freq_data.
        args: parsed CLI namespace; reads in_path, label, feature,
            verbose, time and test_size.

    Returns:
        (train_set, test_set) as produced by load_freq_data.
    """
    data_path = args.in_path
    og_labels = np.array(LABELS[args.label])
    stratify = og_labels
    # Only "gender" has its own column; every other label reads "age".
    if args.label == "gender":
        col = args.label
    else:
        col = "age"

    # Subject labels are not stratified — presumably each subject is its
    # own class, so stratification would be impossible. TODO confirm.
    if args.label in ["subject"]:
        stratify = None

    if args.feature == "bands":
        get_features = extract_bands
    else:
        get_features = lambda x: x

    if args.verbose > 0:
        print("Loading data...")
    data_df = SUB_DF[["participant_id", col]]
    train_index, test_index = train_test_split(
        list(range(len(data_df))),
        test_size=int(args.test_size * len(data_df)),
        shuffle=True,
        stratify=stratify,
        random_state=42,
    )

    train_df = data_df.iloc[train_index]
    train_labels = og_labels[train_index]
    test_df = data_df.iloc[test_index]
    # BUG FIX: test labels were previously taken with train_index, which
    # mislabelled the entire test split.
    test_labels = og_labels[test_index]

    if args.verbose > 0 and args.time:
        start = time.time()
    train_set = load_freq_data(train_df, elec_index, get_features,
                               train_labels, args, data_path)
    test_set = load_freq_data(test_df, elec_index, get_features, test_labels,
                              args, data_path)
    if args.verbose > 0 and args.time:
        print("Time spent loading data:", elapsed_time(time.time(), start))
    if args.verbose > 0:
        print("Done")
        print(
            f"number of subjects for training: {len(train_df)}, testing: {len(test_df)}"
        )
        print(
            f"train_size: {train_set[0].shape} (Used for hyperparameter tuning)"
        )
        print(f"test_size: {test_set[0].shape} (used to evaluate the model)")
    return train_set, test_set
Beispiel #12
0
def classif(train_set, test_set, args):
    """Random-search a classifier on the train split and score it on test.

    Returns (best_params, train_score, test_score).
    """
    X, y, groups = train_set
    X_test, y_test, groups_test = test_set
    timed = args.verbose > 0 and args.time

    cv = create_crossval(args.label, y)
    if timed:
        start = time.time()
    clf, param, train = random_search(args, cv, X, y, groups)
    # LDA/QDA have no hyperparameters worth timing the search for.
    if timed and args.clf not in ["LDA", "QDA"]:
        print("Time spend in Random Search:", elapsed_time(time.time(), start))

    cv = create_crossval(args.label, y_test)
    if timed:
        start = time.time()
    fold_scores = cross_val_score(clf,
                                  X_test,
                                  y_test,
                                  groups=groups_test,
                                  cv=cv,
                                  n_jobs=args.cores)
    test = np.mean(fold_scores)
    if timed:
        print("Time spent evaluating the model:",
              elapsed_time(time.time(), start))
    return param, train, test
Beispiel #13
0
def preprocess_data():
    """Build train/val/test time-lagged samples from prices and tweet sentiment.

    Loads stock prices and tweets from fixed local paths, scores tweets
    with a pretrained sentiment model, merges the streams, windows them
    into lagged samples, pickles the three splits under saved_data/ and
    returns them.
    """
    print('Beginning Preprocessing:')
    t_start = time()

    print('\t(1/5) Loading stock price data...')
    prices_train, prices_val, prices_test = load_stock_price_data(
        '/home/mrkeaton/Documents/Datasets/stocknet-dataset/price/preprocessed',
        start_dt, end_train, end_val, end_dt)

    print('\t(2/5) Loading tweet data...')
    tweets_train, tweets_val, tweets_test = load_tweet_data(
        '/home/mrkeaton/Documents/Datasets/stocknet-dataset/tweet/preprocessed',
        end_train, end_val)

    print('\t(3/5) Generating sentiments from tweet data...')
    sent_train = pretrained_sentiment(tweets_train)
    sent_val = pretrained_sentiment(tweets_val)
    sent_test = pretrained_sentiment(tweets_test)

    print('\t(4/5) Combining price data and sentiments...')
    merged_train = combine_price_and_sentiment(prices_train, sent_train,
                                               start_dt, end_train)
    merged_val = combine_price_and_sentiment(prices_val, sent_val,
                                             end_train, end_val)
    merged_test = combine_price_and_sentiment(prices_test, sent_test,
                                              end_val, end_dt)

    print('\t(5/5) Creating time-lagged data samples...')
    data_train = gen_timeseries_samples(merged_train, step, lag)
    data_val = gen_timeseries_samples(merged_val, step, lag)
    data_test = gen_timeseries_samples(merged_test, step, lag)

    print('Completed in {}'.format(elapsed_time(time() - t_start)))

    print('Saving files as pickles...\n')
    if not os.path.exists('saved_data'):
        os.mkdir('saved_data')

    def _dump(obj, filename):
        # Helper: pickle one split into saved_data/.
        with open(os.path.join('saved_data', filename), 'wb') as fh:
            pickle.dump(obj, fh)

    _dump(data_train, 'transformer_train_data.pkl')
    _dump(data_val, 'transformer_val_data.pkl')
    _dump(data_test, 'transformer_test_data.pkl')

    return data_train, data_val, data_test
Beispiel #14
0
def main():
    """Entry point: parse options, echo the run configuration, run the
    reachability analysis and write timing/memory stats to the .results file.
    """
    common.initialize()

    known, common.gopts = getopts(header)

    statsName = "%s/%s.results" % (common.gopts.out, common.gopts.name)
    common.gopts.statsF = open(statsName, "w")

    def announce(text):
        # Echo configuration to both stdout and stderr, in that order.
        print(text)
        eprint(text)

    announce("\t(mode: %s)" % common.gopts.mode)
    announce("\t(reuse: %s)" % common.gopts.reuse)
    announce("\t(opt: %s)" % common.gopts.opt)
    announce("\t(const: %s)" % common.gopts.const)
    announce("\t(wires: %s)" % common.gopts.wires)

    if common.gopts.init != DEFAULT_INITSZ:
        eprint("\t(init size: %s)" % common.gopts.init)

    if common.gopts.mode == "frpo":
        # Forward pass first, then backward reachability seeded with it.
        from fr import forwardReach
        system = forwardReach(common.gopts.file)
        backwardReach(common.gopts.file, system)
    else:
        backwardReach(common.gopts.file, None)

    common.gopts.statsF.write("wall-time-sec:\t%.2f\n" % (elapsed_time()))

    # ru_maxrss is reported in KB; convert to MB.
    maxMem = (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / (1024.0)
    common.gopts.statsF.write("memory-mb:\t%.2f\n" % (maxMem))

    common.gopts.statsF.close()
Beispiel #15
0
def get_or_generate_tile(url):
    """Return the cached tile for *url* when it is under two days old;
    otherwise render a fresh tile, cache it and return it."""
    tile = get_tile(url)

    if tile:
        now = datetime.now()
        age = now - tile.timestamp
        timediff = utils.elapsed_time(age.seconds)

        if age > timedelta(days=2):
            logging.info("Tile %s expired %s ago" % (url, timediff))
        else:
            logging.info("Tile %s found in store that is %s old" % (url, timediff))
            return tile.img

    # Cache miss or stale entry: regenerate and store.
    img = make_tile(url)
    save_tile(img, url)
    return img
Beispiel #16
0
def get_or_generate_tile(url):
    """Serve *url*'s tile from the cache when fresh (< 2 days old),
    otherwise generate, store and return a new one."""
    cached = get_tile(url)
    if cached:
        now = datetime.now()
        age_str = utils.elapsed_time((now - cached.timestamp).seconds)
        if (now - cached.timestamp) > timedelta(days=2):
            logging.info("Tile %s expired %s ago" % (url, age_str))
        else:
            logging.info("Tile %s found in store that is %s old" % (url, age_str))
            return cached.img

    img = make_tile(url)
    save_tile(img, url)
    return img
Beispiel #17
0
def state():
    """Main game loop: populate the board, spawn the first piece and keep
    dropping pieces until the loss condition is met.

    The drop cadence compares the elapsed-time value, as a string, against
    the current level's list of trigger substrings; player input is polled
    between drops.
    """
    board.populate()
    utils.spawn_new()

    while not utils.check_loss():
        # print(utils.elapsed_time())
        # thresh = round(utils.elapsed_time() % globs.delay, 2)
        # print(thresh)
        #  if (utils.elapsed_time() % globs.delay == 0):
        # NOTE(review): elapsed time is matched as a *string* against the
        # level's drop substrings — presumably a timer workaround; confirm.
        t = str(utils.elapsed_time())
        #  print(t)
        drop_nums = globs.drop_lists[globs.current_level - 1]
        if (any(x in t for x in drop_nums)):
            Canvas.draw_board()

            # No collision yet: keep the current piece falling.
            if (not globs.current_piece.collision()):
                globs.current_piece.move_down()
                utils.insert_piece(globs.current_piece)

            # Piece has landed: lock it, clear rows, spawn the next piece.
            else:
                # utils.reset_piece(globs.preview_pc)
                utils.de_select_piece(globs.current_piece)
                utils.clear_board()
                # print("DONE CLEARING")
                # time.sleep(1)
                utils.spawn_new()
                # print("SPAWNED")
                globs.num_placed += 1
                utils.check_level()
            # time.sleep(0.1)

        # give player 0.2 seconds to make a last-second adjustment
        if globs.current_piece.collision() and not globs.dropped:
            Canvas.draw_board()
            kb.actions(kb.get_dir(0.2))
        # give player 0.5 seconds to slide piece once made contact
        # TODO set timer, only stop accepting input once timer runs out
        # reset timer when key is pressed
        else:
            kb.actions(kb.get_dir(0.05))
Beispiel #18
0
def get_or_generate_thumb(type, hometeam, awayteam, size):
    """Return the cached thumbnail for this matchup/size when younger than
    one day; otherwise regenerate it, caching the result when generation
    succeeds."""
    name = "%s_%s_%s_%s" % (type, hometeam, awayteam, size)
    thumb = get_thumb(name)

    if thumb:
        now = datetime.now()
        age = now - thumb.timestamp
        timediff = utils.elapsed_time(age.seconds)

        if age > timedelta(days=1):
            logging.info("Thumb %s expired %s ago" % (name, timediff))
        else:
            logging.info("Thumb %s found in store that is %s old" % (name, timediff))
            return thumb.img

    # Cache miss or stale: regenerate (generation may fail and return None).
    img = generate_thumb(type, hometeam, awayteam, size)
    if img:
        save_thumb(img, name)

    return img
Beispiel #19
0
def departure_stat_send(request):
    """Accept a POSTed departure statistic, rate-limited per device.

    Rejects with HTTP 405 when the same device_id posted within the last
    10 minutes or when the form is invalid; otherwise saves the stat and
    returns "OK". Non-POST requests get an empty 200 response.
    """
    if request.method == 'POST':
        form = forms.DepartureStatForm(request.POST)
        if form.is_valid():

            # Get any previous entries from this device
            previous_stats = models.DepartureStat.objects.filter(device_id=form.data['device_id'])
            now = datetime.now()
            for prev in previous_stats:
                time_diff = now - prev.timestamp
                if time_diff < timedelta(minutes=10):
                    # Last update is less than 10 mins ago, we return and not save
                    logging.info("Form error: last entry was less 10 mins ago (%s)" % utils.elapsed_time(time_diff.seconds))
                    return HttpResponse("ERR\n", status=405)

            # No entries less than 10 minutes old, so save this one.
            # (Fixed: the old comment said "1 hour", contradicting the
            # 10-minute check above; also dropped the unused `stat` local.)
            form.save()
            logging.info("Form OK: %s" % form.data)
            return HttpResponse("OK\n")
        else:
            logging.info("Form error: %s" % form.errors)
            return HttpResponse("ERR\n", status=405)

    return HttpResponse("")
Beispiel #20
0
        hand2.extend([card2, card1])
    return hand1, hand2


def play_game(hand1: Hand, hand2: Hand) -> Tuple[Hand, Hand]:
    """Play rounds until one hand runs out of cards (signalled by
    IndexError from play_round), returning the final hands."""
    try:
        while True:
            hand1, hand2 = play_round(hand1, hand2)
    except IndexError:
        return hand1, hand2


def score_hand(hand: Hand) -> int:
    """Score a hand: bottom card ×1, next ×2, …, top card ×len(hand)."""
    total = 0
    for multiplier, card in enumerate(reversed(hand), start=1):
        total += multiplier * card
    return total


def part1(filename: str) -> int:
    """Winning score of the regular Combat game for the given input file."""
    final_hands = play_game(*get_hands(filename))
    return max(score_hand(hand) for hand in final_hands)


def part2(filename: str) -> int:
    """Recursive Combat is not implemented yet; placeholder returns None."""


if __name__ == '__main__':
    puzzle_input = 'test_input_day22.txt'
    # Time both parts and print their results; the trailing comment
    # records the expected part 1 answer for this input.
    print_results(
        elapsed_time(part1, puzzle_input),  # 31314
        elapsed_time(part2, puzzle_input))
Beispiel #21
0
    grid.update(next_grid)
    return grid


def count_active_cubes(grid: Grid) -> int:
    """Number of cells in *grid* whose state equals ACTIVE."""
    return sum(1 for state in grid.values() if state == ACTIVE)


def simulate(filename: str, num_dimensions: int, num_cycles: int) -> Grid:
    """Run num_cycles of the cellular automaton on an n-dimensional grid
    initialized from the input file, returning the final grid."""
    grid, directions = initialize_nd_grid(filename, num_dimensions)
    cycles_left = num_cycles
    while cycles_left > 0:
        grid = simulate_cycle(grid, directions)
        cycles_left -= 1
    return grid


def part1(filename: str) -> int:
    """Active cube count after six cycles in three dimensions."""
    return count_active_cubes(simulate(filename, 3, 6))


def part2(filename: str) -> int:
    """Active cube count after six cycles in four dimensions."""
    return count_active_cubes(simulate(filename, 4, 6))


if __name__ == '__main__':
    puzzle_input = 'input_day17.txt'
    # Time both parts; trailing comments record the expected answers.
    print_results(
        elapsed_time(part1, puzzle_input),  # 286
        elapsed_time(part2, puzzle_input))  # 960
                    scores = []
                    pvalue = 0
                    good_score = cross_val_score(
                        cv=sl2go, estimator=clf, X=X, y=y, groups=groups, n_jobs=-1
                    ).mean()
                    for perm in range(n_permutations):
                        clf = LDA()
                        perm_set = permutation(len(y))
                        y_perm = y[perm_set]
                        groups_perm = groups[perm_set]
                        scores.append(
                            cross_val_score(
                                cv=sl2go,
                                estimator=clf,
                                X=X,
                                y=y_perm,
                                groups=groups_perm,
                                n_jobs=-1,
                            ).mean()
                        )
                    for score in scores:
                        if good_score <= score:
                            pvalue += 1 / n_permutations
                    # print('Done in %s' % elapsed_time(t3, time()))
                    data = {"score": good_score, "pscore": scores, "pvalue": pvalue}
                    print("%s : %0.2f significatif a p=%0.4f" % (key, score, pvalue))

                    savemat(results_file_path, data)

    print("total time lapsed : %s" % elapsed_time(t0, time()))
Beispiel #23
0
    def analyze(self):
        """Run the Infer backend over the captured project.

        Builds the InferAnalyze command line from self.args, then either
        runs a single-process analysis (multicore == 1) or generates a
        Makefile and drives a parallel analysis through `make`. Phase
        wall times are recorded in self.timing. Returns an accumulated
        exit status (os.EX_OK only when every step succeeded).
        """
        logging.info('Starting analysis')
        infer_analyze = [
            utils.get_cmd_in_bin_dir(INFER_ANALYZE_BINARY), '-results_dir',
            self.args.infer_out
        ]
        infer_options = []

        # remove specs if possible so that old issues are less likely
        # to be reported
        infer_options += ['-allow_specs_cleanup']

        # Analyzer-specific flags; the default analyzer also loads the
        # bundled models jar when it is present on disk.
        if self.args.analyzer == ERADICATE:
            infer_options += ['-checkers', '-eradicate']
        elif self.args.analyzer == CHECKERS:
            infer_options += ['-checkers']
        else:
            if self.args.analyzer == TRACING:
                infer_options.append('-tracing')
            if os.path.isfile(utils.MODELS_JAR):
                infer_options += ['-models', utils.MODELS_JAR]

        if self.args.infer_cache:
            infer_options += ['-infer_cache', self.args.infer_cache]

        if self.args.ml_buckets:
            infer_options += ['-ml_buckets', self.args.ml_buckets]

        if self.args.notest:
            infer_options += ['-notest']

        # Debug mode turns on every diagnostic output the backend offers.
        if self.args.debug:
            infer_options += [
                '-developer_mode',
                '-html',
                '-dotty',
                '-print_types',
                '-trace_error',
                '-print_buckets',
                # '-notest',
            ]

        if self.args.incremental:
            if self.args.changed_only:
                infer_options.append('-incremental_changed_only')
            else:
                infer_options.append('-incremental')

        if self.args.specs_dirs:
            infer_options += self.args.specs_dirs

        exit_status = os.EX_OK

        # Under Buck, ship each classpath jar to the backend via -ziplib;
        # otherwise just point the backend at the project root.
        if self.args.buck:
            infer_options += ['-project_root', os.getcwd(), '-java']
            if self.javac.args.classpath is not None:
                for path in self.javac.args.classpath.split(os.pathsep):
                    if os.path.isfile(path):
                        infer_options += ['-ziplib', os.path.abspath(path)]
        elif self.args.project_root:
            infer_options += ['-project_root', self.args.project_root]

        # Single-process mode: one InferAnalyze invocation does it all.
        if self.args.multicore == 1:
            analysis_start_time = time.time()
            analyze_cmd = infer_analyze + infer_options
            exit_status = run_command(analyze_cmd, self.args.debug,
                                      self.javac.original_arguments,
                                      'analysis', self.args.analyzer)
            elapsed = utils.elapsed_time(analysis_start_time)
            self.timing['analysis'] = elapsed
            self.timing['makefile_generation'] = 0

        # Parallel mode: generate a Makefile, then run `make -j`.
        else:
            if self.args.analyzer in [ERADICATE, CHECKERS]:
                infer_analyze.append('-intraprocedural')

            # The generated Makefile picks the options up from the env.
            os.environ['INFER_OPTIONS'] = ' '.join(infer_options)

            multicore_dir = os.path.join(self.args.infer_out, 'multicore')
            pwd = os.getcwd()
            if os.path.isdir(multicore_dir):
                shutil.rmtree(multicore_dir)
            os.mkdir(multicore_dir)
            os.chdir(multicore_dir)
            analyze_cmd = infer_analyze + ['-makefile', 'Makefile']
            analyze_cmd += infer_options
            makefile_generation_start_time = time.time()
            makefile_status = run_command(analyze_cmd, self.args.debug,
                                          self.javac.original_arguments,
                                          'create_makefile',
                                          self.args.analyzer)
            elapsed = utils.elapsed_time(makefile_generation_start_time)
            self.timing['makefile_generation'] = elapsed
            exit_status += makefile_status
            if makefile_status == os.EX_OK:
                # -k keeps going past per-target failures.
                make_cmd = ['make', '-k', '-j', str(self.args.multicore)]
                if not self.args.debug:
                    make_cmd += ['-s']
                analysis_start_time = time.time()
                make_status = run_command(make_cmd, self.args.debug,
                                          self.javac.original_arguments,
                                          'run_makefile', self.args.analyzer)
                elapsed = utils.elapsed_time(analysis_start_time)
                self.timing['analysis'] = elapsed
                os.chdir(pwd)
                exit_status += make_status

        if self.args.buck and exit_status == os.EX_OK:
            clean(self.args.infer_out)

        return exit_status
Beispiel #24
0
    Main function. Ask for a list of addresses or type 'ALL' to calculate for all buildings in the shapefile.
    Change the path of inBat and inRas for the desired building shapefile and water height raster.
    """
    inBat = r"E:\Charles_Tousignant\Python_workspace\Gari\shapefile\Digues\_batiments.shp"
    # inRas = r"E:\Charles_Tousignant\Python_workspace\Gari\shapefile\Digues\_hauteur_eau_debit_1369.tif"
    # inBat = r"E:\Charles_Tousignant\Python_workspace\Gari\shapefile\Zones_extraction\BV_Richelieu_geocode.shp"
    inRas = r"E:\Charles_Tousignant\Python_workspace\Gari\shapefile\Digues\_hauteur_eau_debit_1369_full.tif"
    inLines = r"E:\Charles_Tousignant\Python_workspace\Gari\shapefile\Digues\lines.shp"

    r_input = raw_input(
        "Entrez une liste d'adresses séparée par des points-virgules (;) ou 'ALL' pour toutes les adresses affectées"
    )
    if r_input == 'ALL':
        line_creator(inBat)
        digue(inLines, inRas)
    else:
        # inAdr = ["239 rue Beaubien, St-Jean", "38 rue Verdi, St-Jean", "17 rue Verdi, St-Jean"]
        # inAdr = 239 rue Beaubien, St-Jean ; 38 rue Verdi, St-Jean ; 17 rue Verdi, St-Jean ; 77 rue Roman, St-Jean
        # inAdr = 55 rue de l'Oasis, St-Jean ; 37 Baraby, St Jean ; 41 rue Barby, Saint-Jean; 354 rue Charles Preston, St-Jean;1414 rue de Foucault, St-Jean
        line_creator2(r_input.split(";"))
        digue(inLines, inRas)
    arcpy.Delete_management(inLines)


if __name__ == '__main__':
    # Run the dike calculation and print a completion banner with the
    # total elapsed time.
    main()
    print("##############################")
    print("Dike calculation complete!")
    print(elapsed_time())
    print("##############################")
Beispiel #25
0
                return True
            elif does_contain(target_bag, inner_bag, filename, rules, cache):
                cache[(filename, outer_bag, target_bag)] = True
                return True
        cache[(filename, outer_bag, target_bag)] = False
        return False


def number_contained(bag: str, rules: RuleDict) -> int:
    """Total number of bags held inside *bag*, counted recursively."""
    children = rules[bag]
    total = len(children)
    for child in children:
        total += number_contained(child, rules)
    return total


def part1(filename: str) -> int:
    """Count bags that can (transitively) contain a shiny gold bag."""
    rules = read_rules(filename)
    count = 0
    for bag in rules:
        if does_contain('shiny gold', bag, filename, rules):
            count += 1
    return count


def part2(filename: str) -> int:
    """Total number of bags a shiny gold bag must contain."""
    return number_contained('shiny gold', read_rules(filename))


if __name__ == '__main__':
    puzzle_input = 'input_day07.txt'
    # Time both parts; trailing comments record the expected answers.
    print_results(
        elapsed_time(part1, puzzle_input),  # 126
        elapsed_time(part2, puzzle_input))  # 220149
Beispiel #26
0
                pass

        # Track progress
        solved_fields.add(solved_field)
        try:
            todo.remove(solved_field)
        except ValueError:
            pass

    return matches


def part1(rules: Rules, nearby: List[Ticket]) -> int:
    """Ticket scanning error rate: sum of values valid for no field."""
    total = 0
    for value in invalid_values(rules, nearby):
        total += value
    return total


def part2(rules: Rules, ticket: Ticket, nearby: List[Ticket]) -> int:
    """Product of this ticket's values for all 'departure' fields."""
    tickets = valid_tickets(rules, nearby)
    departure_fields = [f for f in rules if f.startswith('departure')]
    matches = eliminate_matches(match_fields(rules, tickets), departure_fields)
    result = prod(ticket[cols[0]]
                  for field, cols in matches.items()
                  if field in departure_fields)
    return result


if __name__ == '__main__':
    puzzle_input = 'input_day16.txt'
    # Parse once, share the parsed structures between both parts;
    # trailing comments record the expected answers.
    rules, ticket, nearby = parse_input(puzzle_input)
    print_results(
        elapsed_time(part1, rules, nearby),  # 29759
        elapsed_time(part2, rules, ticket, nearby))  # 1307550234719
Beispiel #27
0
    def analyze(self):
        """Run the Infer backend over the captured project.

        Builds the InferAnalyze command line from self.args, then either
        runs a single-process analysis (multicore == 1) or generates a
        Makefile and drives a parallel analysis through `make`. Phase
        wall times are recorded in self.timing. Returns an accumulated
        exit status (os.EX_OK only when every step succeeded).
        """
        logging.info('Starting analysis')
        infer_analyze = [
            utils.get_cmd_in_bin_dir(INFER_ANALYZE_BINARY),
            '-results_dir',
            self.args.infer_out
        ]
        infer_options = []

        # remove specs if possible so that old issues are less likely
        # to be reported
        infer_options += ['-allow_specs_cleanup']

        # Analyzer-specific flags; the default analyzer also loads the
        # bundled models jar when it is present on disk.
        if self.args.analyzer == ERADICATE:
            infer_options += ['-checkers', '-eradicate']
        elif self.args.analyzer == CHECKERS:
            infer_options += ['-checkers']
        else:
            if self.args.analyzer == TRACING:
                infer_options.append('-tracing')
            if os.path.isfile(utils.MODELS_JAR):
                infer_options += ['-models', utils.MODELS_JAR]

        if self.args.infer_cache:
            infer_options += ['-infer_cache', self.args.infer_cache]

        if self.args.objc_ml_buckets:
            infer_options += ['-objc_ml_buckets', self.args.objc_ml_buckets]

        if self.args.notest:
            infer_options += ['-notest']

        # Debug mode turns on every diagnostic output the backend offers.
        if self.args.debug:
            infer_options += [
                '-developer_mode',
                '-html',
                '-dotty',
                '-print_types',
                '-trace_error',
                '-print_buckets',
                # '-notest',
            ]

        if self.args.incremental:
            if self.args.changed_only:
                infer_options.append('-incremental_changed_only')
            else:
                infer_options.append('-incremental')

        if self.args.specs_dirs:
            infer_options += self.args.specs_dirs

        exit_status = os.EX_OK

        # Under Buck, ship each classpath jar to the backend via -ziplib;
        # otherwise just point the backend at the project root.
        if self.args.buck:
            infer_options += ['-project_root', os.getcwd(), '-java']
            if self.javac.args.classpath is not None:
                for path in self.javac.args.classpath.split(os.pathsep):
                    if os.path.isfile(path):
                        infer_options += ['-ziplib', os.path.abspath(path)]
        elif self.args.project_root:
            infer_options += ['-project_root', self.args.project_root]

        # Single-process mode: one InferAnalyze invocation does it all.
        if self.args.multicore == 1:
            analysis_start_time = time.time()
            analyze_cmd = infer_analyze + infer_options
            exit_status = run_command(
                analyze_cmd,
                self.args.debug,
                self.javac.original_arguments,
                'analysis',
                self.args.analyzer
            )
            elapsed = utils.elapsed_time(analysis_start_time)
            self.timing['analysis'] = elapsed
            self.timing['makefile_generation'] = 0

        # Parallel mode: generate a Makefile, then run `make -j`.
        else:
            if self.args.analyzer in [ERADICATE, CHECKERS]:
                infer_analyze.append('-intraprocedural')

            # The generated Makefile picks the options up from the env.
            os.environ['INFER_OPTIONS'] = ' '.join(infer_options)

            multicore_dir = os.path.join(self.args.infer_out, 'multicore')
            pwd = os.getcwd()
            if os.path.isdir(multicore_dir):
                shutil.rmtree(multicore_dir)
            os.mkdir(multicore_dir)
            os.chdir(multicore_dir)
            analyze_cmd = infer_analyze + ['-makefile', 'Makefile']
            analyze_cmd += infer_options
            makefile_generation_start_time = time.time()
            makefile_status = run_command(
                analyze_cmd,
                self.args.debug,
                self.javac.original_arguments,
                'create_makefile',
                self.args.analyzer
            )
            elapsed = utils.elapsed_time(makefile_generation_start_time)
            self.timing['makefile_generation'] = elapsed
            exit_status += makefile_status
            if makefile_status == os.EX_OK:
                # -k keeps going past per-target failures.
                make_cmd = ['make', '-k', '-j', str(self.args.multicore)]
                if not self.args.debug:
                    make_cmd += ['-s']
                analysis_start_time = time.time()
                make_status = run_command(
                    make_cmd,
                    self.args.debug,
                    self.javac.original_arguments,
                    'run_makefile',
                    self.args.analyzer
                )
                elapsed = utils.elapsed_time(analysis_start_time)
                self.timing['analysis'] = elapsed
                os.chdir(pwd)
                exit_status += make_status

        if self.args.buck and exit_status == os.EX_OK:
            clean(self.args.infer_out, self.javac.annotations_out)

        return exit_status
Beispiel #28
0

def part1(puzzle_input: str, nth_spoken: int = 2020) -> int:
    """Play the elves' memory game and return the number spoken on a turn.

    Each turn, the previously spoken number is followed by 0 if it was
    new, otherwise by the gap between its last two occurrences (the
    Van Eck sequence).

    Args:
        puzzle_input: comma-separated starting numbers.
        nth_spoken: 1-based turn whose spoken number is returned.

    Returns:
        The number spoken on turn ``nth_spoken``.

    Raises:
        ValueError: if ``nth_spoken`` is less than 1.
    """
    numbers = [int(x) for x in puzzle_input.split(',')]
    if nth_spoken < 1:
        raise ValueError('nth_spoken must be >= 1')
    # Fix: the original raised NameError (unbound previous_number) when
    # nth_spoken fell within the starting numbers themselves, because the
    # loop below never ran.
    if nth_spoken <= len(numbers):
        return numbers[nth_spoken - 1]
    *starting_numbers, current_number = numbers
    # Turn on which each starting number (except the last) was last spoken.
    last_spoken_on_turn = {
        number: turn
        for turn, number in enumerate(starting_numbers, start=1)
    }
    for current_turn in range(len(starting_numbers) + 1, nth_spoken + 1):
        # current_number is the number spoken on current_turn.
        if current_number in last_spoken_on_turn:
            next_number = current_turn - last_spoken_on_turn[current_number]
        else:
            next_number = 0
        last_spoken_on_turn[current_number] = current_turn
        previous_number = current_number
        current_number = next_number
    return previous_number


def part2(puzzle_input: str) -> int:
    """Solve part 2: the 30,000,000th number spoken in the memory game."""
    target_turn = 30000000
    return part1(puzzle_input, nth_spoken=target_turn)


if __name__ == '__main__':
    puzzle_input = '1,0,16,5,17,4'
    # Time each part separately, then print both results.
    result1 = elapsed_time(part1, puzzle_input)  # 1294
    result2 = elapsed_time(part2, puzzle_input)  # 573522
    print_results(result1, result2)
Beispiel #29
0
        # Forward pass; second positional argument (presumably an attention
        # mask) is None here — TODO confirm against the model signature.
        output = model(data, None)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # NOTE(review): optimizer.zero_grad() is not visible in this chunk;
        # confirm gradients are cleared earlier in the loop.
        train_loss += loss.item()
        # Predicted class index per sample.
        o = output.max(dim=1)[1]
        for i in range(len(target)):
            if target[i] == o[i]:
                train_acc += 1

    # Average accuracy and loss over the whole training set.
    train_acc /= len(data_train)
    train_accs.append(train_acc)
    train_loss /= len(data_train)
    train_losses.append(train_loss)
    print('{} Loss: {}; Accuracy: {}'.format(
        elapsed_time(time() - start_train), train_loss, train_acc))

    # Switch to evaluation mode for the validation pass.
    start_val = time()
    model.eval()
    val_acc = 0
    val_loss = 0

    print('Evaluating:', end=' ')
    # Validation minibatches: column `lag` holds the label value, the first
    # `lag` columns are the input sequence; the classification target is
    # the sign of the recovered (de-normalized) value.
    for batch in range(0, len(data_val), batch_size):
        data = data_val[batch:batch + batch_size]
        labels = Tensor(data[:, lag]).to('cuda')
        rl = recover_true_values(labels, min_d, max_d)[0]
        target = [int(rl[i][0] > 0) for i in range(len(rl))]
        target = Tensor(target).long().to('cuda')
        data = Tensor(data[:, :lag]).to('cuda')
        # Add the (broadcast) positional encoding to every sample.
        data += positional_encoder[None, :, :]
Beispiel #30
0
	# Parse "<zoom>_<y>_<x>" out of the URL and validate it
	# (Python 2 except syntax; enclosing function starts above this chunk).
	try:
		zoom, y, x = url.split('_')
		assert zoom.isdigit() and x.isdigit() and y.isdigit(), "not digits"
		zoom = int(zoom)
		x = int(x)
		y = int(y)
		assert settings.MIN_ZOOM <= zoom <= settings.MAX_ZOOM, "bad zoom: %d" % zoom
	except AssertionError, err:
		# NOTE(review): only logs the failure — execution continues with
		# possibly unconverted/invalid zoom/x/y, which will fail below.
		# Confirm whether an early return was intended here.
		logging.error(err.args[0])

	# Generate the tile
	try:
		then = datetime.now()
		logging.info("Generating tile %d_%d_%d" % (zoom, y, x))

		# Generate
		new_tile = tile.GoogleTile(zoom, x, y)
		img_data = new_tile.image_out()

		# Timing
		now = datetime.now()
		timediff = utils.elapsed_time((now-then).seconds)
		logging.info("Generating tile %d_%d_%d finished %s" % (zoom, y, x, timediff) )
		return img_data

	except DeadlineExceededError, err:
		# App Engine request deadline hit mid-render; log and fall through.
		logging.warning('%s error - failed at %s' % (str(err), datetime.now()))

	return None

Beispiel #31
0
        # Accumulate lines into `group`; a blank line terminates a group
        # (enclosing generator and file handle `f` start above this chunk).
        for line in f:
            if line == '\n':
                yield group
                group = []
            else:
                group.append(line.strip())
        yield group  # last group at end of file has no extra newline


def anyone_answers(group: List[str]) -> set:
    """Return the set of questions answered 'yes' by anyone in the group."""
    per_person = [set(answers) for answers in group]
    return set.union(*per_person)


def everyone_answers(group: List[str]) -> set:
    """Return the set of questions answered 'yes' by everyone in the group."""
    per_person = [set(answers) for answers in group]
    return set.intersection(*per_person)


def part1(filename: str) -> int:
    """Sum, over all groups, the count of questions anyone answered yes to."""
    total = 0
    for group in groups(filename):
        total += len(anyone_answers(group))
    return total


def part2(filename: str) -> int:
    """Sum, over all groups, the count of questions everyone answered yes to."""
    total = 0
    for group in groups(filename):
        total += len(everyone_answers(group))
    return total


if __name__ == '__main__':
    puzzle_input = 'input_day06.txt'
    # Time each part separately, then print both results.
    first = elapsed_time(part1, puzzle_input)   # 6630
    second = elapsed_time(part2, puzzle_input)  # 3437
    print_results(first, second)
Beispiel #32
0
def size(tiles: Tiles) -> int:
    """Return the largest absolute coordinate over all three cube axes."""
    axis_extremes = [max(abs(coordinate) for coordinate in axis)
                     for axis in zip(*tiles)]
    return max(axis_extremes)


def part1(filename: str) -> int:
    """Count the tiles left black after applying every flip instruction."""
    flipped = flip_tiles(filename)
    black_flags = flipped.values()
    return sum(black_flags)


def part2(filename: str) -> int:
    """Run 100 days of the hex cellular automaton; return black-tile count."""
    initial = flip_tiles(filename)
    grid = empty_grid(size(initial) + 1)
    grid.update(initial)
    for day in range(1, 101):
        grid.update(next_day(grid))
        if day % 10 == 0:
            # Progress output every ten days.
            print(day, sum(grid.values()))
    return sum(grid.values())


if __name__ == '__main__':
    puzzle_input = 'input_day24.txt'
    # Time each part separately, then print both results.
    first = elapsed_time(part1, puzzle_input)   # 293
    second = elapsed_time(part2, puzzle_input)  # 3967
    print_results(first, second)
Beispiel #33
0
def main():
    """Drive the image-combination pipeline end to end.

    Steps (each gated by a command-line option): SWarp combination,
    astrometry, photometric zero-points, detection image + masks,
    SExtractor, dust-extinction correction, BPZ photometric redshifts,
    RGB previews, and optional cleanup of intermediate files.
    """

    # The start time
    tstart = time.time()

    # Get the command line options
    opt, arg = cmdline()

    assocfile = arg[0]
    inpath = arg[1]
    outpath = arg[2]

    def new_combcat():
        # The original built three byte-identical combcat instances at
        # three call sites; factored into one helper to remove the
        # triplicated constructor call.
        return combcat(assocfile,
                       datapath=inpath,
                       outpath=outpath,
                       verb='yes',
                       dryrun=opt.dryrun,
                       noSWarp=not opt.SWarp)

    # SWarp: either just collect filenames (no SWarp) or run the combine.
    c = new_combcat()
    if not opt.SWarp:
        c.get_filenames()
    else:
        c.swarp_files(dryrun=not opt.SWarp,
                      conf="SWarp-common.conf",
                      combtype=opt.combtype)

    if opt.SWarpExtras:
        # Fresh instance, matching the original control flow.
        c = new_combcat()
        c.get_filenames()
        c.swarp_extras(dryrun=not opt.SWarpExtras,
                       conf="SWarp-common.conf",
                       combtype=opt.combtype,
                       newfirm=opt.newfirm)

    if opt.Astro:
        c.get_astrometry(newfirm=opt.newfirm)

    if opt.Photo:
        c.get_zeropt(newfirm=opt.newfirm)

    # Make the detection image
    if opt.useMask:
        # Make the mask from the weights
        c.generate_masks(filters=('i', ), dryrun=opt.noMask)
        c.makeDetectionIma(filter='i')

    # SExtractor
    if opt.SEx:
        c.SEx(deblend=opt.deblend)

    # Dust Extinction Correction
    if opt.Dust:
        c.DustCorrection()

    # photometric redshifts
    if opt.BPZ:
        c.BuildColorCat(newfirm=opt.newfirm)
        c.runBPZ()

    # make RGB images (pngs)
    if opt.RGB:
        print('make rgb')
        c.make_RGB(newfirm=opt.newfirm)

    # Cleanup. Net effect of the nesting: files are cleaned up unless
    # --noCleanUP is set, or SWarp was skipped while SWarpExtras ran
    # (the extras' intermediates are still needed).
    if opt.noCleanUP or not opt.SWarp:
        if opt.noCleanUP or opt.SWarpExtras:
            pass
        else:
            print("CLEANUP!")
            c.cleanup_files()
    else:
        print("CLEANUP!")
        c.cleanup_files()

    # Report total elapsed time for this tile.
    elapsed_time(tstart, c.tilename)
    return
Beispiel #34
0
    def analyze(self):
        """Run the Infer backend analysis over previously captured results.

        Builds the InferAnalyze command line from ``self.args``, then either
        runs it directly (``multicore == 1``) or generates a Makefile in
        ``<infer_out>/multicore`` and drives it with ``make -j``.  Records
        phase durations in ``self.timing`` and returns an accumulated exit
        status (``os.EX_OK`` on full success).
        """
        logging.info("Starting analysis")
        infer_analyze = [utils.get_cmd_in_bin_dir(INFER_ANALYZE_BINARY), "-results_dir", self.args.infer_out]
        infer_options = []

        # remove specs if possible so that old issues are less likely
        # to be reported
        infer_options += ["-allow_specs_cleanup"]

        # Checker selection: eradicate/checkers run in checker mode; any
        # other analyzer runs the full analysis (optionally with tracing
        # and the models jar, when present).
        if self.args.analyzer == ERADICATE:
            infer_options += ["-checkers", "-eradicate"]
        elif self.args.analyzer == CHECKERS:
            infer_options += ["-checkers"]
        else:
            if self.args.analyzer == TRACING:
                infer_options.append("-tracing")
            if os.path.isfile(utils.MODELS_JAR):
                infer_options += ["-models", utils.MODELS_JAR]

        if self.args.infer_cache:
            infer_options += ["-infer_cache", self.args.infer_cache]

        if self.args.ml_buckets:
            infer_options += ["-ml_buckets", self.args.ml_buckets]

        if self.args.notest:
            infer_options += ["-notest"]

        # Debug mode turns on the backend's verbose/developer output.
        if self.args.debug:
            infer_options += [
                "-developer_mode",
                "-html",
                "-dotty",
                "-print_types",
                "-trace_error",
                "-print_buckets",
                # '-notest',
            ]

        if self.args.incremental:
            if self.args.changed_only:
                infer_options.append("-incremental_changed_only")
            else:
                infer_options.append("-incremental")

        if self.args.specs_dirs:
            infer_options += self.args.specs_dirs

        exit_status = os.EX_OK

        if self.args.buck:
            # Buck integration: analyze from the repo root and load specs
            # from any jar files on the javac classpath.
            infer_options += ["-project_root", os.getcwd(), "-java"]
            if self.javac.args.classpath is not None:
                for path in self.javac.args.classpath.split(os.pathsep):
                    if os.path.isfile(path):
                        infer_options += ["-ziplib", os.path.abspath(path)]
        elif self.args.project_root:
            infer_options += ["-project_root", self.args.project_root]

        if self.args.multicore == 1:
            # Single-core: one direct invocation of the analyzer binary.
            analysis_start_time = time.time()
            analyze_cmd = infer_analyze + infer_options
            exit_status = run_command(
                analyze_cmd, self.args.debug, self.javac.original_arguments, "analysis", self.args.analyzer
            )
            elapsed = utils.elapsed_time(analysis_start_time)
            self.timing["analysis"] = elapsed
            self.timing["makefile_generation"] = 0

        else:
            # Multi-core: checker analyses are forced intraprocedural so
            # procedures can be analyzed independently in parallel.
            if self.args.analyzer in [ERADICATE, CHECKERS]:
                infer_analyze.append("-intraprocedural")

            # The Makefile's per-procedure jobs read options from the env.
            os.environ["INFER_OPTIONS"] = " ".join(infer_options)

            # Recreate a clean multicore working directory and cd into it.
            multicore_dir = os.path.join(self.args.infer_out, "multicore")
            pwd = os.getcwd()
            if os.path.isdir(multicore_dir):
                shutil.rmtree(multicore_dir)
            os.mkdir(multicore_dir)
            os.chdir(multicore_dir)
            analyze_cmd = infer_analyze + ["-makefile", "Makefile"]
            analyze_cmd += infer_options
            makefile_generation_start_time = time.time()
            makefile_status = run_command(
                analyze_cmd, self.args.debug, self.javac.original_arguments, "create_makefile", self.args.analyzer
            )
            elapsed = utils.elapsed_time(makefile_generation_start_time)
            self.timing["makefile_generation"] = elapsed
            exit_status += makefile_status
            # NOTE(review): the cwd is only restored inside this branch —
            # if Makefile generation fails, the process stays chdir'd into
            # multicore_dir. Confirm whether that is intended.
            if makefile_status == os.EX_OK:
                make_cmd = ["make", "-k", "-j", str(self.args.multicore)]
                if not self.args.debug:
                    make_cmd += ["-s"]
                analysis_start_time = time.time()
                make_status = run_command(
                    make_cmd, self.args.debug, self.javac.original_arguments, "run_makefile", self.args.analyzer
                )
                elapsed = utils.elapsed_time(analysis_start_time)
                self.timing["analysis"] = elapsed
                os.chdir(pwd)
                exit_status += make_status

        # Buck runs remove intermediate outputs on success.
        if self.args.buck and exit_status == os.EX_OK:
            clean(self.args.infer_out)

        return exit_status
Beispiel #35
0
def find_loop_size(public_key: int, subject_number: int) -> int:
    """Brute-force the loop size that transforms 1 into *public_key*."""
    loop_size = 0
    current = 1
    while current != public_key:
        current = transform(current, subject_number)
        loop_size += 1
    return loop_size


def find_encryption_key(card_key: int, door_key: int,
                        subject_number: int) -> int:
    """Derive the shared handshake encryption key.

    Recovers the card's secret loop size from its public key, then
    transforms the door's public key that many times.

    Args:
        card_key: the card's public key.
        door_key: the door's public key.
        subject_number: the handshake's initial subject number (7 in AoC).

    Returns:
        The encryption key both devices agree on.
    """
    card_loop_size = find_loop_size(card_key, subject_number)
    # Fix: removed a stray debug print of card_loop_size that polluted
    # stdout on every call.
    value = 1
    for _ in range(card_loop_size):
        value = transform(value, subject_number=door_key)
    return value


def part1(filename: str) -> int:
    """Recover the door/card handshake's shared encryption key."""
    card_key, door_key = read_public_keys(filename)
    return find_encryption_key(card_key, door_key, 7)


if __name__ == '__main__':
    puzzle_input = 'input_day25.txt'
    # Time part 1 and print its result.
    result = elapsed_time(part1, puzzle_input)  # 3015200
    print_results(result)
Beispiel #36
0
def part2_parser() -> ParserElement:
    """Build an expression parser where '+' binds tighter than '*'."""
    operand = ppc.integer
    operand.setParseAction(EvalConstant)
    # In infixNotation, earlier table entries bind more tightly, so
    # listing '+' before '*' gives addition precedence over
    # multiplication (the part-2 rule).
    precedence_table = [
        ("+", 2, opAssoc.LEFT, EvalAddOp),
        ("*", 2, opAssoc.LEFT, EvalMulOp),
    ]
    return infixNotation(operand, precedence_table)


def part1(filename: str) -> int:
    """Evaluate every input expression with the part-1 parser; sum results."""
    parser = part1_parser()
    total = 0
    for expression in stream_input(filename):
        total += evaluate(expression, parser)
    return total


def part2(filename: str) -> int:
    """Evaluate every input expression with the part-2 parser; sum results."""
    parser = part2_parser()
    total = 0
    for expression in stream_input(filename):
        total += evaluate(expression, parser)
    return total


if __name__ == '__main__':
    puzzle_input = 'input_day18.txt'
    # Time each part separately, then print both results.
    first = elapsed_time(part1, puzzle_input)
    second = elapsed_time(part2, puzzle_input)
    print_results(first, second)
Beispiel #37
0
                # Neighbor already reachable: add the routes through adapter.
                paths[neighbor] += paths[adapter]
            else:
                # First route discovered to this neighbor.
                paths[neighbor] = paths[adapter]
    # paths maps each joltage to the number of distinct chains reaching it
    # (enclosing definition starts above this chunk).
    return paths


def part1(filename: str) -> int:
    """Multiply the counts of 1-jolt and 3-jolt gaps in the adapter chain."""
    adapters = connected_adapters(filename)
    # Chain runs from the 0-jolt outlet up to the device (max + 3).
    device = max(adapters) + 3
    joltages = [0] + adapters + [device]
    gap_counts = Counter(joltage_differences(joltages))
    return gap_counts[1] * gap_counts[3]


def part2(filename: str) -> int:
    """Count all valid adapter arrangements via the path-count graph."""
    adapters = connected_adapters(filename)
    # Chain runs from the 0-jolt outlet up to the device (max + 3).
    device = max(adapters) + 3
    joltages = [0] + adapters + [device]
    graph = build_connection_graph(joltages)
    return count_paths(graph)[device]


if __name__ == '__main__':
    puzzle_input = 'input_day10.txt'
    # Time each part separately, then print both results.
    first = elapsed_time(part1, puzzle_input)   # 1700
    second = elapsed_time(part2, puzzle_input)  # 12401793332096
    print_results(first, second)
Beispiel #38
0
def lazy_prm(start_conf,
             end_conf,
             sample_fn,
             extend_fn,
             collision_fn,
             num_samples=100,
             max_degree=10,
             weights=None,
             p_norm=2,
             max_distance=INF,
             approximate_eps=0.0,
             max_cost=INF,
             max_time=INF,
             max_paths=INF):
    """Lazy PRM: build a roadmap without collision checks, then search it,
    validating vertices and edges only along candidate paths.

    Returns ``(solution, samples, edges, colliding_vertices,
    colliding_edges)`` on success, and ``(None, edges, colliding_vertices,
    colliding_edges)`` when the search fails — note the two return shapes
    differ (failure omits ``samples``); confirm callers handle both.

    NOTE(review): ``max_paths`` is accepted but unused in this body.  If
    ``max_time <= 0`` the while loop never runs and ``path`` below is
    unbound (NameError); if the loop exits by timeout rather than
    ``break``, the last *unchecked* path is used for the solution.
    """
    # TODO: multi-query motion planning
    start_time = time.time()
    # TODO: can embed pose and/or points on the robot for other distances
    if weights is None:
        weights = np.ones(len(start_conf))
    embed_fn = lambda q: weights * q
    distance_fn = lambda q1, q2: np.linalg.norm(embed_fn(q2) - embed_fn(q1),
                                                ord=p_norm)
    cost_fn = lambda v1, v2: distance_fn(samples[v1], samples[v2])
    # TODO: can compute cost between waypoints from extend_fn

    # Rejection-sample configurations whose start->conf->goal detour bound
    # stays under max_cost.
    samples = []
    while len(samples) < num_samples:
        conf = sample_fn()
        if (distance_fn(start_conf, conf) +
                distance_fn(conf, end_conf)) < max_cost:
            samples.append(conf)
    # The first two samples are overwritten with the endpoints themselves.
    start_index, end_index = 0, 1
    samples[start_index] = start_conf
    samples[end_index] = end_conf

    # Build a symmetric k-nearest-neighbor roadmap with a KD-tree in the
    # weighted embedding space (k+1 because each point matches itself).
    embedded = list(map(embed_fn, samples))
    kd_tree = KDTree(embedded)
    vertices = list(range(len(samples)))
    edges = set()
    for v1 in vertices:
        # TODO: could dynamically compute distances
        distances, neighbors = kd_tree.query(embedded[v1],
                                             k=max_degree + 1,
                                             eps=approximate_eps,
                                             p=p_norm,
                                             distance_upper_bound=max_distance)
        for d, v2 in zip(distances, neighbors):
            if (d < max_distance) and (v1 != v2):
                edges.update([(v1, v2), (v2, v1)])
    neighbors_from_index = {v: set() for v in vertices}
    for v1, v2 in edges:
        neighbors_from_index[v1].add(v2)
    #print(time.time() - start_time, len(edges), float(len(edges))/len(samples))

    # Lazily discovered collision results, filled in by check_path below.
    colliding_vertices, colliding_edges = {}, {}

    def neighbors_fn(v1):
        # Yield roadmap neighbors not yet known to be in collision.
        for v2 in neighbors_from_index[v1]:
            if not (colliding_vertices.get(v2, False) or colliding_edges.get(
                (v1, v2), False)):
                yield v2

    # Backward Dijkstra from the goal provides the search heuristic.
    visited = dijkstra(end_index, neighbors_fn, cost_fn)
    heuristic_fn = lambda v: visited[v].g if v in visited else INF
    while elapsed_time(start_time) < max_time:
        # TODO: extra cost to prioritize reusing checked edges
        path = wastar_search(start_index,
                             end_index,
                             neighbors_fn=neighbors_fn,
                             cost_fn=cost_fn,
                             heuristic_fn=heuristic_fn,
                             max_cost=max_cost,
                             max_time=max_time - elapsed_time(start_time))
        if path is None:
            return None, edges, colliding_vertices, colliding_edges
        cost = sum(cost_fn(v1, v2) for v1, v2 in zip(path, path[1:]))
        print(
            'Length: {} | Cost: {:.3f} | Vertices: {} | Edges: {} | Time: {:.3f}'
            .format(len(path), cost, len(colliding_vertices),
                    len(colliding_edges), elapsed_time(start_time)))
        # Validate the candidate path; on a collision, the colliding_*
        # dicts are updated and the search repeats.
        if check_path(path, colliding_vertices, colliding_edges, samples,
                      extend_fn, collision_fn):
            break

    # Densify the validated path with extend_fn between waypoints.
    solution = [start_conf]
    for q1, q2 in zip(path, path[1:]):
        solution.extend(extend_fn(samples[q1], samples[q2]))
    return solution, samples, edges, colliding_vertices, colliding_edges
Beispiel #39
0
        # Standard deviation of the per-bootstrap mean accuracies: each
        # bootstrap contributes n_splits CV scores, averaged first
        # (enclosing function starts above this chunk).
        standev = np.std([
            np.mean(final_save["acc"][i * n_splits:(i + 1) * n_splits])
            for i in range(N_BOOTSTRAPS)
        ])
        to_print += " (+/- {:.2f})".format(standev)
    print(to_print)
    if PERM:
        # Permutation-test p-value; presumably computed upstream — verify.
        print("pval = {}".format(final_save["acc_pvalue"]))


if __name__ == "__main__":
    TIMELAPSE_START = time()
    # Normalize the command line into a flat ARGS list:
    # several args -> use them as-is; one arg -> split on ':';
    # no args -> empty (run the full sweep).
    argv_tail = sys.argv[1:]
    if len(argv_tail) > 1:
        ARGS = argv_tail
    elif len(argv_tail) == 1:
        ARGS = argv_tail[0].split(":")
    else:
        ARGS = []

    if ARGS == []:
        from joblib import delayed, Parallel

        # Sweep every (state, frequency, channel) combination.
        Parallel(n_jobs=1)(
            delayed(classif_subcosp)(st, fr, el, n_jobs=1)
            for st, fr, el in product(STATE_LIST, FREQ_DICT, CHANNEL_NAMES))
    else:
        print(ARGS)
        classif_subcosp(ARGS[0], ARGS[1], ARGS[2])
    print("total time lapsed : %s" % (elapsed_time(TIMELAPSE_START, time())))
Beispiel #40
0
    # Parse "<zoom>_<y>_<x>" out of the URL and validate it
    # (Python 2 except syntax; enclosing function starts above this chunk).
    try:
        zoom, y, x = url.split('_')
        assert zoom.isdigit() and x.isdigit() and y.isdigit(), "not digits"
        zoom = int(zoom)
        x = int(x)
        y = int(y)
        assert settings.MIN_ZOOM <= zoom <= settings.MAX_ZOOM, "bad zoom: %d" % zoom
    except AssertionError, err:
        # NOTE(review): only logs the failure — execution continues with
        # possibly unconverted/invalid zoom/x/y, which will fail below.
        # Confirm whether an early return was intended here.
        logging.error(err.args[0])

    # Generate the tile
    try:
        then = datetime.now()
        logging.info("Generating tile %d_%d_%d" % (zoom, y, x))

        # Generate
        new_tile = tile.GoogleTile(zoom, x, y)
        img_data = new_tile.image_out()

        # Timing
        now = datetime.now()
        timediff = utils.elapsed_time((now - then).seconds)
        logging.info("Generating tile %d_%d_%d finished %s" %
                     (zoom, y, x, timediff))
        return img_data

    except DeadlineExceededError, err:
        # App Engine request deadline hit mid-render; log and fall through.
        logging.warning('%s error - failed at %s' % (str(err), datetime.now()))

    return None