Example #1
import asyncio


async def main():
    # One generation for a single population: mutate, cross over,
    # evaluate asynchronously, then keep the best candidates.
    async def evolution_step(e):
        e.mutation(attempts=4000)
        e.crossover(attempts=4000)
        await e.compute()
        e.selection(eval_param=0.05, save_n_best=3)

    logger = FileLogger('logout')
    iteration, step = 1, 0
    the_best_value = 0.0
    # Stop once no improvement has been seen for 'stop_step' consecutive
    # iterations; 'evolution' (the populations) and 'stop_step' are defined elsewhere.
    while step < stop_step:
        logger(f"Iteration: {iteration}\n")

        await asyncio.gather(*(evolution_step(e) for e in evolution))
        for e in evolution:
            e.print_info(iter=iteration)

        # Reset the stall counter whenever a better protein appears;
        # otherwise count one more iteration without improvement.
        cur_best_value = max(e.get_best_protein().value for e in evolution)
        if the_best_value < cur_best_value:
            the_best_value = cur_best_value
            step = 0
        else:
            step += 1

        logger(f"The best value: {the_best_value}\n"
               f"Step/Stop {step}/{stop_step}\n\n")

        iteration += 1
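Example #1 calls its FileLogger instance directly, so the class is assumed to expose __call__ and append each message to the file given at construction time. A minimal sketch under that assumption (not the original project's class) could look like this:

class FileLogger:
    """Sketch: append every message passed via __call__ to a log file."""

    def __init__(self, path):
        self.path = path

    def __call__(self, message):
        # Append so that repeated calls accumulate in the same file.
        with open(self.path, 'a') as f:
            f.write(message)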
Example #2
def process(process_dir=None):
    # Default to every TrainingData instance defined at module level.
    if process_dir is None:
        process_dir = [
            v for v in globals().values() if isinstance(v, TrainingData)
        ]

    # Fan results out to both a plain-text log and an HTML report with thumbnails.
    logger = MultiLogger(
        FileLogger(),
        HtmlLogger(open('output.html', 'w+'), os.path.abspath('thumb')))
    max_size = None  # e.g. (800, 600)
    # Process the 'good' samples first, then the 'bad' ones.
    for obj in process_dir:
        if obj.kind == 'good':
            obj.process(max_size=max_size, logger=logger)
    for obj in process_dir:
        if obj.kind == 'bad':
            obj.process(max_size=max_size, logger=logger)
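Example #2 fans its output out through MultiLogger, combining a plain-text FileLogger with an HtmlLogger that writes 'output.html' and thumbnails. The concrete classes are project-specific; a minimal sketch of such a fan-out wrapper (an assumption, not the actual implementation) would simply forward each result() call to every backend:

class MultiLogger:
    """Sketch: forward every result() call to all wrapped logger backends."""

    def __init__(self, *loggers):
        self.loggers = loggers

    def result(self, imgpath, results, bad_qualities, compare):
        # Each backend renders the same result record in its own format.
        for logger in self.loggers:
            logger.result(imgpath, results, bad_qualities, compare)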
Example #3
    def process(self, max_size=None, logger=None):
        if logger is None:
            logger = FileLogger()
        im = self.load(max_size)
        if im is None:
            logger.result(self.imgpath, {'Error': "Failed to load"},
                          self.bad_qualities, self.compare)
            return None
        remaining = self.measures[:]
        results = {}
        # Rudimentary runner: repeatedly sweep the remaining measures and run
        # only those whose prerequisite results are already available.
        while len(remaining) > 0:
            still_remaining = []
            for mod in remaining:
                # Verify prerequisites; postpone the measure if any result is missing.
                abort = False
                for prereq in mod.requires_result_from:
                    if prereq not in results:
                        still_remaining.append(mod)
                        abort = True
                        break
                if abort:
                    print("Failed to meet req for:", self._get_name(mod))
                    continue

                # Pass the prerequisite results to the measure as keyword arguments.
                kwargs = {}
                for name in mod.requires_result_from:
                    kwargs[name] = results[name]
                results[self._get_name(mod)] = self.execute(im, mod, kwargs)

            # Guard against unsatisfiable dependencies: if no measure ran in
            # this pass, fail with a descriptive message instead of looping forever.
            msg = 'Unresolved dependencies. I have %s to satisfy %s.' % (
                ','.join(results.keys()),
                ','.join('%s(%s)' % (self._get_name(x),
                                     ','.join(x.requires_result_from))
                         for x in remaining))

            assert len(remaining) != len(still_remaining), msg
            remaining = still_remaining

        logger.result(self.imgpath, results, self.bad_qualities, self.compare)
Example #4
from flask import Flask, jsonify, abort
from config_class import Config
from logger import FileLogger, ConsoleLogger
from collector import DbHandler
config = Config()

# get db name from yaml file
db_name = config.db['db_name']

file_logger = FileLogger()
console_logger = ConsoleLogger()
db_handler = DbHandler(db_name=db_name, index_list='')

app = Flask(__name__)


# get_collections
@app.route('/api/indices', methods=['GET'])
def get_collections():
    return jsonify({'collections': db_handler.get_db_collections()})


# get_by_collection_name
@app.route('/api/index/<index>', methods=['GET'])
def get_collection_data(index):
    is_index_exist(index)
    return jsonify({'results': db_handler.get_collection_data(index)})
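

# is_index_exist() is defined elsewhere in this project; a minimal sketch of
# such a helper (an assumption, not the original code) would abort with a 404
# when the requested collection is missing:
def is_index_exist(index):
    if index not in db_handler.get_db_collections():
        abort(404, description='Index %s not found' % index)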


# get_last_entry_from_collection
@app.route('/api/index/<index>/last', methods=['GET'])
Example #5
                subprocess.check_call(args + [INTERPRETER_AUTODETECTION_FLAG],
                                      stdout=stdout,
                                      stderr=STDOUT)
            except CalledProcessError:
                continue
            if INTERPRETER_AUTODETECTION_VERBOSE_FLAG in sys.argv or ALL_AUTODETECTION_VERSION_FLAG in sys.argv:
                print('Re-launching using command line: ' + ' '.join(args))
            # Replace the current process with the detected interpreter.
            os.execv(args[0], args)
    print(
        'Error: One or more required python libraries have not been installed: '
        + str(e))
    sys.exit(1)

if not autodetecting:
    logger.register(ConsoleLogger())
    logger.register(FileLogger())

    if EXECUTABLE_AUTODETECTION_VERSION_FLAG in sys.argv or ALL_AUTODETECTION_VERSION_FLAG in sys.argv:
        import detect
        detect.VERBOSE_DETECT = True

    try:
        PaladinLinuxClient().run(
            mask_exceptions=(EXCEPTION_BACKTRACE_FLAG not in sys.argv and
                             ALL_EXCEPTIONS_BACKTRACE_FLAG not in sys.argv),
            handle_errors=(ALL_EXCEPTIONS_BACKTRACE_FLAG not in sys.argv))
    except KeyboardInterrupt:
        print('')
        sys.exit(1)
    finally:
        logger.shutdown()
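Example #5 assumes a module-level logger object with register() and shutdown() methods that dispatch to whichever backends were registered (ConsoleLogger, FileLogger). A minimal sketch of such a registry (an assumption, not the project's actual logging module):

class LoggerRegistry:
    """Sketch: dispatch log lines to every registered backend."""

    def __init__(self):
        self._backends = []

    def register(self, backend):
        self._backends.append(backend)

    def log(self, message):
        for backend in self._backends:
            backend.log(message)

    def shutdown(self):
        # Let each backend flush and release its resources on exit.
        for backend in self._backends:
            if hasattr(backend, 'close'):
                backend.close()
        self._backends = []


logger = LoggerRegistry()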
Example #6
                        help='how long to wait before shutting down on error')

    parser.add_argument('--short-epoch',
                        action='store_true',
                        help='make epochs short (for debugging)')
    return parser


cudnn.benchmark = True
args = get_parser().parse_args()

# Only want master rank logging to tensorboard
is_master = (not args.distributed) or (dist_utils.env_rank() == 0)
is_rank0 = args.local_rank == 0
tb = TensorboardLogger(args.logdir, is_master=is_master)
log = FileLogger(args.logdir, is_master=is_master, is_rank0=is_rank0)


def main():
    os.system('shutdown -c')  # cancel previous shutdown command
    log.console(args)
    tb.log('sizes/world', dist_utils.env_world_size())

    # need to index validation directory before we start counting the time
    dataloader.sort_ar(args.data + '/validation')

    if args.distributed:
        log.console('Distributed initializing process group')
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
Example #7
log_text_area.pack(fill=BOTH)

log_text_area.bind('<Command-a>', select_all_text)
# log_text_area.bind('<Control-a>', select_all_text)

# ======== Odometry and Measurement Logging Frame ========
frm_pos = Frame(mw,
                width=511,
                height=330,
                highlightbackground='gray',
                highlightcolor='black',
                highlightthickness=2)
frm_pos.place(x=603, y=430, width=511, height=330)

pos_text_area = ScrolledText(
    master=frm_pos,
    # wrap=mw.WORD,
    highlightthickness=0)
pos_text_area.configure(state='disabled')
pos_text_area.pack(fill=BOTH)

pos_text_area.bind('<Command-a>', select_all_text)

setup_window_control(
    mw, Logger(log_text_area), FileLogger(pos_text_area, LOG_FILE),
    (ax_traj, fig_traj, fig_photo_traj, fig_canvas_agg_traj),
    (ax_vstream, fig_vstream, fig_photo_vstream, fig_canvas_agg_vstream),
    NAO_IP, NAO_PORT)
setup_menu(mw)
mw.mainloop()
Example #8
    def __init__(self, logger=None):
        self.request_count = 0
        self.tasks = {}
        # Fall back to a FileLogger when no logger is supplied.
        self.logger = logger or FileLogger()
        self.logger.log('--------------Server started--------------')
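Example #8 shows only the constructor; assuming it belongs to a server class (called Server below purely for illustration), callers can rely on the FileLogger default or inject a different logger:

# 'Server' is an illustrative name for the class that owns the __init__ above.
server = Server()                              # logs to a file via the default FileLogger()
debug_server = Server(logger=ConsoleLogger())  # send log output to the console instead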