示例#1
0
File: main.py  Project: meowpunch/LENNA
def pool_parallel(destination, p_num=4):
    """Spawn `p_num` worker processes, wait for them, then merge their output.

    Each Worker writes a shard next to `destination`; collect_df merges the
    shards into `destination` afterwards.

    Args:
        destination: base path the workers and the merged CSV write to.
        p_num: number of child processes (and shards) to use.
    """
    # generate child processes.
    # BUGFIX: close()/join() must run INSIDE the `with` block —
    # multiprocessing.Pool.__exit__ calls terminate(), so the original
    # close()/join() placed after the block acted on a terminated pool.
    with MyPool(p_num) as pool:
        pool.map(
            Worker(load=load_dataset(batch_size=64), destination=destination),
            range(p_num))

        # wait for all child processes to finish their work
        pool.close()
        pool.join()

    # collect data
    collect_df(destination=destination, num=p_num)
    init_logger().info(
        "success to collect data into '{dest}'".format(dest=destination))
示例#2
0
File: main.py  Project: orange9426/FOGs
def main():
    """Parse run arguments, build the environment/solver pair, and run once."""
    # Arguments for this run
    args = parse_args()

    # One log per (environment, solver) combination
    logger.init_logger(args['env'], args['solver'])

    # Resolve the environment and solver classes by name, then instantiate
    environment = getattr(env_module, args['env'])()
    chosen_solver = getattr(solver_module, args['solver'])(environment, args)

    # Execute the experiment
    run(chosen_solver, args)
示例#3
0
    def __init__(self,
                 x_train,
                 y_train,
                 bucket_name,
                 grid_params=None,
                 score=mean_squared_error,
                 estimator=ElasticNet):
        """Grid-search wrapper: keep the training data and hand the search
        configuration to the parent class.

        Raises:
            ValueError: when no `grid_params` mapping is supplied.
        """
        if grid_params is None:
            raise ValueError("grid params are needed")

        # training data and scoring function
        self.x_train, self.y_train = x_train, y_train
        self.scorer = score

        # filled in later by the evaluation code
        self.error = None  # pd.Series
        self.metric = None

        # lazily-initialized S3 handle
        self.s3_manager = None

        # logger
        self.logger = init_logger()

        # lower score is better, hence greater_is_better=False
        scoring = make_scorer(self.scorer, greater_is_better=False)
        super().__init__(estimator=estimator(),
                         param_grid=grid_params,
                         scoring=scoring)
示例#4
0
    def __init__(self, dataset_name):
        """Build the dataset for `dataset_name` via the preprocessing pipeline."""
        self.logger = init_logger()
        # month-year stamp, e.g. "052024"
        self.date = datetime.datetime.now().strftime("%m%Y")

        preprocessor = PreProcessor(filename=dataset_name)
        self.PreProcessor = preprocessor
        self.dataset = preprocessor.process()
示例#5
0
    def __init__(self,
                 x_train,
                 y_train,
                 bucket_name,
                 grid_params=None,
                 score=mean_squared_error):
        """ElasticNet grid search with a sensible default parameter grid."""
        # fall back to a default search space when none is given
        default_grid = {
            "max_iter": [1, 5, 10],
            "alpha": [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100],
            "l1_ratio": np.arange(0.0, 1.0, 0.1)
        }
        if grid_params is None:
            grid_params = default_grid

        # training data and scoring function
        self.x_train, self.y_train = x_train, y_train
        self.scorer = score

        # filled in later by the evaluation code
        self.error = None  # pd.Series
        self.metric = None

        # lazily-initialized S3 handle
        self.s3_manager = None

        # logger
        self.logger = init_logger()

        # parent performs the actual grid-search setup for ElasticNet
        super().__init__(x_train, y_train, bucket_name, grid_params, score,
                         ElasticNet)
示例#6
0
    def __init__(self,
                 bucket_name,
                 x_train,
                 y_train,
                 params=None,
                 estimator=ElasticNet):
        """Hold a model built from `estimator` together with its training data."""
        # logger
        self.logger = init_logger()

        # lazily-initialized S3 handle
        self.s3_manager = None

        # build the model, with explicit hyper-parameters when provided
        self.model = estimator() if params is None else estimator(**params)

        self.x_train, self.y_train = x_train, y_train

        # result slots, populated after prediction / evaluation
        self.x_true = None
        self.y_true = None
        self.pred = None
        self.error = None
        self.err_ratio = None
        self.metric = None
示例#7
0
    def __init__(self,
                 block_type,
                 input_channel,
                 num_layers,
                 dataset,
                 gpu_id=0,
                 parallel=False):
        """Build a LennaNet evaluator on the given dataset and device.

        Args:
            block_type: 0 -> reduction block, 1 -> normal block.
            input_channel: number of input channels for the network.
            num_layers: layer count passed through to LennaNet.
            dataset: iterable used as the test loader.
            gpu_id: CUDA device index to pin this process to.
            parallel: unused here; kept for interface compatibility.
        """
        self.logger = init_logger()

        # dataset
        self.test_loader = dataset
        self.model = LennaNet(num_blocks=[1],
                              num_layers=num_layers,
                              normal_ops=normal_ops,
                              reduction_ops=reduction_ops,
                              block_type=block_type,
                              input_channel=input_channel,
                              n_classes=10)  # for cifar10

        # allocate 4 processes to 4 gpu respectively.
        # BUGFIX: fall back to 'cpu' (the original fell back to 'cuda',
        # which fails on machines without CUDA).
        device = 'cuda:{}'.format(
            gpu_id) if torch.cuda.is_available() else 'cpu'
        self.logger.info("assign to {}".format(device))

        self.device = torch.device(device)
        self.model.to(self.device)
        if device == 'cuda:{}'.format(gpu_id):
            # self.p_model = torch.nn.DataParallel(module=self.model)
            cudnn.benchmark = True
        self.model.eval()
示例#8
0
    def __init__(self,
                 x_train,
                 y_train,
                 bucket_name,
                 grid_params=None,
                 score=mean_squared_error):
        """MLP grid search: stores the data and configures the parent search.

        Raises:
            ValueError: when no `grid_params` mapping is supplied.
        """
        if grid_params is None:
            raise ValueError("grid params are needed")

        # training data and scoring function
        self.x_train, self.y_train = x_train, y_train
        self.scorer = score

        # filled in later by the evaluation code
        self.error = None  # pd.Series
        self.metric = None

        # lazily-initialized S3 handle
        self.s3_manager = None

        # logger
        self.logger = init_logger()

        # NOTE: the parent search scores with 'neg_mean_absolute_error',
        # independent of self.scorer
        super().__init__(
            estimator=MLPRegressor(learning_rate='adaptive'),
            param_grid=grid_params,
            scoring='neg_mean_absolute_error',
        )
示例#9
0
 def __init__(self, counts=1000, size=(256, 16, 224, 224)):
     """Benchmark fixture: a random sparse input tensor plus candidate models."""
     self.logger = init_logger()
     self.counts = counts
     # size = batch_size(32 or 64) X depth X width X height;
     # roughly 20% of the entries end up as 1.0, the rest as 0.0
     sparse_mask = torch.rand(size=size).uniform_() > 0.8
     self.X = sparse_mask.float()
     # subset of the candidate models currently being measured
     self.models = [MyModel1(), MyModel3()]
示例#10
0
File: main.py  Project: meowpunch/LENNA
def main(arg="parallel"):
    """Entry point: generate training data in parallel (default) or serially.

    Args:
        arg: "parallel" to fan out over worker processes, anything else to
            run a single-process generation.

    BUGFIX: the original used `arg is "parallel"` — identity comparison with
    a string literal is implementation-dependent; use equality instead.
    """
    destination = "training_data/data"
    if arg == "parallel":
        logger = init_logger()
        logger.info("director id: %s" % (os.getpid()))
        parallel(destination=destination, outer_loop=2, inner_loop=1, p_num=4)
    else:
        single(destination=destination)
示例#11
0
 def __init__(self, sub_pid=0, g_type="random", b_type=None, in_ch=None):
     """Per-subprocess generator setup.

     Notes on the generator arguments (consumed elsewhere):
         block type: 0 -> reduction, 1 -> normal
         input_channel: 1~1000
         num_layers: fixed at 5 or 6
     """
     self.sub_pid = sub_pid
     self.logger = init_logger()
示例#12
0
    def __init__(self, config, maze):
        """An agent living in `maze`, configured by `config`."""
        self._logger = init_logger(self)
        self._config, self._maze = config, maze
        self._training_mode = None

        # identity and position within the maze
        self.id = 1
        self.location = None

        # movement hooks observers can subscribe to
        self.before_move = Event()
        self.after_move = Event()
示例#13
0
 def __init__(self, config, maze):
     """An agent living in `maze`, configured by `config`."""
     self._logger = init_logger(self)
     self._config, self._maze = config, maze
     self._training_mode = None

     # identity and position within the maze
     self.id = 1
     self.location = None

     # movement hooks observers can subscribe to
     self.before_move = Event()
     self.after_move = Event()
示例#14
0
File: main.py  Project: meowpunch/LENNA
def collect_df(destination, num):
    """Merge the per-worker CSV shards `destination + str(i)` into `destination`.

    The shards are concatenated, written/appended to `destination`, the result
    is logged, and the shards are deleted.

    Args:
        destination: path of the merged CSV; shards live at destination+"0"… .
        num: number of shards to merge.

    Returns:
        int: 0 on completion.
    """
    # collect df from csv files
    combined_df = pd.concat(
        [pd.read_csv(destination + str(i)) for i in range(num)], axis=0)

    # save.
    # BUGFIX: the original branches were inverted — an existing file was
    # overwritten ('w' + header) while a missing file was "appended" to
    # without a header, producing a header-less CSV that the read_csv below
    # would misparse. Create a new file with a header; append without one.
    if os.path.isfile(destination):
        combined_df.to_csv(destination, mode='a', index=False, header=False)
    else:
        combined_df.to_csv(destination, mode='w', index=False, header=True)

    # check
    init_logger().info("final saved df's tail 5: \n{df}".format(
        df=pd.read_csv(destination).tail(5)))

    # delete the merged shards
    for i in range(num):
        shard = destination + str(i)
        if os.path.exists(shard):
            os.remove(shard)

    return 0
示例#15
0
File: index.py  Project: qorzj/damahouzi
def global_wrapper(labor):
    """
    Request wrapper: attaches a logger and DB session to the request context,
    runs `labor`, and returns its result serialized as JSON.
    """
    web.ctx.log = logger.init_logger()
    session = engine.Session()
    web.ctx.db = session
    try:
        ret = labor()
    finally:
        # always release the session, even when labor() raises
        session.close()
    # JSON response with permissive CORS
    web.header('Content-Type', 'application/json')
    web.header('Access-Control-Allow-Origin', '*')
    return json.dumps(ret, cls=engine.AlchemyEncoder)
示例#16
0
def draw_hist(s, h_type: str = "dist", name: str = None):
    """Draw a seaborn histogram of `s` (a Series, or an iterable of Series).

    For a single Series the plot is shown directly; for an iterable (jupyter
    use) the list of plotted axes is returned instead.
    """
    plt.figure()
    # supported histogram flavours
    method = {
        "dist": sns.distplot,
        "count": sns.countplot,
    }.get(h_type)
    if method is None:
        # TODO: handle exception
        init_logger().critical(
            "histogram type '{h_type}' is not supported".format(h_type=h_type))
        sys.exit()

    if isinstance(s, pd.Series):
        plt.title('{name} histogram'.format(name=s.name))
        method(s)
        plt.show()
    else:
        # for jupyter notebook
        plt.title('{name} histogram'.format(name=name))
        return [method(series) for series in s]
示例#17
0
    def __init__(self, model, dataset, gpu_id=0, parallel=False):
        """Pin `model` to a device and prepare it for evaluation on `dataset`.

        Args:
            model: the network to evaluate.
            dataset: iterable used as the test loader.
            gpu_id: CUDA device index to pin this process to.
            parallel: unused here; kept for interface compatibility.
        """
        self.logger = init_logger()

        # dataset
        self.test_loader = dataset

        # model
        self.model = model

        # allocate 4 processes to 4 gpu respectively.
        # BUGFIX: fall back to 'cpu' (the original fell back to 'cuda',
        # which fails on machines without CUDA).
        device = 'cuda:{}'.format(gpu_id) if torch.cuda.is_available() else 'cpu'
        self.logger.info("assign to {}".format(device))

        self.device = torch.device(device)
        torch.cuda.set_device(self.device)

        self.model.to(self.device)
        if device == 'cuda:{}'.format(gpu_id):
            # self.p_model = torch.nn.DataParallel(module=self.model)
            cudnn.benchmark = True
        self.model.eval()
示例#18
0
File: core.py  Project: meowpunch/LENNA
    def __init__(self, idx, destination, lock):
        """Worker that generates data rows and writes them near `destination`."""
        self.logger = init_logger()
        self.sub_pid = idx
        self.lock = lock

        # candidate convolution operations shared by both cell types
        conv_ops = [
            '3x3_Conv',
            '5x5_Conv',
            '3x3_ConvDW',
            '5x5_ConvDW',
            '3x3_dConv',
            '5x5_dConv',
            '3x3_dConvDW',
            '5x5_dConvDW',
        ]
        # normal cells: 3x3 pooling + identity ('Zero' intentionally excluded)
        self.normal_ops = conv_ops + [
            '3x3_maxpool',
            '3x3_avgpool',
            'Identity',
        ]
        # reduction cells: 2x2 pooling
        self.reduction_ops = conv_ops + [
            '2x2_maxpool',
            '2x2_avgpool',
        ]

        self.destination = destination  # + str(idx)

        # populated later with the generated rows
        self.df = None
示例#19
0
import json

from index import app
from util import logger

# module-level logger shared by every test in this file
LOG = logger.init_logger()


def test_normal_scene():
    """End-to-end: register a URL, follow it, clear storage, verify it is gone."""
    parsec_url = 'http://www.parsec.com.cn'

    # register the URL and grab the generated key
    ret = app.request('/add', 'POST', {'url': parsec_url})
    LOG.info(ret)
    payload = json.loads(ret.data)
    assert payload['code'] == 0
    key = payload['key']

    # the short link must redirect to the original URL
    ret = app.request('/_' + key)
    LOG.info(ret)
    assert ret.status == '303 See Other' and ret.headers['Location'] == parsec_url

    # wipe all stored links
    ret = app.request('/manager/clear', 'POST')
    assert json.loads(ret.data)['code'] == 0

    # the key must no longer resolve
    ret = app.request('/_' + key)
    assert json.loads(ret.data)['code'] == 1


def setup():
示例#20
0
                             'frames_processed':
                             frames_processed,
                             'frame_rate':
                             processing_frame_rate,
                             'frames_left':
                             frames_count - frames_processed,
                             'percentage_processed':
                             round((frames_processed / frames_count) * 100, 2),
                         },
                     })

        ret, frame = cap.read()

    # end capture, close window, close log file and video object if any
    cap.release()
    if not headless:
        cv2.destroyAllWindows()
    if record:
        output_video.release()
    logger.info('Processing ended.', extra={'meta': {'label': 'END_PROCESS'}})


if __name__ == '__main__':
    # Load .env first so every import below sees the configured variables.
    from dotenv import load_dotenv
    load_dotenv()

    # Imported only after load_dotenv() so logging config can read the env.
    from util.logger import init_logger
    init_logger()

    run()
示例#21
0
OS: {}
Python: {}
System: {} on {}/{}
Working directory: {}
Commandline: {}
============================
Log file: {}
============================
    """.format(version.__version__, version.__release_date__,
               platform.platform(), sys.version, getpass.getuser(), hostname,
               ip, get_runtime_path(), ' '.join(sys.argv),
               logger.current_log_file_path)

    return preamble


# Actual bootstrapping code, executed when this module is imported
try:
    if not BOOTSTRAP_DONE:
        import util.logger as logger
        logger.init_logger(get_runtime_path())
        _logger = logger.get_logger(__name__)
        # This will print on the console as well as the log file, so we know when a new execution started
        _logger.info(get_preamble())
        BOOTSTRAP_DONE = True
    else:
        _logger.debug("Bootstrap already complete")
# BUGFIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
# catch only ordinary exceptions so interrupts still propagate.
except Exception:
    traceback.print_exc()
    sys.stderr.write('Failed to bootstrap the application.\n')
示例#22
0
 def __init__(self):
     """Module wrapper that records per-forward latency measurements."""
     self._size_list = None
     self.logger = init_logger()
     # one latency value appended per profiled forward pass
     self.latency_list = []
     # parent initialized last, preserving the original call order
     super(MyModule, self).__init__()
示例#23
0
def test_init_logger():
    """init_logger() must register a logger under `job_id` exactly once called."""
    registry = logging.root.manager.loggerDict
    assert job_id not in registry, "job id is not initialized"

    init_logger()
    assert job_id in registry, "job id is initialized"
示例#24
0
def _sweep_n_sims(env, args, lo=50, hi=1050, step=50):
    """Run the solver named by args['solver'] for each n_sims in
    range(lo, hi, step) and return the mean undiscounted return per run."""
    history = []
    for n_sims in range(lo, hi, step):
        args['n_sims'] = n_sims

        solver = getattr(solver_module, args['solver'])(env, args)
        result = run(solver, args)
        history.append(result.undiscounted_return.mean)
    return history


def main():
    """Compare POMCP and MEPOP on the Tiger environment over a range of
    simulation budgets and write both learning curves to CSV."""
    # Baseline arguments shared by every run
    args = {
        'env': 'Tiger',
        'solver': 'MEPOP',
        'discount': 1,
        'n_epochs': 1000,
        'quiet': True,
        'n_sims': 100,
        'n_start_states': 200,
        'min_particle_count': 100,
        'max_particle_count': 200,
        'max_depth': 100,
        'uct_coefficient': 100,
        'me_tau': 40,
        'me_epsilon': 0
    }

    env = getattr(env_module, args['env'])()

    # NOTE: earlier experiments also swept 'uct_coefficient' (POMCP) and
    # 'me_tau'/'me_epsilon' (MEPOP) here; re-add a sweep over `args` before
    # calling _sweep_n_sims if those runs are needed again.

    # Test the POMCP
    args['solver'] = 'POMCP'
    logger.init_logger(args['env'], args['solver'])
    pomcp_history = _sweep_n_sims(env, args)

    # Test the MEPOP
    args['solver'] = 'MEPOP'
    logger.init_logger(args['env'], args['solver'])
    mepop_history = _sweep_n_sims(env, args)

    df = pd.DataFrame({'pomcp': pomcp_history, 'mepop': mepop_history})
    # BUGFIX: the original wrapped to_csv in a no-op `with open(path, 'w'):`
    # that truncated the file only to have to_csv reopen the same path;
    # write it once directly.
    df.to_csv('results/csv/tiger_pomcp_and_mepop.csv')