def setUp(self):
    init_logger()
    try:
        os.chdir(PACKAGE_DIRECTORY_COM)
    except OSError:
        pass
    # pylint: disable=line-too-long
    storage_command_generator.USER_NAME = "test-user"
    storage_command_generator.JOB_NAME = "job"
    storage_command_generator.STORAGE_CONFIGS = "[\"STORAGE_NFS\", \"STORAGE_TEST\", \"STORAGE_SAMBA\", \"STORAGE_AZURE_FILE\", \"STORAGE_AZURE_BLOB\"]"
    storage_command_generator.KUBE_APISERVER_ADDRESS = "http://api_server_url:8080"
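Patching module globals this way leaks state across tests unless something restores them; a self-cleaning alternative is unittest.mock.patch.object. A sketch (the test class and method names are placeholders, not part of the original):

from unittest import mock

class TestStorageCommands(unittest.TestCase):
    @mock.patch.object(storage_command_generator, "USER_NAME", "test-user")
    @mock.patch.object(storage_command_generator, "JOB_NAME", "job")
    def test_generate(self):
        # Each global is patched for this test only and restored on exit.
        ...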
Example #2
import argparse

import yaml

from common.utils import init_logger


def plugin_init():
    init_logger()
    parser = argparse.ArgumentParser()
    parser.add_argument("plugin_config",
                        help="plugin config for runtime plugin in yaml")
    parser.add_argument("pre_script", help="script for pre commands")
    parser.add_argument("post_script", help="script for post commands")
    args = parser.parse_args()

    plugin_config = yaml.safe_load(args.plugin_config)

    return [plugin_config, args.pre_script, args.post_script]
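
A usage sketch (assumed; the script name and config contents are placeholders). The plugin config arrives inline as a YAML string, followed by the two script paths:

# python runtime_plugin.py "parameters: {failOnError: true}" pre.sh post.sh
plugin_config, pre_script, post_script = plugin_init()
print(plugin_config["parameters"])  # {'failOnError': True}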
Example #3
        :param test_size: size of the test set; must be a float between 0 and 1
        :param random_state: random seed
        :param shuffle: whether the data should be shuffled before splitting
        :param stratify: if True, the data is split in a stratified fashion
        :return: None
        """
        train_x, test_x, train_y, test_y = \
            self.get_train_test_split(test_size=test_size,
                                      random_state=random_state,
                                      shuffle=shuffle,
                                      stratify=stratify)
        self.save_train_test_sets(inputs=(train_x, test_x),
                                  targets=(train_y, test_y),
                                  names=('training', 'test'))


if __name__ == '__main__':
    cfg = load_config('./conf/conf.yaml')
    init_logger(cfg['logging'], cfg['logging']['name'])
    prep = Preprocessor(cfg['data']['raw_path'],
                        cfg['preprocess']['criteria'],
                        cfg['data']['file_name_format'],
                        cfg['data']['classes_list'],
                        cfg['preprocess']['classes_ranges'],
                        cfg['preprocess']['dest_path'])
    prep.run(test_size=cfg['preprocess']['test_size'],
             random_state=cfg['preprocess']['random_state'],
             shuffle=cfg['preprocess']['shuffle'],
             stratify=True)
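
The __main__ block implies this conf.yaml layout. A minimal sketch of the keys it reads, with placeholder values:

cfg = {
    'logging': {'name': 'preprocessor'},
    'data': {
        'raw_path': 'data/raw',
        'file_name_format': '{label}_{index}.csv',
        'classes_list': ['class_a', 'class_b'],
    },
    'preprocess': {
        'criteria': 'length',
        'classes_ranges': [(0, 50), (50, 100)],
        'dest_path': 'data/processed',
        'test_size': 0.2,
        'random_state': 42,
        'shuffle': True,
    },
}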

Example #4
import functools
import os
import sys
import unittest

import yaml

# pylint: disable=wrong-import-position
sys.path.append(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "../src"))
sys.path.append(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "../src/init.d"))
import image_checker
from common.utils import init_logger
# pylint: enable=wrong-import-position

PACKAGE_DIRECTORY_COM = os.path.dirname(os.path.abspath(__file__))
init_logger()


# pylint: disable=protected-access
def prepare_image_check(job_config_path):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            os.environ["FC_TASKROLE_NAME"] = "worker"
            if os.path.exists(job_config_path):
                with open(job_config_path, 'r') as f:
                    self.config = yaml.load(f, Loader=yaml.FullLoader)
                func(self, *args, **kwargs)
            del os.environ["FC_TASKROLE_NAME"]

        return wrapper

    return decorator
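
A usage sketch for the decorator (the test class, method name, and config path are placeholders):

class TestImageCheck(unittest.TestCase):
    @prepare_image_check("data/job_config.yaml")
    def test_config_loaded(self):
        # The wrapper has set FC_TASKROLE_NAME and loaded the YAML file
        # into self.config before this body runs.
        self.assertIsInstance(self.config, dict)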

Example #5

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("job_config", help="job config yaml")
    parser.add_argument("secret_file", help="secret file path")
    args = parser.parse_args()

    LOGGER.info("get job config from %s", args.job_config)
    with open(args.job_config) as config:
        job_config = yaml.safe_load(config)

    if not os.path.isfile(args.secret_file):
        job_secret = None
    else:
        with open(args.secret_file) as f:
            job_secret = yaml.safe_load(f.read())

    LOGGER.info("Start checking docker image")
    image_checker = ImageChecker(job_config, job_secret)
    try:
        if not image_checker.is_docker_image_accessible():
            sys.exit(1)
    except Exception:  #pylint: disable=broad-except
        LOGGER.warning("Failed to check image", exc_info=True)


if __name__ == "__main__":
    utils.init_logger()
    main()
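
main() relies on only this much of the ImageChecker interface; a skeleton inferred from the call sites above (the real implementation lives in the imported image_checker module):

class ImageChecker:
    def __init__(self, job_config, job_secret):
        self.job_config = job_config
        self.job_secret = job_secret  # None when no secret file exists

    def is_docker_image_accessible(self):
        # e.g. ask the registry whether the image manifest is reachable
        raise NotImplementedError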
Example #6
        block_list = Block.objects.all()
        logger.debug('Block list count {}'.format(len(block_list)))

        block_chain = BlockchainFactory.build_blockchain(block_list)
        logger.debug('Last block hash {}'.format(block_chain.last_block_hash))

        with open(settings['builder_tree_path'], 'w') as file:
            file.write(json.dumps(block_chain.__dict__))

        logger.info('Finished blockchain tree building')
    except Exception as e:
        logger.exception(e)


if __name__ == "__main__":
    init_logger(settings)
    logger.info('Starting the application')

    app = tornado.web.Application([
        (r"/", BuilderListener),
    ])
    app.listen(settings['builder_server']['port'])

    io_loop = tornado.ioloop.IOLoop.current()
    scheduler = tornado.ioloop.PeriodicCallback(build_tree, 60000)  # every 60 s

    logger.info('Listening for connections on port {}'.format(
        settings['builder_server']['port']))
    scheduler.start()
    io_loop.start()
Example #7
'''
Created on May 22, 2016

@author: ajaniv
'''
import logging
import time
import Queue  # Python 2 stdlib; renamed to "queue" in Python 3
from common import utils
logger = logging.getLogger(__name__)

utils.init_logger(logger)


def read_file_using_queues(i, work_queue, results_queue):
    while not work_queue.empty():
        logger.debug('%s: fetching next file', i)
        try:
            file_name = work_queue.get(block=False)
            logger.debug('%s: loading: %s', i, file_name)
            time.sleep(i + 2)  # simulate a slow file read
            results_queue.put((0, i, file_name))
            work_queue.task_done()
        except Queue.Empty:
            pass
    logger.debug('%s: exiting', i)


def read_file_wrapper(a_b):
    # Unpack the argument tuple for pool.map-style APIs, which pass
    # exactly one argument per call.
    return read_file(*a_b)
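
A usage sketch wiring the reader to worker threads (file names and thread count are placeholders; like the snippet, this is Python 2, hence the capitalized Queue module):

import threading

work_queue = Queue.Queue()
results_queue = Queue.Queue()
for file_name in ['a.csv', 'b.csv', 'c.csv']:
    work_queue.put(file_name)

workers = [threading.Thread(target=read_file_using_queues,
                            args=(i, work_queue, results_queue))
           for i in range(2)]
for worker in workers:
    worker.start()
work_queue.join()  # returns once every item has been marked task_done()
while not results_queue.empty():
    status, worker_id, file_name = results_queue.get()
    logger.debug('worker %s finished %s with status %s', worker_id,
                 file_name, status)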
Example #8
from models.train import distributed_train, test
from models.utils import get_model
from viz.training_plots import training_plots

print = functools.partial(print, flush=True)
torch.set_printoptions(linewidth=120)

# ------------------------------------------------------------------------------
# Setups
# ------------------------------------------------------------------------------

args = Arguments(argparser())
hook = sy.TorchHook(torch)
device = get_device(args)
paths = get_paths(args, distributed=True)
log_file, std_out = init_logger(paths.log_file, args.dry_run, args.load_model)
if os.path.exists(paths.tb_path):
    shutil.rmtree(paths.tb_path)
tb = SummaryWriter(paths.tb_path)

print('+' * 80)
print(paths.model_name)
print('+' * 80)

print(args.__dict__)
print('+' * 80)

# prepare graph and data
_, workers = get_fl_graph(hook, args.num_workers)
print('Loading data: {}'.format(paths.data_path))
with open(paths.data_path, 'rb') as f:
    X_trains, _, y_trains, _, meta = pkl.load(f)
Example #9
ap.add_argument("--dataset", required=True, type=str)
ap.add_argument("--num-workers", required=True, type=int)
ap.add_argument("--non-iid", required=True, type=int)
ap.add_argument("--repeat", required=False, type=int, default=True)
ap.add_argument("--shuffle", required=False, type=booltype, default=True)
ap.add_argument("--stratify", required=False, type=booltype, default=True)
ap.add_argument("--uniform-data", required=False, type=booltype, default=False)
ap.add_argument("--dry-run", required=False, type=booltype, default=False)

args = vars(ap.parse_args())
args = Struct(**args)
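
# Sketches of the two helpers used above (assumed; neither is shown in
# this snippet): booltype lets argparse accept true/false strings, and
# Struct exposes the parsed-argument dict as attributes.
def booltype(value):
    return str(value).lower() in ('true', '1', 'yes')

class Struct:
    def __init__(self, **entries):
        self.__dict__.update(entries)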

filename = get_data_path(cfg.ckpt_path, args)
folder = '{}_{}'.format(args.dataset, args.num_workers)
log_file, std_out = init_logger(
    os.path.join(cfg.ckpt_path, folder,
                 'logs/data_non_iid_{}.log'.format(args.non_iid)))

num_train = cfg.num_trains[args.dataset]
num_test = cfg.num_tests[args.dataset]
num_classes = cfg.output_sizes[args.dataset]

kwargs = {}

train_loader = get_trainloader(args.dataset, num_train)
test_loader = get_testloader(args.dataset, num_test)

# get_trainloader(dataset, num_train) appears to yield the entire training
# set as a single batch, so the loop captures that one batch.
for data, target in train_loader:
    X_train = data
    y_train = target
Example #10
from models.multi_class_hinge_loss import multiClassHingeLoss
from models.utils import get_model
from models.train import test, sdirs_approximation

print = functools.partial(print, flush=True)
torch.set_printoptions(linewidth=120)

# ------------------------------------------------------------------------------
# Setups
# ------------------------------------------------------------------------------

args = Arguments(argparser())
hook = sy.TorchHook(torch)
device = get_device(args)
paths = get_paths(args)
log_file, std_out = init_logger(paths.log_file, args.dry_run)
if os.path.exists(paths.tb_path):
    shutil.rmtree(paths.tb_path)
tb = SummaryWriter(paths.tb_path)

print('+' * 80)
print(paths.model_name)
print('+' * 80)

print(args.__dict__)
print('+' * 80)

if args.batch_size == 0:
    # A batch size of 0 selects full-batch training.
    args.batch_size = args.num_train
    print("Resetting batch size: {}...".format(args.batch_size))