Example No. 1
def create_task():
    """PUT for kanban new task"""
    print("creating...")
    task_id, user, date = tasks.create_task(body=request.form.get('text'),
                                            user=request.form.get('user'))
    new_task = {"id": task_id, "user": user, "date": date}
    return new_task
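
The handler above is shown without its route registration. A minimal sketch of the wiring, assuming a Flask app object and a '/tasks' URL rule, neither of which appears in the original:

# Hypothetical wiring; the app object, URL rule, and tasks import are assumptions.
from flask import Flask, request

import tasks  # assumed module exposing tasks.create_task

app = Flask(__name__)


@app.route('/tasks', methods=['PUT'])
def create_task():
    """PUT route for creating a new kanban task."""
    task_id, user, date = tasks.create_task(body=request.form.get('text'),
                                            user=request.form.get('user'))
    # Flask (>= 1.1) serializes a returned dict to a JSON response.
    return {"id": task_id, "user": user, "date": date}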
Example No. 2
def train(args):
    """
    Train main function.
    """
    if args.is_distributed:
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)

        dev_count = fluid.core.get_cuda_device_count()
        gpu_id = int(os.getenv("FLAGS_selected_gpus"))
        trainers_num = fleet.worker_num()
        trainer_id = fleet.worker_index()
    else:
        dev_count = 1
        gpu_id = 0
        trainers_num = 1
        trainer_id = 0
    place = fluid.CUDAPlace(gpu_id)

    task = tasks.create_task(args)
    model = models.create_model(args, place)
    train_generator = task.reader.data_generator(input_file=args.train_file,
                                                 num_epochs=args.num_epochs,
                                                 num_part=trainers_num,
                                                 part_id=trainer_id,
                                                 phase="train")
    valid_generator = task.reader.data_generator(
        input_file=args.valid_file,
        num_part=dev_count,
        part_id=gpu_id,
        phase="distributed_valid" if args.is_distributed else "valid")

    # run training
    model_timer = Timer()
    for step, data in enumerate(train_generator(), 1):
        model_timer.start()
        metrics = task.train_step(model, data)
        model_timer.pause()
        if step % args.log_steps == 0:
            time_cost = model_timer.pass_time
            current_epoch, current_file_index, total_file = \
                task.reader.get_train_progress()
            print(
                f"[train][{current_epoch}] progress: {current_file_index}/{total_file} "
                f"step: {step}, time: {time_cost:.3f}, "
                f"speed: {args.log_steps / time_cost:.3f} steps/s")
            print("\tcurrent lr:", metrics.pop('scheduled_lr'))
            print("\t" + task.show_metrics(metrics))
            model_timer.reset()

        if step % args.validation_steps == 0:
            evaluate(task, model, valid_generator, args, dev_count, gpu_id)

        if step % args.save_steps == 0:
            save_path = f"{args.save_path}/step_{step}"
            model.save(save_path, is_checkpoint=True)
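
The training loop relies on a Timer with start(), pause(), reset(), and an accumulated pass_time. The real class ships with the project, so this is only a minimal sketch consistent with that interface:

import time

class Timer:
    """Accumulating stopwatch: pass_time sums every start()/pause() span."""

    def __init__(self):
        self._pass_time = 0.0
        self._start = None

    def start(self):
        self._start = time.time()

    def pause(self):
        if self._start is not None:
            self._pass_time += time.time() - self._start
            self._start = None

    def reset(self):
        self._pass_time = 0.0

    @property
    def pass_time(self):
        # Include the currently running span, if any.
        if self._start is not None:
            return self._pass_time + time.time() - self._start
        return self._pass_time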
Example No. 3
def setup(db):
    """Create the app"""
    kanban = Flask(__name__)
    kanban.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///kanban.db'
    kanban.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(kanban)
    kanban.app_context().push()
    db.create_all()
    if db.session.query(Task).first() is None:
        try:
            pre_data = cipher.read_from_file()
            for task in pre_data:
                tasks.create_task(body=task["body"],
                                  column=task["column"],
                                  sort_order=task["sort_order"],
                                  user=task["user"],
                                  modified=task["modified"])
        except Exception:
            # Seed data is optional; ignore any failure loading it.
            pass
    return kanban
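
A hedged usage sketch, assuming db is the shared flask_sqlalchemy instance that the module normally imports:

# Hypothetical entry point; in the real package the SQLAlchemy instance
# and the models live in their own modules.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()
kanban = setup(db)
kanban.run(debug=True)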
Example No. 4
def save(args):
    """
    Inference main function.
    """
    dev_count = 1
    gpu_id = 0
    place = fluid.CUDAPlace(gpu_id)

    task = tasks.create_task(args)
    model = models.create_model(args, place)
    model.save_infer_model(args.nsp_inference_model_path)
    return
Example No. 5
def create_task():
    """Route for creating a new task"""

    # Get form data
    task_data = {
        'user_id': g.user.id,
        'description': request.form.get('description'),
        'due_date': request.form.get('due_date'),
        'priority': request.form.get('priority')
    }

    # Input validation: collect any missing fields
    required = [key for key, value in task_data.items() if not value]

    if required:  # At least one field is missing
        return error_handlers.bad_request(", ".join(required) + " required")

    # Input is valid
    new_task_id = tasks.create_task(task_data)
    response = {}
    links = None

    if new_task_id:
        links = '%s/%s' % (request.url, new_task_id)
        response['data'] = {
            'id': new_task_id,
            'type': 'tasks',
            'attributes': task_data,
            'links': {
                'self': links
            }
        }
        status = 201
    else:
        status = 202

    resp = jsonify(response)

    # JSON:API headers
    resp.mimetype = 'application/vnd.api+json'
    if links:  # Location is only meaningful when the task was created
        resp.location = links
    resp.status_code = status

    return resp
Example No. 6
def randomTaskGenerator():
    """Generate random sayings for random people"""
    with app.app_context():
        print("Making random comments")
        people = ["Arthur", "Marvin", "Ford", "Deep Thought"]
        sayings = [
            "Don't Panic.", "Space is big.", "Where's your towel?", "42",
            "I don't know why I bother..."
        ]
        while not thread_stop_event.is_set():
            person = random.choice(people)
            saying = random.choice(sayings)
            print(f"{person} says '{saying}'")
            task_id, user, date = tasks.create_task(body=saying, user=person)
            socketio.emit('new_event', {
                "id": task_id,
                "user": user,
                "body": saying
            },
                          namespace='/test')
            socketio.sleep(5)
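
A hedged sketch of how this generator is usually launched with Flask-SocketIO; the connect handler and the single global thread are assumptions, not part of the original:

# Hypothetical wiring; start_background_task runs the generator without
# blocking the server, and the namespace matches the emit above.
thread = None

@socketio.on('connect', namespace='/test')
def on_connect():
    global thread
    if thread is None:
        thread = socketio.start_background_task(randomTaskGenerator)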
Example No. 7
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    parser.add_argument('-name', type=str, help='Name of the experiment.')
    parser.add_argument('-seed', type=int, default=2, help='Random seed.')
    parser.add_argument('-gsize',
                        type=int,
                        default=1,
                        help='Num of devices per task.')
    parser.add_argument('-val_freq',
                        type=int,
                        help='Validation/transfer frequency.')
    parser.add_argument('-train',
                        action='store_true',
                        help='Specify if training.')
    parser.add_argument(
        '-lmdb',
        action='store_true',
        help='create or use lmdb datasets for accelerating I/O.')
    parser.add_argument('-droot', type=str, help='Data root, if one must be specified.')
    parser.add_argument('-rancl',
                        action='store_true',
                        help='Randomly pick tasks for transfer.')
    parser.add_argument('-create_val',
                        action='store_true',
                        help='create validation set from training set')
    parser.add_argument('-model', type=str, help='specify model to use')
    parser.add_argument('-k',
                        action='store_true',
                        help='Use knowledge distillation based transfer.')
    parser.add_argument('-a',
                        action='store_true',
                        help='Use attention based transfer.')
    parser.add_argument('-f',
                        action='store_true',
                        help='Use fsp matrix based transfer.')
    parser.add_argument('-w',
                        action='store_true',
                        help='Use weights transfer.')
    parser.add_argument('-ws',
                        action='store_true',
                        help='Use weights statistical transfer.')
    parser.add_argument('-VL',
                        action='store_true',
                        help='Variate on loss function.')
    parser.add_argument('-VO',
                        action='store_true',
                        help='Variate on optimization algorithm.')
    parser.add_argument('-VH',
                        action='store_true',
                        help='Variate on hyperparameters.')
    parser.add_argument('-VD',
                        action='store_true',
                        help='Variate on datasets.')
    parser.add_argument('-VF',
                        action='store_true',
                        help='Variate on cross validation.')
    parser.add_argument('-VS', type=float, help='fraction of bagging dataset.')
    opt = parser.parse_args()

    if not opt.train and opt.gsize > 1:
        warnings.warn(
            '1 device for validation is enough! The gsize will be reset to 1')
        opt.gsize = 1

    if not opt.w and opt.gsize > 1:
        warnings.warn(
            'The exploit-explorer mode is not activated! The gsize will be reset to 1'
        )
        opt.gsize = 1

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    gpus_per_node = torch.cuda.device_count()
    if gpus_per_node == 0:
        device_id = 'cpu'
    else:
        device_id = rank % gpus_per_node
    print('start running! {} gpus are in use'.format(gpus_per_node))
    print('Torch version {}'.format(torch.__version__))

    # torch.cuda.set_device(device_id)
    device_per_task = opt.gsize
    task_id = rank // device_per_task
    task_exploit_rank = rank - rank % device_per_task
    world_size = comm.Get_size()
    opt.ntasks = world_size

    opt = option.parse(opt, task_id)
    opt = option.dict_to_nonedict(
        opt)  # Convert to NoneDict, which returns None for missing keys.

    # Setup directory
    if not opt['resume'] and task_exploit_rank == rank and not opt['path'][
            'resume_state']:
        try:
            if opt['is_train'] and rank == 0:
                util.mkdir_and_rename(
                    opt['path']
                    ['experiments_root'])  # rename old folder if exists
        except FileNotFoundError:
            raise FileNotFoundError("Issue from task {} and rank {}".format(
                task_id, rank))
    comm.Barrier()

    if not opt['resume'] and task_exploit_rank == rank and not opt['path'][
            'resume_state']:
        util.mkdirs((path for key, path in opt['path'].items() if key not in [
            'experiments_root', 'log', 'root', 'pretrain_model',
            'pretrain_model_G', 'resume_state', 'data_config'
        ]))
        # save json file to task folder
        if opt['is_train']:
            with open(os.path.join(opt['path']['task'], 'task.json'),
                      'w') as outfile:
                json.dump(opt, outfile, indent=4, ensure_ascii=False)

    comm.Barrier()

    # Configure loggers. Before this point, logging does not work.
    util.setup_logger(str(task_id),
                      opt['path']['log'],
                      'train',
                      level=logging.INFO,
                      screen=True,
                      task_id=task_id,
                      rank=rank)
    logger = logging.getLogger(str(task_id))

    if task_exploit_rank == rank:
        if task_id == 0:
            logger.info(option.dict2str(opt))  # display options
        else:
            logger.info(
                'Auxiliary task {} configuration: network: {}, optim: {}, loss: {}, data: {}'
                .format(task_id, opt['network'], opt['train']['optim'],
                        opt['train']['loss'],
                        opt['datasets']['train']['name']))

    # tensorboard logger
    tb_logger = None
    if opt['use_tb_logger'] and opt[
            'is_train'] and task_exploit_rank == rank:  # and 'debug' not in opt['name']
        from tensorboardX import SummaryWriter
        if rank == 0:
            util.mkdir_and_rename(os.path.join('../tb_logger/', opt['name']))
        comm.barrier()

        tb_logger_path = os.path.join('../tb_logger/', opt['name'],
                                      str(task_id))
        tb_logger = SummaryWriter(log_dir=tb_logger_path)

    # create task
    # Without the following settings, the results are non-deterministic.
    # However, enabling them may slow things down, depending on the model.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    task = create_task(opt, comm=comm, device=device_id)

    # Start training or testing
    start = time()
    if opt['is_train']:
        task.resume_training()
        task.full_training(opt['logger']['print_freq'], opt['val_freq'],
                           opt['logger']['save_checkpoint_freq'], tb_logger)

        if rank == 0:
            if opt['varyOnCV']:
                all_hmean = task.cv_validation()
                results = [r['acc'] for r in all_hmean]
                idx = np.argmax(results)
                best_task_id = all_hmean[idx]['id']
                best_task_rank = device_per_task * best_task_id
                logger.info(
                    'All hmean values: {}; selecting rank {} to run validation'
                    .format(all_hmean, best_task_rank))
                if task_exploit_rank == best_task_rank:
                    task.validation(verbose=True,
                                    report=True,
                                    split='test',
                                    best=True)
            else:
                task.validation(verbose=True,
                                report=True,
                                split='test',
                                best=True)

            logger.info('End of training.')
    else:
        logger.info('Task {} starting validation'.format(task_id))
        task.validation(verbose=True, report=True, split='test', best=True)

    duration = time() - start
    logger.info('The program took {} seconds'.format(duration))
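
Since the script drives MPI.COMM_WORLD, it is meant to be launched under an MPI runner, e.g. something like "mpirun -np 4 python main.py -opt options.json -train"; the entry-point filename is an assumption, while -opt and -train come from the parser above.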
Example No. 8
def train(args):
    """
    Train main function.
    """
    if args.is_distributed:
        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)

        dev_count = fluid.core.get_cuda_device_count()
        gpu_id = int(os.getenv("FLAGS_selected_gpus"))
        trainers_num = fleet.worker_num()
        trainer_id = fleet.worker_index()
    else:
        dev_count = 1
        gpu_id = 0
        trainers_num = 1
        trainer_id = 0
    place = fluid.CUDAPlace(gpu_id)

    task = tasks.create_task(args)
    model = models.create_model(args, place)
    train_generator = task.get_data_loader(model,
                                           input_file=args.train_file,
                                           num_epochs=args.num_epochs,
                                           num_part=trainers_num,
                                           part_id=trainer_id,
                                           phase="train")
    valid_generator = task.get_data_loader(
        model,
        input_file=args.valid_file,
        num_part=dev_count,
        part_id=gpu_id,
        phase="distributed_valid" if args.is_distributed else "valid")

    # run training
    timer = Timer()
    timer.start()
    if args.Model.model == 'NSPModel':
        best_metrics = 0.0
    else:
        best_metrics = 10000
    shuffledatafile()
    for step, data in enumerate(train_generator(), args.start_step + 1):
        outputs = task.train_step(model, data)
        timer.pause()
        if step % args.log_steps == 0:
            time_cost = timer.pass_time
            current_epoch, current_file_index, total_file = \
                task.reader.get_train_progress()
            print(
                f"[train][{current_epoch}] progress: {current_file_index}/{total_file} "
                f"step: {step}, time: {time_cost:.3f}, "
                f"speed: {args.log_steps / time_cost:.3f} steps/s")
            print(f"\tcurrent lr: {outputs.pop('scheduled_lr'):.7f}")
            metrics = task.get_metrics(outputs)
            print("\t" + ", ".join(f"{k}: {v:.4f}"
                                   for k, v in metrics.items()))
            timer.reset()

        if step % args.validation_steps == 0:

            # shuffledatafile()
            metrics = evaluate(task, model, valid_generator, args, dev_count,
                               gpu_id, step)
            if args.Model.model == 'NSPModel' and metrics[
                    'nsp_acc'] > best_metrics:
                best_metrics = metrics['nsp_acc']
                save_path = f"{args.save_path}/step_{step}_{best_metrics}"
                model.save(save_path, is_checkpoint=True)

            elif args.Model.model == 'Plato' and metrics['loss'] < best_metrics:
                best_metrics = metrics['loss']
                save_path = f"{args.save_path}/step_{step}_{best_metrics}"
                model.save(save_path, is_checkpoint=True)
        # if step % args.save_steps == 0 and trainer_id == 0:
        #     save_path = f"{args.save_path}/step_{step}"
        #     model.save(save_path, is_checkpoint=True)
        #     with open(save_path + ".finish", "w") as f:
        #         pass

        timer.start()
Example No. 9
def infer(args):
    """
    Inference main function.
    """
    if args.is_distributed:
        dev_count = fluid.core.get_cuda_device_count()
        gpu_id = int(os.getenv("FLAGS_selected_gpus"))
        phase = "distributed_test"
    else:
        dev_count = 1
        gpu_id = 0
        phase = "test"
    place = fluid.CUDAPlace(gpu_id)

    task = tasks.create_task(args)
    model = models.create_model(args, place)
    infer_generator = task.reader.data_generator(input_file=args.infer_file,
                                                 phase=phase,
                                                 is_infer=True)

    # run inference
    begin = time.time()
    infer_out = {}
    steps = 0
    for data in infer_generator():
        predictions = task.infer_step(model, data)
        for info in predictions:
            infer_out[info["data_id"]] = info
        steps += 1
        if steps % args.skip_steps == 0:
            time_cost = time.time() - begin
            print(f"[infer] steps: {steps}, time: {time_cost:.3f}, "
                  f"speed: {steps / time_cost:.3f} steps/s")

    time_cost = time.time() - begin
    print(f"[infer] steps: {steps} time cost: {time_cost}, "
          f"speed: {steps / time_cost} steps/s")

    if args.is_distributed:
        # merge inference outputs in distributed mode.
        part_file = os.path.join(args.save_path,
                                 f"inference_output.part_{gpu_id}")
        with open(part_file, "w") as fp:
            json.dump(predictions, fp, ensure_ascii=False)
        part_finish_file = os.path.join(
            args.save_path, f"inference_output.part_{gpu_id}.finish")
        with open(part_finish_file, "w"):
            pass

        if gpu_id == 0:
            part_files = "inference_output.part_*.finish"
            while True:
                ret = subprocess.getoutput(
                    f"find {args.save_path} -maxdepth 1 -name {part_files}")
                num_completed = len(ret.split("\n"))
                if num_completed != dev_count:
                    time.sleep(1)
                    continue
                infer_out = {}
                for dev_id in range(dev_count):
                    part_file = os.path.join(
                        args.save_path, f"inference_output.part_{dev_id}")
                    with open(part_file, "r") as fp:
                        part_infer_out = json.load(fp)
                        for data_id in part_infer_out:
                            infer_out[data_id] = part_infer_out[data_id]
                break
            subprocess.getoutput(
                "rm " +
                os.path.join(args.save_path, "inference_output.part*"))

    if gpu_id == 0:
        # save inference outputs
        inference_output = os.path.join(args.save_path, "inference_output.txt")
        with open(inference_output, "w") as f:
            for data_id in sorted(infer_out.keys(), key=lambda x: int(x)):
                f.write(infer_out[data_id][args.output_name] + "\n")

    return
Example No. 10
def __init__(self, engine: Engine) -> None:
    super().__init__(engine)
    self.task = create_task(self.engine.effect)
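
The fragment is missing its enclosing class. A minimal sketch of plausible context, assuming it extends a handler base class that stores the engine; every name except create_task and Engine is hypothetical:

# Hypothetical context; only create_task and Engine appear in the original.
class BaseHandler:
    def __init__(self, engine: Engine) -> None:
        self.engine = engine


class EffectHandler(BaseHandler):
    def __init__(self, engine: Engine) -> None:
        super().__init__(engine)
        self.task = create_task(self.engine.effect)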
Example No. 11
def task_menu():  # hypothetical name; the function header is missing from this snippet
    """
    Provide options:
        1. Creating a task
        2. Deleting a task
        3. Marking a task as finished
        4. Deleting all tasks
    """

    print("Select Option:")
    print("1: Create Task")
    print("2: Delete A Task")
    print("3: Mark a task as finished.")
    print("4: Delete All Tasks")

    selection = int(input("selection: "))

    # Code that implements the selected option
    if selection == 1:
        task = input('Enter the name of the task to create: ')
        create_task(task)

    elif selection == 2:
        task = input('Enter the name of the task to delete: ')
        delete_task(task)  # hypothetical helper; the call is missing in the snippet

    elif selection == 3:
        task = input('Enter the name of the task to mark as finished: ')
        mark_task_finished(task)  # hypothetical helper; the call is missing in the snippet

    elif selection == 4:
        delete_all_tasks()
Example No. 12
def main():
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        required=True,
                        help='Path to option JSON file.')
    parser.add_argument('-name', type=str, help='Name of the experiment.')
    parser.add_argument('-ntasks',
                        type=int,
                        required=True,
                        help='Number of tasks. Equals to #main+#auxiliary')
    parser.add_argument('-seed', type=int, help='Random seed.')
    parser.add_argument('-gsize',
                        type=int,
                        default=1,
                        help='Num of devices per task.')
    parser.add_argument('-train',
                        action='store_true',
                        help='Specify if training.')
    parser.add_argument(
        '-lmdb',
        action='store_true',
        help='create or use lmdb datasets for accelerating I/O.')
    parser.add_argument('-droot', type=str, help='Data root, if one must be specified.')
    parser.add_argument('-rancl',
                        action='store_true',
                        help='Randomly pick tasks for transfer.')
    parser.add_argument('-tfreq', type=int, help='Transfer frequency.')
    parser.add_argument('-model', type=str, help='specify model to use')
    parser.add_argument('-k',
                        action='store_true',
                        help='Use knowledge distillation based transfer.')
    parser.add_argument('-a',
                        action='store_true',
                        help='Use attention based transfer.')
    parser.add_argument('-f',
                        action='store_true',
                        help='Use fsp matrix based transfer.')
    parser.add_argument('-w',
                        action='store_true',
                        help='Use weights transfer.')
    parser.add_argument('-ws',
                        action='store_true',
                        help='Use weights statistical transfer.')
    parser.add_argument('-VL',
                        action='store_true',
                        help='Variate on loss function.')
    parser.add_argument('-VO',
                        action='store_true',
                        help='Variate on optimization algorithm.')
    parser.add_argument('-VH',
                        action='store_true',
                        help='Variate on hyperparameters.')
    parser.add_argument('-VD',
                        action='store_true',
                        help='Variate on datasets.')
    parser.add_argument('-VS',
                        type=float,
                        help='Variate on resampling dataset.')
    args = parser.parse_args()

    if not args.train and args.gsize > 1:
        warnings.warn(
            '1 device for validation is enough! The gsize will be reset to 1')
        args.gsize = 1

    if not args.w and args.gsize > 1:
        warnings.warn(
            'The exploit-explorer mode is not activated! The gsize will be reset to 1'
        )
        args.gsize = 1

    ntasks = args.ntasks
    comm = MPI.COMM_WORLD
    world_size = comm.Get_size()
    rank = comm.Get_rank()
    devices_per_node = torch.cuda.device_count()
    device_id = rank % devices_per_node
    torch.cuda.set_device(device_id)  # set cuda device
    devices_per_task = args.gsize
    cu_id = rank // devices_per_task
    all_solver_id = [
        i for i in list(range(world_size)) if i % devices_per_task == 0
    ]
    ndevices = world_size // devices_per_task

    task_solver_rank = rank - rank % devices_per_task
    task_arxiv_ranks = [
        task_solver_rank + i for i in range(1, devices_per_task)
    ]
    group = comm.Get_group()
    solver_group = MPI.Group.Incl(group, all_solver_id)
    solver_comm = comm.Create(solver_group)

    computation_utility = {
        "rank": rank,
        "task_solver_rank": task_solver_rank,
        "task_arxiv_ranks": task_arxiv_ranks,
        "is_solver": rank == task_solver_rank,
        "world_comm": comm,
        "solver_comm": solver_comm,
        "world_size": world_size
    }

    # Without the following settings, the results are non-deterministic.
    # However, enabling them may slow things down, depending on the model.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    tasks = []
    for task_id in range(ntasks):
        opt = option.parse(args, task_id)
        opt = option.dict_to_nonedict(
            opt)  # Convert to NoneDict, which returns None for missing keys.

        if rank == 0 and args.train:
            # Setup directory
            if not opt['resume'] or not opt['path']['resume_state']:
                if opt['is_train'] and task_id == 0:
                    util.mkdir_and_rename(
                        opt['path']
                        ['experiments_root'])  # rename old folder if exists

                util.mkdirs(
                    (path for key, path in opt['path'].items()
                     if not key == 'experiments_root'
                     and 'pretrain_model' not in key and 'resume' not in key
                     and 'data_config' not in key))
                # save json file to task folder
                if opt['is_train']:
                    with open(os.path.join(opt['path']['task'], 'task.json'),
                              'w') as outfile:
                        json.dump(opt, outfile, indent=4, ensure_ascii=False)
        comm.barrier()

        # Configure loggers. Before this point, logging does not work.
        # util.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO, task_id=task_id, rank=rank)
        util.setup_logger(str(task_id),
                          opt['path']['log'],
                          'train',
                          level=logging.INFO,
                          screen=True,
                          task_id=task_id)
        logger = logging.getLogger(str(task_id))
        if rank == 0:
            if task_id == 0:
                logger.info(option.dict2str(opt))  # display options
            else:
                logger.info(
                    'Auxiliary task {} configuration: network: {}, optim: {}, loss: {}, data: {}'
                    .format(task_id, opt['network'], opt['train']['optim'],
                            opt['train']['loss'],
                            opt['datasets']['train']['name']))

        # tensorboard logger
        tb_logger = None
        if opt['use_tb_logger'] and opt['is_train']:
            from tensorboardX import SummaryWriter
            if rank == 0:
                util.mkdir_and_rename(
                    os.path.join('../tb_logger/', opt['name']))
            comm.barrier()

            tb_logger_path = os.path.join('../tb_logger/', opt['name'],
                                          str(task_id))
            tb_logger = SummaryWriter(log_dir=tb_logger_path)

        # TODO: without device_id, assign device_id in run-time
        # create tasks
        task = create_task(opt,
                           logger=logger,
                           tb_logger=tb_logger,
                           device_id=device_id)
        if args.train:
            task.resume_training()
        tasks.append(task)

    logger.info(computation_utility)

    # Start training or testing
    start = time()
    if args.train:
        # Main task always fixed on CU_0
        # running_tasks_id = np.arange(1, ntasks)
        running_tasks_id = np.arange(ntasks)
        if rank == 0:
            np.random.shuffle(running_tasks_id)
            # running_tasks_id = np.insert(running_tasks_id, 0, 0)
            running_tasks_id = running_tasks_id[:ndevices]
        running_tasks_id = comm.bcast(running_tasks_id, root=0)
        computation_utility['running_tasks'] = running_tasks_id
        tasks[running_tasks_id[cu_id]].weights_allocate(computation_utility)

        # for i in range(1, int(opt['niter']*multiplier)+1):
        nstep = 0
        while True:
            # for step in range(0, max_step):
            # task.step()
            nstep += 1
            # tasks[running_tasks_id[cu_id]].solver_update(computation_utility)
            tasks[running_tasks_id[cu_id]].step(
                rank, opt['logger']['print_freq'],
                opt['logger']['save_checkpoint_freq'],
                computation_utility['is_solver'])

            # if nstep % opt['val_freq'] == 0:
            # 	tasks[running_tasks_id[cu_id]].validation(rank=rank, verbose=True)

            if nstep % opt['val_freq'] == 0:
                tasks[running_tasks_id[cu_id]].solver_update(
                    computation_utility)
                if ntasks > 1:
                    for idx, j in enumerate(running_tasks_id):
                        tasks[j].synchronize(idx * devices_per_task,
                                             computation_utility)
                    # tasks[0].update_best(tasks)

                running_tasks_id = np.arange(ntasks)
                if rank == 0:
                    np.random.shuffle(running_tasks_id)
                    # running_tasks_id = np.insert(running_tasks_id, 0, 0)
                    running_tasks_id = running_tasks_id[:len(all_solver_id)]
                running_tasks_id = comm.bcast(running_tasks_id, root=0)
                computation_utility['running_tasks'] = running_tasks_id

                tasks[running_tasks_id[cu_id]].weights_allocate(
                    computation_utility)

            if tasks[0].training_step >= opt['niter']:
                # logger.info('main task historic best: {}'.format(tasks[0].historic_best))
                break

        if rank == 0:
            logger.info('Saving the final task.')
            for task in tasks:
                task.save('latest')
            logger.info('End of training.')
            # tasks[0].inference()
            tasks[0].validation(verbose=True, report=True, split='test')
    else:
        if rank == 0:
            logger.info('Task {} starting testing'.format(task_id))
            # tasks[0].validation(verbose=True, report=True)
            # tasks[0].inference(directory='../data/INRIA/test')
            # tasks[0].inference('../data/RSSRAI/test')

    duration = time() - start
    logger.info('The program took {} seconds'.format(duration))
Example No. 13
def async_upload(**kwargs):
    sio = SocketIO(settings.SOCKETIO_SERVER, settings.SOCKETIO_PORT)
    project_id = kwargs['project_id']
    project_name = kwargs['project_name']
    camera_id = kwargs['camera_id']
    deploymentLocationID = kwargs['deploymentLocationID']
    filename = kwargs['filename']
    path = kwargs['path']
    room = kwargs['room']
    duplicates = kwargs['duplicates']

    with open(path, 'rb') as file:  # binary mode: the file holds image/video data
        mime = magic.from_file(path, mime=True)
        isvideo = 'image' not in mime
        if isvideo:
            video_url, thumbnail_url = handle_video(filename)
            tmp = dict(project_id=project_id,
                       filename=filename,
                       url=thumbnail_url,
                       video_url=video_url,
                       isvideo=True,
                       camera_id=camera_id,
                       ahash=None,
                       content_type="video/mp4",
                       deploymentLocationID=deploymentLocationID)
            task = create_task(pbclient, **tmp)
            final = dict(status='ok', exif=None,
                         task=task.__dict__['data'],
                         room=room)
            sio.emit('jobcompleted', final)
            return final

        else:
            try:
                # Get from Exif DateTimeOriginal
                exif_dict = piexif.load(path)
                exif_dict.pop('thumbnail')
                data_d = {}
                for ifd in exif_dict:
                    data_d[ifd] = {
                        piexif.TAGS[ifd][tag]["name"]: exif_dict[ifd][tag]
                        for tag in exif_dict[ifd]}
                # Resize file to settings size
                thumbnail = Image.open(file)
                thumbnail.thumbnail(settings.THUMBNAIL)
                thumbnail.save(path)
                exif = 'removed'
                piexif.remove(path)
                Create_time = data_d['Exif']['DateTimeOriginal']
            except (InvalidImageDataError, KeyError):
                exif = 'This image type does not support EXIF'
                Create_time = None

            image_exists, ahash, task = check_exists(path)

            if duplicates == 'No':
                image_exists = False

            if image_exists is False:
                data_url = upload_to_s3(path, filename)
                tmp = dict(project_id=project_id,
                           filename=filename,
                           url=data_url,
                           video_url=None,
                           isvideo=False,
                           camera_id=camera_id,
                           ahash=ahash,
                           content_type=mime,
                           Create_time=Create_time,
                           deploymentLocationID=deploymentLocationID)
                task = create_task(pbclient, **tmp)
                final = dict(status='ok', exif=exif,
                             task=task.__dict__['data'],
                             room=room)
                sio.emit('jobcompleted', final)
                return final
            else:
                final = dict(status='ok', exif=exif,
                             task=task,
                             room=room)
                sio.emit('jobcompleted', final)
                return final
Example No. 14
def main(hparams):
    model = create_task(hparams)
    trainer = create_trainer(hparams)
    trainer.fit(model)
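
A hedged sketch of the usual entry point for such a factory pair, assuming hparams come from argparse; the flag names are illustrative, not taken from the original:

# Hypothetical entry point; flag names are assumptions.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--max_epochs', type=int, default=10)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    main(parser.parse_args())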