Example #1
def main_worker(local_rank: int, args: Args, dist_url: str):
    print('Local Rank:', local_rank)

    # log in main process only
    if local_rank == 0:
        set_logging_basic_config(args)

    logger.info(f'Args = \n{args}')

    if args.config is not None and args.experiment_dir is not None:
        # Initialize the distributed process group. There is only one group, on the current node.
        dist.init_process_group(
            backend='nccl',
            init_method=dist_url,
            world_size=args.world_size,
            rank=local_rank,
        )
        utils.reproduction.cudnn_benchmark()

        cfg = get_config(args)
        if local_rank == 0:
            save_config(args, cfg)
            args.save()

        with torch.cuda.device(local_rank):
            if not args.validate:
                engine = Engine(args, cfg, local_rank=local_rank)
                if args.load_checkpoint is not None:
                    engine.load_checkpoint(args.load_checkpoint)
                elif args.moco_checkpoint is not None:
                    engine.load_moco_checkpoint(args.moco_checkpoint)
                engine.run()
                validate_checkpoint = args.experiment_dir / 'model_best.pth.tar'
            else:
                validate_checkpoint = args.load_checkpoint
                if not validate_checkpoint:
                    raise ValueError(
                        'With "--validate" specified, you should also specify "--load-checkpoint"'
                    )

            logger.info('Running final validation.')
            engine = Engine(args,
                            cfg,
                            local_rank=local_rank,
                            final_validate=True)
            engine.load_checkpoint(validate_checkpoint)
            engine.validate_epoch()
            if engine.summary_writer is not None:
                engine.summary_writer.flush()

    else:
        logger.warning('No config. Do nothing.')
Example #2
def main():
    args = Args.from_args()

    if args.debug:
        pass
    elif args.world_size < 2:
        warnings.warn('World size must be larger than 1')
        exit()

    if args.seed is not None:
        utils.reproduction.initialize_seed(args.seed)

    utils.environment.ulimit_n_max()

    # Run in the main process to avoid conflicts
    args.resolve_continue()
    args.make_run_dir()
    args.save()
    utils.pack_code(args.run_dir)

    free_port = utils.distributed.find_free_port()
    dist_url = f'tcp://127.0.0.1:{free_port}'

    print(f'world_size={args.world_size} Using dist_url={dist_url}')

    args.parser = None
    # Only single-node distributed training is supported
    mp.spawn(main_worker, args=(
        args,
        dist_url,
    ), nprocs=args.world_size)
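
utils.distributed.find_free_port() is not shown in these snippets. A minimal sketch of what such a helper typically does (an assumption, not the project's actual implementation) is to bind to port 0 and let the operating system pick an unused port:

import socket

def find_free_port() -> int:
    # Bind to port 0 so the OS assigns an unused port, then report which one it chose.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('127.0.0.1', 0))
        return s.getsockname()[1]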
Example #3
def main():
    args = Args.from_args()

    if args.seed is not None:
        utils.reproduction.initialize_seed(args.seed)

    # Run in the main process to prevent concurrency conflicts
    args.resolve_continue()
    args.make_run_dir()
    args.save()
    pack_code(args.run_dir)

    utils.environment.ulimit_n_max()

    free_port = utils.distributed.find_free_port()
    dist_url = f'tcp://127.0.0.1:{free_port}'

    print(f'world_size={args.world_size} Using dist_url={dist_url}')
    """
    We only consider single node here. 'world_size' is the number of processes.
    """
    args.parser = None
    mp.spawn(main_worker, args=(
        args,
        dist_url,
    ), nprocs=args.world_size)
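
Note that torch.multiprocessing.spawn passes the process index (0..nprocs-1) as the first positional argument, which is why main_worker receives local_rank before the values given via args=. A tiny standalone illustration with hypothetical values, separate from the project code:

import torch.multiprocessing as mp

def demo_worker(local_rank, world_size, dist_url):
    # spawn() supplies local_rank automatically; world_size and dist_url come from args=.
    print(f'rank {local_rank}/{world_size} would connect to {dist_url}')

if __name__ == '__main__':
    mp.spawn(demo_worker, args=(2, 'tcp://127.0.0.1:29500'), nprocs=2)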
Example #4
    def __init__(
        self, model_path, n_substeps, gripper_extra_height, block_gripper,
        has_object, target_in_the_air, target_offset, obj_range, target_range,
        distance_threshold, initial_qpos, reward_type,
    ):
        """Initializes a new Fetch environment.

        Args:
            model_path (string): path to the model XML file; a URDF can also be used here. For bmirobot it is loaded in a PyBullet environment.
            n_substeps (int): presumably the number of simulation substeps per environment step, e.g. after an action is issued it keeps being applied for the following 25 timesteps
            gripper_extra_height (float): extra height above the table when positioning the gripper
            block_gripper (boolean): whether the gripper is blocked (i.e. cannot move)
            has_object (boolean): whether the environment contains an object
            target_in_the_air (boolean): whether the target should be in the air above the table or on the table surface
            target_offset (float or array with 3 elements): offset of the target position
            obj_range (float): range of the uniform distribution for sampling the initial object position
            target_range (float): range of the uniform distribution for sampling a target
            distance_threshold (float): the threshold after which a goal is considered achieved
            initial_qpos (dict): a dictionary of joint names and values that define the initial configuration
            reward_type ('sparse' or 'dense'): the reward type, i.e. sparse or dense
        """
        IS_USEGUI = Args().Use_GUI
        self.gripper_extra_height = gripper_extra_height
        self.block_gripper = block_gripper
        self.has_object = has_object
        self.target_in_the_air = target_in_the_air
        self.target_offset = target_offset
        self.obj_range = obj_range
        self.target_range = target_range
        self.distance_threshold = distance_threshold
        self.reward_type = reward_type
        self.model_path = model_path
        self.n_substeps = n_substeps
        self.n_actions = 4
        self.blockUid = -1
        self.initial_qpos = initial_qpos
        self._urdfRoot = pybullet_data.getDataPath()
        self.seed()
        # Whether to render: GUI opens a graphical window, DIRECT runs without rendering
        if IS_USEGUI:
            self.physics = p.connect(p.GUI)
        else:
            self.physics = p.connect(p.DIRECT)
        # Load the robot model
        self._bmirobot = bmirobotv0()
        self._timeStep = 1. / 240.
        action_dim = 4
        self._action_bound = 0.5
        # The low/high bounds of the action and observation spaces may need to be reconsidered
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high)
        # Reset the environment
        self.reset()
Example #5
            ag = ag_new
        if info['is_success'] == 1.0:
            savetime += 1
            print("This is " + str(savetime) + " savetime ")
            ep_obs.append(obs.copy())
            ep_ag.append(ag.copy())
            mb_obs.append(ep_obs)
            mb_ag.append(ep_ag)
            mb_g.append(ep_g)
            mb_actions.append(ep_actions)
            mb_info.append(ep_info)
            # convert them into arrays
            obs_total.append(mb_obs)
            actions_total.append(mb_actions)
            g_total.append(mb_g)
            ag_total.append(mb_ag)
            info_total.append(mb_info)
    file = "bmirobot_" + str(savetime) + "_pick_demo.npz"
    np.savez_compressed(file,
                        acs=np.array(actions_total).squeeze(),
                        obs=np.array(obs_total).squeeze(),
                        info=np.array(info_total).squeeze(),
                        g=np.array(g_total).squeeze(),
                        ag=np.array(ag_total).squeeze())


if __name__ == '__main__':
    # get the params
    args = Args()
    launch(args)
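
The arrays written by np.savez_compressed above can be read back with np.load. A minimal sketch (the filename is hypothetical, and allow_pickle=True is needed because the info entries are dictionaries):

import numpy as np

data = np.load('bmirobot_10_pick_demo.npz', allow_pickle=True)  # hypothetical filename
acs, obs, goals, achieved = data['acs'], data['obs'], data['g'], data['ag']
print(acs.shape, obs.shape, goals.shape, achieved.shape)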
Example #6
        flatten = keras.layers.Flatten()(pool2)
        dense1 = keras.layers.Dense(784, activation='relu')(flatten)
        output_layer = keras.layers.Dense(10, activation='softmax')(dense1)
        model = keras.models.Model(inputs=input_layer,
                                   outputs=output_layer,
                                   name='model' + str(self.num))
        model.compile(optimizer="adam",
                      loss="categorical_crossentropy",
                      metrics=["accuracy"])
        # model.summary()
        return model


if __name__ == "__main__":

    args = Args().getParameters()

    is_test = False
    img_loc = "./train_images"

    # Variables for federated learning (FL)
    num_edge = 3
    num_max_image = int(5500 - (5500 % (num_edge + 1)))
    # Per-class image indices assumed to be held by each edge server
    # e.g. edge 0: 0 ~ 200.png, edge 1: 201 ~ 400.png, ...; the devices share a single array even if there are several
    idx_images = np.arange(num_max_image).reshape(
        (num_edge + 1), int(num_max_image / (num_edge + 1)))

    # Data-related variables
    class_size = 10
    Target_class = 9
Example #7
import os
import torch
import random
import numbers
import numpy as np
import torch.nn as nn
from PIL import Image
from scipy import misc
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms.functional as F
from arguments import Args

opt = Args()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


class RandomRotation(object):
    def __init__(self, resample=False, expand=False, center=None, p=opt.p_rot):

        self.degrees = 90
        self.resample = resample
        self.center = center
        self.expand = expand
        self.p = p

    @staticmethod
    def get_params(degrees):
        angle = degrees
        return angle
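
The snippet ends at get_params, so the transform's __call__ is not shown. A hedged sketch of the usual torchvision-style pattern (an assumption, not the project's actual code) would apply the fixed rotation with probability p:

import random
import torchvision.transforms.functional as F

class RandomRotationSketch(RandomRotation):
    def __call__(self, img):
        # Rotate by self.degrees with probability self.p, otherwise return the image unchanged.
        if random.random() < self.p:
            angle = self.get_params(self.degrees)
            return F.rotate(img, angle, expand=self.expand, center=self.center)
        return img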