Example #1
    def __init__(self,
                 config: dict = None,
                 pose_autoencoder=None,
                 cost_input_dimension=None,
                 phase_dim=0,
                 input_slicers: list = None,
                 output_slicers: list = None,
                 train_set=None,
                 val_set=None,
                 name="model",
                 load=False):
        super().__init__()

        if not load:
            self.pose_autoencoder = pose_autoencoder  # start with 3
            cost_hidden_dim = config["cost_hidden_dim"]
            cost_output_dim = config["cost_output_dim"]
            self.cost_encoder = MLP(dimensions=[
                cost_input_dimension, cost_hidden_dim, cost_hidden_dim,
                cost_output_dim
            ],
                                    name="CostEncoder",
                                    load=True,
                                    single_module=-1)

            # the width of the first input slice carries the phase features and
            # overrides the phase_dim argument
            phase_dim = input_slicers[0]
            moe_input_dim = pose_autoencoder.dimensions[-1] + phase_dim + cost_output_dim
            moe_output_dim = (pose_autoencoder.dimensions[-1] + pose_autoencoder.extra_feature_len
                              + phase_dim * 2 + cost_input_dimension)
            self.generationModel = RNN(
                config=config,
                dimensions=[moe_input_dim, moe_output_dim],
                device=self.device,
                batch_size=2 * config["batch_size"],
                name="GRU")

            # cumulative offsets used to slice the concatenated input/output tensors
            self.in_slices = [0] + list(accumulate(add, input_slicers))
            self.out_slices = [0] + list(accumulate(add, output_slicers))

            self.config = config
            self.batch_size = config["batch_size"]
            self.learning_rate = config["lr"]
            self.loss_fn = config["loss_fn"]
            self.autoregress_chunk_size = config["autoregress_chunk_size"]
            # self.autoregress_prob = config["autoregress_prob"]
            # self.autoregress_inc = config["autoregress_inc"]
            self.best_val_loss = np.inf
            self.phase_smooth_factor = 0.9

        self.train_set = train_set
        self.val_set = val_set
        self.name = name
Example #2
    def __init__(self,
                 config: dict = None,
                 Model=None,
                 pose_autoencoder=None,
                 feature_dims=None,
                 input_slicers: list = None,
                 output_slicers: list = None,
                 train_set=None,
                 val_set=None,
                 test_set=None,
                 name="MotionGeneration"):
        super().__init__()

        self.feature_dims = feature_dims
        self.config = config

        # fall back to sensible defaults for any key the config omits
        self.loss_fn = config.get("loss_fn", nn.functional.mse_loss)
        self.opt = config.get("optimizer", torch.optim.Adam)
        self.scheduler = config.get("scheduler", None)
        self.scheduler_param = config.get("scheduler_param", None)
        self.batch_size = config["batch_size"]
        self.learning_rate = config["lr"]

        self.best_val_loss = np.inf
        self.phase_smooth_factor = 0.9

        self.pose_autoencoder = pose_autoencoder if pose_autoencoder is not None else \
            MLP(config=config, dimensions=[feature_dims["pose_dim"]], name="PoseAE")
        self.use_label = pose_autoencoder is not None and pose_autoencoder.use_label

        cost_hidden_dim = config["cost_hidden_dim"]
        self.cost_encoder = MLP(config=config,
                                dimensions=[
                                    feature_dims["cost_dim"], cost_hidden_dim,
                                    cost_hidden_dim, cost_hidden_dim
                                ],
                                name="CostEncoder",
                                single_module=-1)

        self.generationModel = Model(config=config,
                                     dimensions=[
                                         feature_dims["g_input_dim"],
                                         feature_dims["g_output_dim"]
                                     ],
                                     phase_input_dim=feature_dims["phase_dim"])

        self.input_dims = input_slicers
        self.output_dims = output_slicers
        self.in_slices = [0] + list(accumulate(add, input_slicers))
        self.out_slices = [0] + list(accumulate(add, output_slicers))

        self.train_set = train_set
        self.val_set = val_set
        self.test_set = test_set
        self.name = name
Example #3
    def __init__(self,
                 config: dict = None,
                 pose_autoencoder=None,
                 cost_input_dimension=None,
                 phase_dim=0,
                 input_slicers: list = None,
                 output_slicers: list = None,
                 train_set=None,
                 val_set=None,
                 name="model",
                 load=False):
        super().__init__()

        if not load:
            self.pose_autoencoder = pose_autoencoder  # start with 3
            cost_hidden_dim = config["cost_hidden_dim"]
            cost_output_dim = config["cost_output_dim"]
            self.cost_encoder = MLP(dimensions=[
                cost_input_dimension, cost_hidden_dim, cost_hidden_dim,
                cost_output_dim
            ],
                                    name="CostEncoder",
                                    load=True,
                                    single_module=-1)

            # keep the constructor argument, then take the actual phase width
            # from the first input slice
            self.phase_dim = phase_dim
            phase_dim = input_slicers[0]
            moe_input_dim = pose_autoencoder.dimensions[-1] + cost_output_dim
            moe_output_dim = pose_autoencoder.dimensions[-1] + self.phase_dim + cost_input_dimension
            self.generationModel = MoE(
                config=config,
                dimensions=[moe_input_dim, moe_output_dim],
                phase_input_dim=phase_dim,
                name="MixtureOfExperts")

            self.in_slices = [0] + list(accumulate(add, input_slicers))
            self.out_slices = [0] + list(accumulate(add, output_slicers))

            self.config = config
            self.batch_size = config["batch_size"]
            self.learning_rate = config["lr"]
            self.loss_fn = config["loss_fn"]
            self.window_size = config["window_size"]
            self.autoregress_chunk_size = config["autoregress_chunk_size"]
            self.autoregress_prob = config["autoregress_prob"]
            self.autoregress_inc = config["autoregress_inc"]
            self.best_val_loss = np.inf
            self.phase_smooth_factor = 0.9

        self.train_set = train_set
        self.val_set = val_set
        self.name = name
        self.epochs = 0
        # PyTorch Lightning flag: optimizer steps are handled manually
        self.automatic_optimization = False
Example #4
    def swap_pose_encoder(self, pose_encoder=None,
                          input_dim=None, output_dim=None,
                          feature_dims=None, freeze=False):
        self.pose_autoencoder = pose_encoder
        self.input_dims = input_dim
        self.output_dims = output_dim
        self.feature_dims = feature_dims
        self.in_slices = [0] + list(accumulate(add, self.input_dims))
        self.out_slices = [0] + list(accumulate(add, self.output_dims))

        if freeze:
            # train only the swapped-in pose autoencoder; keep the rest fixed
            self.generationModel.freeze()
            self.cost_encoder.freeze()
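
The [0] + list(accumulate(add, dims)) idiom above turns per-feature widths into cumulative slice boundaries. A minimal sketch of how such boundaries split a concatenated tensor (the names and widths below are illustrative, not from the project):

from operator import add

import torch
from toolz import accumulate

dims = [4, 3, 2]                            # hypothetical feature widths
slices = [0] + list(accumulate(add, dims))  # [0, 4, 7, 9]
x = torch.randn(8, sum(dims))               # a batch of concatenated features
parts = [x[:, s:e] for s, e in zip(slices, slices[1:])]
assert [p.shape[1] for p in parts] == dims
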
Example #5
    @classmethod
    def _decode_header_to_dict(
            cls, encoded_header: bytes) -> Iterator[Tuple[str, Any]]:
        if len(encoded_header) != cls.smc_encoded_size:
            raise ValidationError(
                "Expected encoded header to be of size: {0}. Got size {1} instead.\n- {2}"
                .format(
                    cls.smc_encoded_size,
                    len(encoded_header),
                    encode_hex(encoded_header),
                ))

        # cumulative field offsets, paired into (start, end) byte ranges
        start_indices = accumulate(lambda i, field: i + field[2],
                                   cls.fields_with_sizes, 0)
        field_bounds = sliding_window(2, start_indices)
        for byte_range, field in zip(field_bounds, cls._meta.fields):
            start_index, end_index = byte_range
            field_name, field_type = field

            field_bytes = encoded_header[start_index:end_index]
            if field_type == rlp.sedes.big_endian_int:
                # strip leading zeros to avoid a `not minimal length` error during deserialization
                formatted_field_bytes = field_bytes.lstrip(b'\x00')
            elif field_type == address:
                formatted_field_bytes = field_bytes[-20:]
            else:
                formatted_field_bytes = field_bytes
            yield field_name, field_type.deserialize(formatted_field_bytes)
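
For reference, accumulate and sliding_window here are the toolz variants. A standalone sketch of how the byte ranges come out, using hypothetical field names and sizes:

from toolz import accumulate, sliding_window

fields_with_sizes = [("shard_id", None, 32),          # (name, sedes, size) -- illustrative only
                     ("chunk_root", None, 32),
                     ("proposer_address", None, 20)]
start_indices = accumulate(lambda i, field: i + field[2], fields_with_sizes, 0)
print(list(sliding_window(2, start_indices)))
# [(0, 32), (32, 64), (64, 84)] -- one (start, end) range per field
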
Example #6
    def __init__(self,
                 config: dict = None,
                 input_dims: list = None,
                 pose_labels=None,
                 train_set=None,
                 val_set=None,
                 test_set=None,
                 name: str = "model",
                 save_period=5,
                 workers=6):

        super(RBF, self).__init__()

        M = len(input_dims)

        self.name = name
        self.input_dims = input_dims
        self.input_slice = [0] + list(accumulate(add, input_dims))

        self.act = nn.ELU
        self.save_period = save_period
        self.workers = workers
        self.pose_labels = pose_labels if pose_labels is not None else [None] * M

        self.config = config
        self.basis_func = basis_func_dict()[config["basis_func"]]
        self.hidden_dim = config["hidden_dim"]
        self.keep_prob = config["keep_prob"]
        self.k = config["k"]
        self.learning_rate = config["lr"]
        self.batch_size = config["batch_size"]

        self.loss_fn = config.get("loss_fn", nn.functional.mse_loss)
        self.opt = config.get("optimizer", torch.optim.Adam)
        self.scheduler = config.get("scheduler", None)
        self.scheduler_param = config.get("scheduler_param", None)

        self.models = [
            MLP(config=config,
                dimensions=[input_dims[i]],
                pose_labels=self.pose_labels[i],
                name="M" + str(i),
                single_module=0) for i in range(M)
        ]
        self.active_models = self.models

        self.cluster_model = RBF_Layer(in_features=self.k,
                                       out_features=self.k,
                                       basis_func=self.basis_func)

        self.train_set = train_set
        self.val_set = val_set
        self.test_set = test_set

        self.best_val_loss = np.inf
Example #7
    def add_models(self,
                   input_dims: list = None,
                   pose_labels: list = None,
                   freeze=False):
        n = len(self.models)  # index the first newly added model will get
        if pose_labels is not None:
            self.models += [
                MLP(config=self.config,
                    dimensions=[input_dims[i]],
                    pose_labels=pose_labels[i],
                    name="M" + str(n + i),
                    single_module=0) for i in range(len(input_dims))
            ]
        else:
            self.models += [
                MLP(config=self.config,
                    dimensions=[input_dims[i]],
                    name="M" + str(n + i),
                    single_module=0) for i in range(len(input_dims))
            ]
        if freeze:
            # freeze everything trained so far; only the new models stay active
            for model in self.active_models:
                model.freeze(True)
            self.active_models = self.models[n:]
            self.input_dims = input_dims
        else:
            self.active_models = self.models
            self.input_dims += input_dims

        self.input_slice = [0] + list(accumulate(add, self.input_dims))
Example #8
def test_headerdb_get_score_for_non_genesis_headers(headerdb, genesis_header):
    headerdb.persist_header(genesis_header)

    headers = mk_header_chain(genesis_header, length=10)
    difficulties = tuple(h.difficulty for h in headers)
    scores = tuple(accumulate(operator.add, difficulties, genesis_header.difficulty))

    headerdb.persist_header_chain(headers)

    for header, expected_score in zip(headers, scores[1:]):
        actual_score = headerdb.get_score(header.hash)
        assert actual_score == expected_score
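
Note that accumulate here is the toolz variant (binary op first, optional initial value), not itertools.accumulate. A minimal sketch of the score arithmetic with made-up numbers:

from operator import add
from toolz import accumulate

difficulties = (10, 12, 11)   # hypothetical per-header difficulties
genesis_score = 100           # hypothetical score of the genesis header
scores = tuple(accumulate(add, difficulties, genesis_score))
assert scores == (100, 110, 122, 133)
# scores[0] is the genesis score itself, which is why the test
# compares the non-genesis headers against scores[1:]
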
Example #9
    def __init__(self, *clause, copula=None, name=None):
        """
        :param clause: clause-like objects, including :class:`Clause`,
            :class:`ParallelGroup`, :class:`SequentialGroup`, etc.
        :param copula: a custom copula function
        :param name: an optional name for this group
        """
        super().__init__(*clause, name=name)
        if copula is None:
            # default copula: running product of the complements (1 - x), starting from 1
            self.copula = Lambda(lambda lst, context: list(accumulate(mul, (1 - x for x in lst), initial=1)),
                                 repre='DefaultCopula')
        else:
            self.copula = copula
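
Assuming accumulate is toolz's and mul is operator.mul, the default copula maps a list of values in [0, 1] to the running product of their complements:

from operator import mul
from toolz import accumulate

lst = [0.5, 0.25]
print(list(accumulate(mul, (1 - x for x in lst), initial=1)))
# [1, 0.5, 0.375] -- e.g. the probability that none of the clauses fired so far
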
Example #10
def assert_is_canonical_chain(headerdb, headers):
    if not headers:
        return

    # verify that the HEAD is correctly set.
    head = headerdb.get_canonical_head()
    assert_headers_eq(head, headers[-1])

    # verify that each header is set as the canonical block.
    for header in headers:
        canonical_hash = headerdb.get_canonical_block_hash(header.block_number)
        assert canonical_hash == header.hash

    # verify difficulties are correctly set.
    base_header = headerdb.get_block_header_by_hash(headers[0].parent_hash)

    difficulties = tuple(h.difficulty for h in headers)
    scores = tuple(accumulate(operator.add, difficulties, base_header.difficulty))

    for header, expected_score in zip(headers, scores[1:]):
        actual_score = headerdb.get_score(header.hash)
        assert actual_score == expected_score
Example #11
def ranges(c):
    # cumulative offsets: 0, c[0], c[0] + c[1], ...
    return accumulate(add, cons(0, c))
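
A quick usage sketch, assuming accumulate and cons come from toolz and add from operator:

from operator import add
from toolz import accumulate, cons

def ranges(c):
    return accumulate(add, cons(0, c))

print(list(ranges([3, 4, 2])))  # [0, 3, 7, 9]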