Example #1
def symbol_to_param(sym):
    """
    Takes a symbol or an expression of symbols and returns the corresponding
    Parameter
    """
    from sympy.core.function import AppliedUndef
    from codegeneration import sympycode

    if sp is None:
        error("sympy is needed for symbol_to_param to work.")

    check_arg(sym, (sp.Symbol, AppliedUndef, sp.Derivative),
              context=symbol_to_param)

    param_str = sympycode(sym)
    indexed = re.search(_indexed_format, param_str)
    if indexed:
        name, indices = indexed.groups()

        # Get dict
        param = _all_symbol_parameters.get(name)
        if param is not None:
            param = param.get(eval("({0})".format(indices)))
    else:

        param = _all_symbol_parameters.get(sympycode(sym))

    if param is None:
        value_error("No parameter with name '{0}' "\
                    "registered. Remember to declare Params which should be "\
                    "used in expression with names.".format(sympycode(sym)))
    return param
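The lookup relies on the module-level registry _all_symbol_parameters mapping symbol names to parameter objects. A minimal, self-contained sketch of that pattern (the demo names are illustrative, not the module's real internals):

import sympy as sp

# Stand-in for the module-level _all_symbol_parameters registry
_registry = {}

class DemoParam:
    def __init__(self, value, name):
        self.value = value
        self.sym = sp.Symbol(name, real=True)
        _registry[str(self.sym)] = self

def demo_symbol_to_param(sym):
    param = _registry.get(str(sym))
    if param is None:
        raise ValueError("No parameter with name '%s' registered." % sym)
    return param

p = DemoParam(1.5, "g_Na")
print(demo_symbol_to_param(p.sym).value)  # 1.5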
Example #5
def symbol_param_value_namespace(expr):
    """
    Create a value namespace for the symbols included in the expression
    """
    check_arg(expr, sp.Basic)
    return dict((str(symbol_param), symbol_to_param(symbol_param).value) \
                for symbol_param in iter_symbol_params_from_expr(expr))
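The returned namespace maps symbol names to plain values. A sketch of how such a namespace is typically consumed (the expression and values are illustrative):

import sympy as sp

a, b = sp.symbols("a b")
expr = a * b + 1
ns = {"a": 2.0, "b": 3.0}    # shape of the returned value namespace
print(float(expr.subs(ns)))  # 7.0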
Example #7
def store_symbol_parameter(param):
    """
    Store a symbol parameter
    """
    from codegeneration import sympycode
    from parameters import ScalarParam
    check_arg(param, ScalarParam)
    sym = param.sym
    #if str(sym) in _all_symbol_parameters:
    #    warning("Parameter with symbol name '%s' already "\
    #            "exists" % sym)

    param_str = sympycode(sym)
    indexed = re.search(_indexed_format, param_str)
    if indexed:
        name, indices = indexed.groups()

        # Get dict
        param_dict = _all_symbol_parameters.get(name)
        if param_dict is None:
            param_dict = {}
            _all_symbol_parameters[name] = param_dict

        param_dict[eval("({0})".format(indices))] = param

    else:
        _all_symbol_parameters[param_str] = param
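The _indexed_format regex is not shown in these snippets; assuming it captures a base name plus a comma-separated index list, the indexed-storage branch reduces to the sketch below (the pattern is a hypothetical stand-in):

import re

# Hypothetical stand-in for _indexed_format: a name followed by bracketed indices
indexed_format = r"([a-zA-Z_]\w*)\[([\d,\s]*\d)\]"

m = re.search(indexed_format, "K[0, 1]")
name, indices = m.groups()           # 'K', '0, 1'
key = eval("({0})".format(indices))  # (0, 1): the tuple key into the nested dict
print(name, key)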
Example #8
def value_namespace(expr, include_derivatives=False):
    """
    Create a value namespace for the symbols included in the expression
    """
    from codegeneration import sympycode
    check_arg(expr, sp.Basic)
    ns = {}
    for sym in symbols_from_expr(expr, \
                        include_derivatives=include_derivatives):

        # Get value
        value = symbol_to_param(sym).value

        # Check for indexed parameters
        param_str = sympycode(sym)
        indexed = re.search(_indexed_format, param_str)
        if indexed:
            name, indices = indexed.groups()
            if name not in ns:
                ns[name] = {}
            ns[name][eval(indices)] = value
        else:
            ns[param_str] = value

    return ns
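The resulting namespace mixes flat and nested entries: plain symbols map directly to their values, while indexed symbols map to a dict keyed by index tuples. An illustrative shape:

# Illustrative namespace produced by value_namespace:
ns = {"g_Na": 120.0, "K": {(0, 0): 1.0, (0, 1): 2.5}}
print(ns["K"][(0, 1)])  # 2.5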
Example #10
    def setvalue(self, value):
        """
        Set value of ArrayParameter
        """

        # An initial slice covering the whole array
        index = slice(0, len(self._value) + 1)

        # Tuple means index assignment
        # FIXME: Add support for slices
        if isinstance(value, tuple):
            if len(value) != 2:
                value_error("expected a tuple of length 2 when assigning "\
                            "single items")
            if not isinstance(value[0], integers):
                value_error("expected first value in index assignment to be"\
                            " an integer")
            if not isinstance(value[1], scalars):
                value_error("expected second value in index assignment to be"\
                            " an scalar")
            index = value[0]
            value = value[1]

        check_arg(value, nptypes, context=ArrayParam.setvalue)

        if isinstance(value, np.ndarray):
            if len(value) != len(self._value):
                value_error("expected the passed array to be of "\
                            "size: '%d'"%len(self._value))

        # Assign value
        self._value[index] = self.check(value)
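The tuple branch turns (index, scalar) pairs into single-element assignments, while any other accepted value overwrites the whole array through a slice. The same mechanics in plain NumPy:

import numpy as np

value = np.zeros(4)
# Whole-array assignment goes through a slice covering every element:
value[slice(0, len(value) + 1)] = np.ones(4)
# An (index, scalar) tuple becomes a single-element assignment:
index, scalar = 2, 5.0
value[index] = scalar
print(value)  # [1. 1. 5. 1.]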
Example #11
    def __init__(self, value, ge=None, le=None, gt=None, lt=None, \
                 unit="1", name="", description=""):
        """
        Creating a ScalarParam

        Arguments
        ---------
        value : scalar
            The initial value of this parameter
        gt : scalar (optional)
            Greater than, range control of argument
        ge : scalar (optional)
            Greater than or equal, range control of argument
        lt : scalar (optional)
            Less than, range control of argument
        le : scalar (optional)
            Less than or equal, range control of argument
        unit : str (optional, if sympy is available)
            The unit of the scalar parameter
        name : str (optional)
            The name of the parameter. Used in pretty printing
        description : str (optional)
            A description associated with the Parameter
        """
        check_arg(value, scalars, 0, ScalarParam)
        super(ScalarParam, self).__init__(value, name, description)

        self._range = Range(ge, le, gt, lt)
        self._in_range = self._range._in_range

        check_kwarg(unit, "unit", str)
        self._unit = unit

        # Define some string used for pretty print
        self._in_str = self._range._in_str
        self._not_in_str = self._range._not_in_str

        # Create symbol
        if name == "":
            self._sym = dummy_sym
        elif sp is None:
            self._sym = None
        else:
            self._sym = sp.Symbol(name,
                                  real=True,
                                  imaginary=False,
                                  commutative=True,
                                  hermitian=True,
                                  complex=True)

            # Store parameter
            store_symbol_parameter(self)

        # Set the value using the check functionality
        # (Only if not called from derived class)
        if type(self) == ScalarParam:
            self.setvalue(value)
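A usage sketch; the import path is an assumption about the package layout, not confirmed by the snippet:

from modelparameters.parameters import ScalarParam  # assumed import path

p = ScalarParam(0.5, ge=0.0, le=1.0, unit="mM", name="Ca_i",
                description="intracellular calcium")
print(p.sym)     # Ca_i, a real-valued sympy Symbol
p.setvalue(0.8)  # checked against the [0, 1] range before assignment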
Example #12
def iter_symbol_params_from_expr(expr):
    """
    Return an iterator over sp.Symbols from expr
    """
    check_arg(expr, sp.Basic)

    # Filter out dummy symbols
    return (atom for atom in expr.atoms() if isinstance(atom, sp.Symbol) \
            and not isinstance(atom, sp.Dummy) and atom.name)
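The same filter in a self-contained sympy session, showing that Dummy symbols are dropped:

import sympy as sp

x, y = sp.symbols("x y")
expr = x * y + sp.Dummy("tmp")
syms = (a for a in expr.atoms()
        if isinstance(a, sp.Symbol) and not isinstance(a, sp.Dummy) and a.name)
print(sorted(s.name for s in syms))  # ['x', 'y']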
Example #15
    def __init__(self, value, options, name="", description=""):
        """
        Initialize the OptionParam

        Arguments
        ---------
        value : scalars or str
            The initial value of this parameter. The type of value is stored
            for future type checks
        options : list
            A list of acceptable values for the parameter
        name : str (optional)
            The name of the parameter. Used in pretty printing
        description : str (optional)
            A description associated with the Parameter
        """
        check_arg(options, list)
        if len(options) < 2:
            value_error(
                "expected the options argument to be at least of length 2")

        super(OptionParam, self).__init__(value, name, description)

        # Check valid types for an 'option check'
        for option in options:
            if not isinstance(option, option_types):
                type_error("options can only be 'str' and scalars got: '%s'" % \
                           type(option).__name__)

        # Define a 'check function'
        self._in_range = lambda value: value in options

        # Define some string used for pretty print
        self._in_str = "%%s \u2208 %s" % repr(options)

        self._not_in_str = "%%s \u2209 %s" % repr(options)

        # Define a 'repr string'
        #self._repr_str = "OptionParam(%%s, %s)" % repr(options)

        # Set the value using the check functionality
        self.setvalue(value)

        # Check that all values in options have the same type
        for val in options:
            if not isinstance(val, self.value_type):
                type_error("All values of the 'option check' " +\
                           "need to be of type: '%s'" % type(self._value).__name__)

        # Store options
        self._options = options
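A hypothetical usage of the class defined above, assuming the definition and its helpers (check_arg, value_error, option_types, ...) are in scope:

solver = OptionParam("explicit", ["explicit", "implicit"], name="solver")
solver.setvalue("implicit")    # accepted: the value is among the options
# solver.setvalue("midpoint")  # would raise: not a registered option
print(solver.value)            # implicit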
    def __init__(self, opts, nfilter_max=512, **kwargs):
        super(EngineGenerator, self).__init__()

        self.opts = opts

        self.z_dim = opts.z
        self.num_components = opts.num_components
        self.hdim = opts.hidden_dim
        self.expand_dim = opts.nfilterG

        if opts.do_memory:
            self.base_dim = opts.memory_dim
        else:
            self.base_dim = opts.hidden_dim
        state_dim_multiplier = 1
        if utils.check_arg(self.opts, 'state_dim_multiplier'):
            state_dim_multiplier = self.opts.state_dim_multiplier
        state_dim = self.base_dim * state_dim_multiplier

        # Memory Module
        if self.opts.do_memory:
            self.memory = Memory(opts, state_dim)

        # Dynamics Engine
        self.engine = EngineModule(opts, state_dim)
        self.simple_enc = model_utils.choose_netG_encoder(basechannel=state_dim, opts=self.opts)

        # Rendering Engine
        self.graphics_renderer = RenderingEngine(G_ch=opts.nfilterG, opts=opts, resolution=self.opts.img_size[0])

        self.num_components = self.opts.num_components
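Note that utils.check_arg in these GameGAN-style snippets tests whether an option is present on the namespace, unlike the positional check_arg of the earlier examples. A minimal stand-in (an assumption, not the library's actual code):

from argparse import Namespace

def has_opt(opts, name):
    # hypothetical stand-in for utils.check_arg(opts, name)
    return getattr(opts, name, None) is not None

opts = Namespace(hidden_dim=512, state_dim_multiplier=2)
multiplier = opts.state_dim_multiplier if has_opt(opts, 'state_dim_multiplier') else 1
print(opts.hidden_dim * multiplier)  # 1024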
Example #18
    def run_warmup(self,
                   zdist,
                   states,
                   actions,
                   warm_up,
                   train=True,
                   M=None,
                   prev_alpha=None,
                   prev_read_v=None,
                   force_sharp=False):
        '''
        Run warm-up phase
        '''
        batch_size = states[0].size(0)
        prev_state = None
        h, c = self.engine.init_hidden(batch_size)
        h = utils.check_gpu(self.opts.gpu, h)
        c = utils.check_gpu(self.opts.gpu, c)

        outputs, maps, zs, alphas, alpha_logits = [], [], [], [], []
        init_maps = []

        if utils.check_arg(self.opts, 'do_memory'):
            # initialize memory and alpha
            if M is None:
                M = self.memory.init_memory(batch_size)
            if prev_alpha is None:
                prev_alpha = utils.check_gpu(
                    self.opts.gpu, torch.zeros(batch_size,
                                               self.memory.num_mem))
                mem_wh = int(math.sqrt(prev_alpha.size(1)))
                prev_alpha[:, mem_wh * (mem_wh // 2) + mem_wh // 2] = 1.0
            if prev_read_v is None:
                prev_read_v = utils.check_gpu(
                    self.opts.gpu, torch.zeros(batch_size,
                                               self.opts.memory_dim))
        alpha_losses = 0
        base_imgs_all = []
        hiddens = []
        for i in range(warm_up):
            input_state = states[i]
            prev_state, m, prev_alpha, alpha_loss, z, M, prev_read_v, h, c, init_map, base_imgs, _, cur_hidden = self.run_step(
                input_state, h, c, actions[i], \
                batch_size, prev_read_v, prev_alpha, M, zdist, step=i, force_sharp=force_sharp)
            outputs.append(prev_state)
            maps.append(m)
            alphas.append(prev_alpha)
            alpha_losses += alpha_loss
            zs.append(z)
            base_imgs_all.append(base_imgs)
            hiddens.append(cur_hidden)
            init_maps.append(init_map)

        warm_up_state = [h, c]
        if prev_state is None:
            prev_state = states[
                0]  # warm_up is 0, the initial screen is always used
        return prev_state, warm_up_state, M, prev_read_v, prev_alpha, outputs, maps, alphas, alpha_losses, zs, base_imgs_all, 0, \
               hiddens, init_maps
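The alpha initialization points the first memory read at the centre of a square memory grid; a stdlib-only sketch of the index arithmetic, assuming num_mem is a perfect square:

import math

num_mem = 81                                   # e.g. a 9x9 memory grid
mem_wh = int(math.sqrt(num_mem))               # side length: 9
center = mem_wh * (mem_wh // 2) + mem_wh // 2  # row 4, col 4 -> flat index
print(center)  # 40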
Example #20
    def __getitem__(self, idx):
        data = np.load(self.samples[idx], encoding='latin1')
        states, actions, neg_actions = [], [], []
        ep_len = len(data[0]['np_img_state']) - self.opts.num_steps

        start_pt = random.randint(0, max(ep_len - 1, 0))

        i = 0
        samples = []
        cur_sample = 0
        while i < self.opts.num_steps:
            try:
                if start_pt + i >= len(data[0]['np_img_state']) - 2:
                    cur_s = data[0]['np_img_state'][
                        len(data[0]['np_img_state']) - 2]
                    cur_a = np.zeros(self.opts.action_space)
                    cur_a[0] = 1
                else:
                    cur_s = data[0]['np_img_state'][start_pt + i]
                    cur_a = data[0]['np_action'][start_pt + i]
            except:
                import pdb
                pdb.set_trace()

            if self.opts.img_size[0] != cur_s.shape[1] or self.opts.img_size[
                    1] != cur_s.shape[0]:
                cur_s = cv2.resize(
                    cur_s.astype('float32'),
                    dsize=(self.opts.img_size[0], self.opts.img_size[1]),
                )

            s_t = (np.transpose(cur_s, axes=(2, 0, 1)) /
                   255.).astype('float32')

            s_t = (s_t - 0.5) / 0.5
            if utils.check_arg(self.opts, 'normalize_mean'):
                s_t = s_t - np.array([-0.9219, -0.9101, -0.8536]).reshape(
                    (3, 1, 1)).astype('float32')
                s_t = s_t / 1.9219

            a_t = np.copy(cur_a).astype('float32')
            action_idx = cur_a.tolist().index(1)

            # false action
            false_a_idx = random.randint(0, 4)
            while false_a_idx == action_idx:
                false_a_idx = random.randint(0, 4)
            false_a_t = np.zeros(cur_a.shape).astype('float32')
            false_a_t[false_a_idx] = 1

            # save
            states.append(s_t)
            actions.append(a_t)
            neg_actions.append(false_a_t)
            samples.append(cur_sample)
            i = i + 1

        return states, actions, neg_actions
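The two-step normalization maps uint8 pixels into [-1, 1]; a small check:

import numpy as np

cur_s = np.array([[[0, 128, 255]]], dtype=np.uint8)  # one pixel, 3 channels
s_t = (np.transpose(cur_s, axes=(2, 0, 1)) / 255.).astype('float32')
s_t = (s_t - 0.5) / 0.5
print(s_t.ravel())  # [-1.  0.00392157  1.] approximately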
Example #21
    def __getattr__(self, key):
        check_arg(key, str, 0, ParameterDict.__getattr__)

        # Fix for newer ipython
        if key in [
                "__dict__", "__methods__", "trait_names", "_getAttributeNames"
        ]:
            return

        if not dict.__contains__(self, key):
            error("'%s' is not an item in this ParameterDict." % key, \
                  exception=AttributeError)

        value = dict.__getitem__(self, key)

        if isinstance(value, Param):
            value = value.getvalue()
        return value
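A self-contained sketch of this attribute-access pattern: a dict subclass whose attribute reads unwrap stored Param-like values (the mini classes are illustrative):

class MiniParam:
    def __init__(self, value): self._value = value
    def getvalue(self): return self._value

class MiniParameterDict(dict):
    def __getattr__(self, key):
        if key not in self:
            raise AttributeError(key)
        value = dict.__getitem__(self, key)
        return value.getvalue() if isinstance(value, MiniParam) else value

pd = MiniParameterDict(dt=MiniParam(0.1), solver="explicit")
print(pd.dt, pd.solver)  # 0.1 explicit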
Example #22
    def update(self, other):
        """
        A recursive update that handles parameter subsets
        correctly unlike dict.update.
        """
        check_arg(other, dict, 0, ParameterDict.update)

        for key in other:
            if key not in self:
                continue
            self_value = self[key]
            other_value = other[key]
            if isinstance(self_value, dict):
                # Update my own subdict with others subdict
                self_value.update(other_value)
            elif isinstance(self_value, Param):
                # Set my own value to others value
                self_value.setvalue(other_value)
            else:
                self[key] = other_value
Example #23
    def _set_name(self, name):
        """
        Set the name. Can only be done if not set during instantiation
        """
        check_arg(name, str)

        super(ScalarParam, self)._set_name(name)

        if sp is None:
            return

        # Create a new symbol with the updated name
        self._sym = sp.Symbol(name,
                              real=True,
                              imaginary=False,
                              commutative=True,
                              hermitian=True,
                              complex=True)

        # Store parameter
        store_symbol_parameter(self)
Example #24
    def __setattr__(self, key, value):

        check_arg(key, str, 0, ParameterDict.__setattr__)

        if key == "_members":
            dict.__setattr__(self, key, value)
            return

        # Check if key is a registered parameter
        if not dict.__contains__(self, key):
            error("'%s' is not an item in this ParameterDict." % key, \
                  exception=AttributeError)

        # Get the original value, used for checks
        org_value = dict.__getitem__(self, key)

        if isinstance(org_value, ParameterDict):
            type_error("cannot overwrite a ParameterDict")

        # Set the new value
        if isinstance(org_value, Param):
            org_value.setvalue(value)
        else:
            dict.__setitem__(self, key, value)
Example #25
    def __init__(self, opts, set_type=0, permute_color=False, datadir=''):
        self.opts = opts
        self.set_type = set_type
        self.permute_color = permute_color

        self.samples = []
        num_data = len(os.listdir(datadir))
        if set_type == 0:
            sample_list = list(range(0, int(num_data * 0.9)))
        else:
            sample_list = list(range(int(num_data * 0.9), num_data))
        for el in sample_list:
            self.samples.append('%s/%d.npy' % (datadir, el))
        self.end_bias = 0
        if utils.check_arg(self.opts, 'end_bias'):
            self.end_bias = self.opts.end_bias
Example #27
def add_pair_to_subs(subs, old, new):
    """
    Add a pair of old and new symbols to subs. If a pair with old as a
    key already exists it will be removed before insertion.
    """
    check_arg(subs, list, 0)
    check_arg(old, sp.Basic, 1)
    check_arg(new, sp.Basic, 2)

    for ind, (old0, new0) in enumerate(subs):
        if old0 == old:
            subs.pop(ind)
            break
    subs.append((old, new))
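A quick demonstration of the replace-then-append behaviour, with the check_arg guards dropped so the sketch is self-contained:

import sympy as sp

def add_pair(subs, old, new):
    # same replace-then-append behaviour, minus the argument checks
    for ind, (old0, _new0) in enumerate(subs):
        if old0 == old:
            subs.pop(ind)
            break
    subs.append((old, new))

x, y, z = sp.symbols("x y z")
subs = [(x, y)]
add_pair(subs, x, z)
print(subs)  # [(x, z)]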
Example #28
    def __init__(self, opts, set_type=0, permute_color=False, datadir=''):

        self.opts = opts
        self.set_type = set_type
        self.permute_color = permute_color

        self.samples = []
        files = os.listdir(datadir)
        num_data = len(files)
        if set_type == 0:
            sample_list = files[:int(num_data * 0.9)]
        else:
            sample_list = files[int(num_data * 0.9):]

        # Here's the difference - we're using gzipped pickle
        # (additionally checking if the file exists)
        for file in sample_list:
            path = f'{datadir}/{file}'
            self.samples.append(path)

        # Bias
        self.end_bias = self.opts.end_bias if utils.check_arg(
            self.opts, 'end_bias') else 0.5
    def __init__(self, opts, state_dim):
        super(EngineModule, self).__init__()
        self.hdim = opts.hidden_dim
        self.opts = opts

        action_space = 10 if not utils.check_arg(self.opts, 'action_space') else self.opts.action_space

        self.a_to_input = nn.Sequential(nn.Linear(action_space, self.hdim),
                                        nn.LeakyReLU(0.2))
        self.z_to_input = nn.Sequential(nn.Linear(opts.z, self.hdim),
                                        nn.LeakyReLU(0.2))
        # engine module input network
        e_input_dim = self.hdim*2
        if self.opts.do_memory:
            e_input_dim += self.opts.memory_dim

        self.f_e = nn.Sequential(nn.Linear(e_input_dim, e_input_dim),
                                 nn.LeakyReLU(0.2))

        self.rnn_e = ActionLSTM(e_input_dim,
                                hidden_size=self.hdim, opts=opts)

        self.state_bias = nn.Sequential(nn.Linear(state_dim, self.hdim * 4))
    def run_step(self, state, h, c, action, batch_size, prev_read_v, prev_alpha, M, zdist,
                read_only=False, step=0, decode=True, play=False, force_sharp=False):
        '''
        Run the model one time step
        '''

        # encode the image input
        if self.opts.input_detach:
            state = state.detach()
        s = self.simple_enc(state)

        # sample a noise
        z = utils.check_gpu(self.opts.gpu, zdist.sample((batch_size,)))

        # run dynamics engine
        prev_hidden = h[0].clone()
        h, c, cur_hidden = self.engine(h, c, s, action, z, prev_read_v=prev_read_v, step=step)

        # run memory module
        if self.opts.do_memory:
            base, M, alpha, prev_read_v = self.memory(cur_hidden, action, prev_hidden, prev_alpha, M, c=c[0], read_only=read_only, force_sharp=force_sharp)
            prev_alpha = alpha
            bases = base
        else:
            base = cur_hidden
            bases = [base] * self.num_components

        # run the rendering engine
        alpha_loss = 0
        out, m, eloss, init_maps, base_imgs = self.graphics_renderer(bases, num_components=self.num_components)
        if utils.check_arg(self.opts, 'alpha_loss_multiplier') and self.opts.alpha_loss_multiplier > 0:
            # memory regularization
            for i in range(1, len(m)):
                alpha_loss += (m[i].abs().sum() / batch_size)

        prev_state = out
        return prev_state, m, prev_alpha, alpha_loss, z, M, prev_read_v, h, c, init_maps, base_imgs, 0, cur_hidden
Example #31
            you must specify sourcename and device identifier
"""
elif sys.argv[0] == "list":
    for file in os.listdir(config.map["global"]["flow_dir"]):
        if file.endswith(".flow"):
            name = file[file.find("/") + 1 : -5]
            print "Flow name: " + name
            fmap = config.read_struct(config.map["global"]["flow_dir"] + "/" + file)
            print "\tSource type(s): [" + ",".join(fmap["source_types"]) + "]"
            print "\tTasks:\n\t\t" + "\n\t\t".join(fmap["tasks"])
            print ""
elif sys.argv[0] == "run":
    fromtime = datetime.now() - timedelta(weeks=1)
    totime = datetime.now()

    ret, vals, sys.argv = utils.check_arg(sys.argv, "--from", 1)
    if ret:
        try:
            fromtime = utils.str_to_date(vals[0])
        except ValueError, msg:
            print "Bad from time: " + str(msg)

    ret, vals, sys.argv = utils.check_arg(sys.argv, "--to", 1)
    if ret:
        try:
            totime = utils.str_to_date(vals[0])
        except ValueError, msg:
            print "Bad to time: " + str(msg)

    pretend, j, sys.argv = utils.check_arg(sys.argv, "--pretend")
    local, j, sys.argv = utils.check_arg(sys.argv, "--local")
Example #32
#!/usr/bin/python3

import sys
from utils import check_arg, get_values, print_res
from bubble import bubble
from selection import selection
from insertion import insertion
from merge import merge
from quicksort import quicksort

def main(args):
    numbers = get_values(args[0])
    if (len(numbers) <= 0):
        sys.exit(84)
    print(len(numbers), " element", sep="", end="")
    print("s" if len(numbers) > 1 else "")
    selection(numbers[::])
    insertion(numbers[::])
    bubble(numbers[::])
    print_res("Quicksort:", 0 if (len(numbers) <= 1) else quicksort(numbers[::])[1])
    print_res("Merge sort:", merge(numbers[::])[1])

try:
    check_arg(sys.argv)
    main(sys.argv[1:])
except:
    sys.exit(84)
        entries = learningdb.getHMMGaussianEmissions(id);
        gparm = learningdb.computeGlobalHMMGaussianParameters(entries);
        print "\tPlugload \"%s\" Total Learning Sets: %d"%(plname,len(entries))
        for g in gparm:
            print "\t\tState %-3d: N=%-7d mean=%20.10e variance=%20.10e"%(g.state_id,g.counts,g.mean,g.variance)
        print "\n";
    
    
elif (op == 'insert'):
    import devicedb,postgresops
    devicedb.connect();
    fromtime = None
    totime = None
    dtype = "GHMM"
    
    ret,vals,sys.argv = utils.check_arg(sys.argv,'--from',1)
    if ret:
        try:
            fromtime = utils.str_to_date(vals[0])
        except ValueError, msg:
            print "Bad from time: "+str(msg)
    
    ret,vals,sys.argv = utils.check_arg(sys.argv,'--to',1)
    if ret:
        try:
            totime = utils.str_to_date(vals[0])
        except ValueError, msg:
            print "Bad to time: "+str(msg)
            
    ret,vals,sys.argv = utils.check_arg(sys.argv,'--type',1)
    if ret:
Example #34
    def __init__(self, opts, nfilter=32, nfilter_max=1024):
        super(Discriminator, self).__init__()

        self.opts = opts
        self.simple_blocks = utils.check_arg(self.opts, 'simple_blocks')

        # single frame discriminator
        f_size = 4
        if self.opts.img_size[0] == 84:
            f_size = 5
        elif self.simple_blocks:
            f_size = 3
            expand_dim = opts.nfilterD * 4
            if opts.img_size[0] == 128:
                self.ds = nn.Sequential(
                    SN(nn.Conv2d(3, expand_dim, 4, 2)), nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim, expand_dim * 2, 3, 2)),
                    nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim * 2, expand_dim * 4, 3, 2)),
                    nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim * 4, expand_dim * 4, 3, 2)),
                    nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim * 4, expand_dim * 4, 3, 2)),
                    nn.LeakyReLU(0.2), View((-1, expand_dim * 4, 3, 3)))
            else:
                self.ds = nn.Sequential(
                    SN(nn.Conv2d(3, expand_dim, 4, 2)), nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim, expand_dim * 2, 3, 2)),
                    nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim * 2, expand_dim * 4, 3, 2)),
                    nn.LeakyReLU(0.2),
                    SN(nn.Conv2d(expand_dim * 4, expand_dim * 4, 3, 2)),
                    nn.LeakyReLU(0.2), View((-1, expand_dim * 4, 3, 3)))
            # patch level logits
            self.single_frame_discriminator_patch = nn.Sequential(
                SN(nn.Conv2d(expand_dim * 4, expand_dim * 4, 2, 1, 1)),
                nn.LeakyReLU(0.2), SN(nn.Conv2d(expand_dim * 4, 1, 2, 1)),
                View((-1, 1, 3, 3)))
            # single logit for entire image
            self.single_frame_discriminator_all = nn.Sequential(
                SN(nn.Conv2d(expand_dim * 4, expand_dim * 4, 2, 1)),
                nn.LeakyReLU(0.2), SN(nn.Conv2d(expand_dim * 4, 1, 2, 1)),
                View((-1, 1)))
            conv3d_dim = self.opts.nfilterD_temp
        else:
            # bigGAN discriminator architecture
            self.ds = DiscriminatorSingle(D_ch=opts.nfilterD,
                                          opts=opts,
                                          resolution=self.opts.img_size[0])
            conv3d_dim = self.opts.nfilterD_temp

        # temporal discriminator
        self.temporal_window = self.opts.config_temporal
        self.conv3d, self.conv3d_final = \
            model_utils.choose_netD_temporal(
                self.opts, conv3d_dim, window=self.temporal_window
            )
        self.conv3d = nn.ModuleList(self.conv3d)
        self.conv3d_final = nn.ModuleList(self.conv3d_final)

        self.which_conv = functools.partial(layers.SNConv2d,
                                            kernel_size=f_size,
                                            padding=0,
                                            num_svs=1,
                                            num_itrs=1,
                                            eps=1e-12)
        self.which_linear = functools.partial(layers.SNLinear,
                                              num_svs=1,
                                              num_itrs=1,
                                              eps=1e-12)

        # action-conditioned discriminator
        self.trans_conv = self.which_conv(opts.nfilterD * 16 * 2, 256)
        self.action_linear1 = self.which_linear(512, 512)
        self.action_linear_out = self.which_linear(512, 1)

        action_space = 10 if not utils.check_arg(
            self.opts, 'action_space') else self.opts.action_space
        self.action_to_feat = nn.Linear(action_space, 256)
        self.to_transition_feature = nn.Sequential(self.trans_conv,
                                                   nn.LeakyReLU(0.2),
                                                   View((-1, 256)))
        self.action_discriminator = nn.Sequential(self.action_linear1,
                                                  nn.LeakyReLU(0.2),
                                                  self.action_linear_out)
        self.reconstruct_action_z = nn.Sequential(
            nn.Linear(256, action_space + self.opts.z), )  # 4, 1, 0),
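For the simple_blocks path with 128x128 inputs, the five stride-2 convolutions shrink the spatial extent to 3x3, which is what the View reshape expects; a quick size check:

# Spatial size through the five stride-2 convs (kernel k, stride 2, no padding):
size = 128
for k in (4, 3, 3, 3, 3):
    size = (size - k) // 2 + 1
print(size)  # 3, matching View((-1, expand_dim * 4, 3, 3))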
Example #35
    def _set_name(self, name):
        check_arg(name, str)
        if self._name:
            value_error("Cannot set name attribute of %s, it is already set "\
                  "to '%s'" % (self.__class__.__name__, self._name))
        self._name = name
Example #36
    def __init__(self, value, size=None, ge=None, le=None, gt=None, lt=None, \
                 unit="1", name="", description=""):
        """
        Creating an ArrayParam

        Arguments
        ---------
        value : scalar, np.ndarray
            The initial value of this parameter
        size : integer (optional)
            Set the size of the np.array. If set, value must be a scalar
        gt : scalar (optional)
            Greater than, range control of argument
        ge : scalar (optional)
            Greater than or equal, range control of argument
        lt : scalar (optional)
            Lesser than, range control of argument
        le : scalar (optional)
            Lesser than or equal, range control of argument
        unit : str (optional, if sympy is available)
            The unit of the scalar parameter
        name : str (optional)
            The name of the parameter. Used in pretty printing
        description : str (optional)
            A description associated with the Parameter
        """

        if np is None:
            error("numpy is not installed so ArrayParam is not available")

        # If setting value using size
        if size is not None:
            # If a size is provided a scalar is expected for the value
            check_kwarg(size, "size", integers, ArrayParam, ge=1)
            check_arg(value, scalars, 0, ArrayParam)

            # Create numpy array based on the passed value
            # Use intc and float_ to be compatible with c code.
            value = np.array([value]*size, dtype=np.intc \
                             if isinstance(value, integers) \
                             else np.float_)

        # If setting value using only value argument
        else:

            # Allow using list of scalars
            if isinstance(value, list):
                check_arg(value, list, 0, ArrayParam, scalars)
                if len(value) == 0:
                    value_error("expected a list with at least 1 element")
                value = np.fromiter(value, dtype=type(value[0]))

            check_arg(value, nptypes, 0, ArrayParam)

            # First check any scalars passed
            if isinstance(value, integers):
                value = np.array([value], dtype=np.intc)
            elif isinstance(value, scalars):
                value = np.array([value], dtype=np.float_)

            # Then check passed NumPy arrays
            elif value.dtype in integers:
                value = value.astype(np.intc)
            elif value.dtype in scalars:
                value = value.astype(np.float_)
            else:
                type_error("expected a scalar or a scalar valued np.ndarray"
                           "or list as value argument.")

        # Init super class with dummy value
        super(ArrayParam, self).__init__(value[0], ge, le, gt, lt, unit, \
                                         name, description)

        # Assign value
        self._value = value
        self.value_type = nptypes

        # Use setvalue to set value using the range
        self.setvalue(value)
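The scalar-to-array promotion picks C-compatible dtypes; a sketch (np.float_ is np.float64 on NumPy < 2.0, where the alias still exists):

import numpy as np

value, size = 3, 4
arr = np.array([value] * size,
               dtype=np.intc if isinstance(value, int) else np.float64)
print(arr, arr.dtype)  # [3 3 3 3] int32 (np.intc is int32 on most platforms)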
Example #39
def choose_netD_temporal(opts, conv3d_dim, window=6):
    '''
    temporal discriminator
    '''
    in_dim = opts.nfilterD * 16
    extractors, finals = [], []

    # temporarily hand-designed for steps 6 / 12 / 18
    first_spatial_filter = 2
    if opts.img_size[0] == 84:
        first_spatial_filter = 3
    if utils.check_arg(opts, 'simple_blocks'):
        net1 = nn.Sequential(
            SN(nn.Conv3d(in_dim, conv3d_dim, (2, 2, 2), (1, 1, 1))),
            nn.LeakyReLU(0.2),
            SN(nn.Conv3d(conv3d_dim, conv3d_dim * 2, (3, 2, 2), (2, 1, 1))),
            nn.LeakyReLU(0.2))
        head1 = nn.Sequential(
            SN(nn.Conv3d(conv3d_dim * 2, 1, (2, 1, 1), (2, 1, 1))), )
        extractors.append(net1)
        finals.append(head1)

        if window > 6:  # 18
            net2 = nn.Sequential(
                SN(
                    nn.Conv3d(conv3d_dim * 2, conv3d_dim * 4, (3, 1, 1),
                              (2, 1, 1))),
                nn.LeakyReLU(0.2),
            )
            head2 = nn.Sequential(SN(nn.Conv3d(conv3d_dim * 4, 1,
                                               (2, 1, 1))), )
            extractors.append(net2)
            finals.append(head2)

        if window > 18:  # 32
            net3 = nn.Sequential(
                SN(
                    nn.Conv3d(conv3d_dim * 4, conv3d_dim * 8, (3, 1, 1),
                              (2, 1, 1))),
                nn.LeakyReLU(0.2),
            )
            head3 = nn.Sequential(SN(nn.Conv3d(conv3d_dim * 8, 1,
                                               (3, 1, 1))), )
            extractors.append(net3)
            finals.append(head3)
    elif 'sn' in opts.D_temp_mode:
        net1 = nn.Sequential(
            SN(
                nn.Conv3d(in_dim, conv3d_dim,
                          (2, first_spatial_filter, first_spatial_filter),
                          (1, 1, 1))), nn.LeakyReLU(0.2),
            SN(nn.Conv3d(conv3d_dim, conv3d_dim * 2, (3, 3, 3), (2, 1, 1))),
            nn.LeakyReLU(0.2))
        head1 = nn.Sequential(
            SN(nn.Conv3d(conv3d_dim * 2, 1, (2, 1, 1), (1, 1, 1))), )
        extractors.append(net1)
        finals.append(head1)

        if window >= 12:  # 12
            net2 = nn.Sequential(
                SN(
                    nn.Conv3d(conv3d_dim * 2, conv3d_dim * 4, (3, 1, 1),
                              (1, 1, 1))),
                nn.LeakyReLU(0.2),
            )
            head2 = nn.Sequential(SN(nn.Conv3d(conv3d_dim * 4, 1,
                                               (3, 1, 1))), )
            extractors.append(net2)
            finals.append(head2)

        if window >= 18:  # 18
            net3 = nn.Sequential(
                SN(
                    nn.Conv3d(conv3d_dim * 4, conv3d_dim * 8, (3, 1, 1),
                              (2, 1, 1))),
                nn.LeakyReLU(0.2),
            )
            if window == 18 or window == 28:
                head3 = nn.Sequential(
                    SN(nn.Conv3d(conv3d_dim * 8, 1, (2, 1, 1), (1, 1, 1))), )
            else:
                head3 = nn.Sequential(
                    SN(nn.Conv3d(conv3d_dim * 8, 1, (4, 1, 1), (2, 1, 1))), )
            extractors.append(net3)
            finals.append(head3)
    else:
        net1 = nn.Sequential(
            nn.Conv3d(in_dim, conv3d_dim, (2, 3, 3), (1, 1, 1)),
            nn.LeakyReLU(0.2),
            nn.Conv3d(conv3d_dim, conv3d_dim * 2, (3, 3, 3), (2, 1, 1)),
            nn.LeakyReLU(0.2))
        head1 = nn.Sequential(
            nn.Conv3d(conv3d_dim * 2, 1, (2, 1, 1), (1, 1, 1)), )
        extractors.append(net1)
        finals.append(head1)

        if window >= 12:  # 12
            net2 = nn.Sequential(
                nn.Conv3d(conv3d_dim * 2, conv3d_dim * 4, (3, 1, 1),
                          (1, 1, 1)),
                nn.LeakyReLU(0.2),
            )
            head2 = nn.Sequential(nn.Conv3d(conv3d_dim * 4, 1, (3, 1, 1)), )
            extractors.append(net2)
            finals.append(head2)

        if window >= 18:  # 18
            net3 = nn.Sequential(
                nn.Conv3d(conv3d_dim * 4, conv3d_dim * 8, (2, 1, 1),
                          (2, 1, 1)),
                nn.LeakyReLU(0.2),
            )
            head3 = nn.Sequential(nn.Conv3d(conv3d_dim * 8, 1, (3, 1, 1)), )
            extractors.append(net3)
            finals.append(head3)

    return extractors, finals
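For the simple_blocks branch with window = 6, the temporal kernels collapse each clip to a single logit; a quick check of the Conv3d size arithmetic:

# Temporal extent through the simple_blocks branch for window = 6:
t = 6
t = t - 2 + 1         # Conv3d temporal kernel 2, stride 1 -> 5
t = (t - 3) // 2 + 1  # Conv3d temporal kernel 3, stride 2 -> 2
t = (t - 2) // 2 + 1  # head: temporal kernel 2, stride 2 -> 1
print(t)  # 1 logit per clip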
    def __init__(self, G_ch=64, dim_z=512, bottom_width=8, resolution=64,
                 G_kernel_size=3, G_attn='64', n_classes=1000,
                 num_G_SVs=1, num_G_SV_itrs=1,
                 G_shared=True, shared_dim=0, hier=False,
                 cross_replica=False, mybn=False,
                 G_activation=nn.ReLU(inplace=False),
                 G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
                 BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
                 G_init='ortho', skip_init=False, no_optim=False,
                 G_param='SN', norm_style='bn', opts=None,
                 **kwargs):
        super(RenderingEngine, self).__init__()
        # Channel width multiplier
        self.ch = G_ch
        # Dimensionality of the latent space
        self.dim_z = opts.hidden_dim if not opts.do_memory else opts.memory_dim
        # The initial spatial dimensions
        if resolution == 84 or utils.check_arg(opts, 'simple_blocks'):
            bottom_width = 7

        self.bottom_width = bottom_width
        # Resolution of the output
        self.resolution = resolution
        # Kernel size?
        self.kernel_size = G_kernel_size
        # Attention?
        self.attention = G_attn
        # number of classes, for use in categorical conditional generation
        self.n_classes = n_classes
        # Use shared embeddings?
        self.G_shared = G_shared
        # Dimensionality of the shared embedding? Unused if not using G_shared
        self.shared_dim = shared_dim if shared_dim > 0 else dim_z
        # Hierarchical latent space?
        self.hier = hier
        # Cross replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn
        # nonlinearity for residual blocks
        self.activation = G_activation
        # Initialization style
        self.init = G_init
        # Parameterization style
        self.G_param = G_param
        # Normalization style
        self.norm_style = norm_style
        # Epsilon for BatchNorm?
        self.BN_eps = BN_eps
        # Epsilon for Spectral Norm?
        self.SN_eps = SN_eps
        # fp16?
        self.fp16 = G_fp16
        # Architecture dict
        self.arch = G_arch(self.ch, self.attention)[resolution]
        self.opts = opts

        # If using hierarchical latents, adjust z
        if self.hier:
            # Number of places z slots into
            self.num_slots = len(self.arch['in_channels']) + 1
            self.z_chunk_size = (self.dim_z // self.num_slots)
            # Recalculate latent dimensionality for even splitting into chunks
            self.dim_z = self.z_chunk_size * self.num_slots
        else:
            self.num_slots = 1
            self.z_chunk_size = 0

        # Which convs, batchnorms, and linear layers to use
        if self.G_param == 'SN':

            self.which_conv = functools.partial(layers.SNConv2d,
                                                kernel_size=3, padding=1,
                                                num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                                                eps=self.SN_eps)
            self.which_conv_ker2 = functools.partial(layers.SNConv2d,
                                                kernel_size=2, padding=0,
                                                num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                                                eps=self.SN_eps)
            self.get_map_conv = functools.partial(layers.SNConv2d,
                                                kernel_size=1, padding=0,
                                                num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                                                eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear,
                                                  num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
                                                  eps=self.SN_eps)
        else:
            self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
            self.which_linear = nn.Linear

        # We use a non-spectral-normed embedding here regardless;
        # For some reason applying SN to G's embedding seems to randomly cripple G
        self.which_embedding = nn.Embedding

        # Prepare model
        # First linear layer
        self.linear = []
        self.get_map = []
        self.all_blocks = []
        self.output_layer = []
        self.spade_layers = []
        self.out_to_one_dim = []
        self.repeat = opts.num_components

        self.free_dynamic_component = utils.check_arg(self.opts, 'free_dynamic_component')

        for ind in range(self.repeat):
            if self.repeat > 1:
                in_dim = self.dim_z

                att_dim = self.opts.att_dim

                self.get_map.append(nn.Sequential(
                    nn.Linear(self.dim_z, (1 + att_dim) * (self.bottom_width ** 2)),
                    View((-1, 1 + att_dim, self.bottom_width, self.bottom_width))
                ))
                if self.opts.spade_index > -1:
                    if self.opts.spade_index == 0:
                        spade_in_chan = self.opts.fixed_v_dim
                    else:
                        spade_in_chan = self.arch['in_channels'][self.opts.spade_index]
                    self.spade_layers.append(SPADE(spade_in_chan, att_dim))

                if (ind == 1 and not self.free_dynamic_component) or (ind == 0):
                    self.linear.append(nn.Sequential(self.which_linear(in_dim, self.opts.fixed_v_dim)))
                else:
                    self.linear.append(self.which_linear(in_dim,
                                                         self.arch['in_channels'][0] * (self.bottom_width ** 2)))

            else:
                in_dim = self.dim_z
                self.linear.append(self.which_linear(in_dim,
                                                     self.arch['in_channels'][0] * (self.bottom_width ** 2)))

            # self.blocks is a doubly-nested list of modules, the outer loop intended
            # to be over blocks at a given resolution (resblocks and/or self-attention)
            # while the inner loop is over a given block
            self.blocks = []
            for index in range(len(self.arch['out_channels'])):
                upsample_factor = 2
                if index == 0:
                    self.in_dim = self.arch['in_channels'][index] if (ind == 1 and self.free_dynamic_component) or (self.repeat < 2) else self.opts.fixed_v_dim
                    in_dim = self.in_dim
                    if resolution == 84:
                        upsample_factor = 3
                else:
                    in_dim = self.arch['in_channels'][index]

                if utils.check_arg(opts, 'simple_blocks'):
                    self.blocks += [[layers.SimpleGBlock(in_channels=in_dim,
                                                   out_channels=self.arch['out_channels'][index],
                                                   num_conv=2 if index == 0 else 1
                                                   )]]
                else:
                    self.blocks += [[layers.GBlock(in_channels=in_dim,
                                                   out_channels=self.arch['out_channels'][index],
                                                   which_conv=self.which_conv,
                                                   activation=self.activation,
                                                   upsample=(functools.partial(F.interpolate, scale_factor=upsample_factor)
                                                             if self.arch['upsample'][index] else None))]]

                # If attention on this block, attach it to the end
                if self.arch['attention'][self.arch['resolution'][index]] and (self.repeat <= 1 or not utils.check_arg(self.opts, 'no_attention')):
                    if not utils.check_arg(opts, 'simple_blocks'):
                        print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
                        self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]

            # Turn self.blocks into a ModuleList so that it's all properly registered.
            self.all_blocks.append(nn.ModuleList([nn.ModuleList(block) for block in self.blocks]))

            # output layer: batchnorm-relu-conv.
            # Consider using a non-spectral conv here
            last_conv = self.which_conv
            if utils.check_arg(self.opts, 'simple_blocks'):
                last_conv = self.which_conv_ker2
            if self.opts.no_in:
                self.output_layer.append(nn.Sequential(self.activation,
                                                       last_conv(self.arch['out_channels'][-1], 3)))
            else:
                self.output_layer.append(nn.Sequential(nn.InstanceNorm2d(self.arch['out_channels'][-1]),
                                                  self.activation,
                                                    last_conv(self.arch['out_channels'][-1], 3)))
            if self.repeat > 1:
                self.out_to_one_dim.append(nn.Sequential(self.activation,
                                            last_conv(self.arch['out_channels'][-1], 1),
                                                         nn.LeakyReLU(0.2)))

        self.linear = nn.ModuleList(self.linear)
        self.all_blocks = nn.ModuleList(self.all_blocks)
        self.output_layer = nn.ModuleList(self.output_layer)

        if self.repeat > 1:
            if self.opts.spade_index > -1:
                self.spade_layers = nn.ModuleList(self.spade_layers)
            self.get_map = nn.ModuleList(self.get_map)
            self.out_to_one_dim = nn.ModuleList(self.out_to_one_dim)
            self.fine_mask = nn.Sequential(nn.Conv2d(opts.num_components, 512, 1, 1),
                                           nn.LeakyReLU(0.2),
                                           nn.Conv2d(512, opts.num_components, 1))

        else:
            if self.opts.do_memory:
                self.out_to_one_dim = nn.Sequential(self.activation,
                                                    self.which_conv(self.arch['out_channels'][-1], 1),
                                                    nn.LeakyReLU(0.2))
                self.fine_mask = nn.Sequential(nn.Conv2d(2, 512, 1, 1),
                                               nn.LeakyReLU(0.2),
                                               nn.Conv2d(512, 2, 1))

        self.base_temperature = self.opts.temperature
        if utils.check_arg(self.opts, 'base_temperature'):
            self.base_temperature = self.opts.base_temperature

        # Initialize weights. Optionally skip init for testing.
        if not skip_init:
            self.init_weights()
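With hierarchical latents enabled, z is rounded so it splits evenly across the injection slots; the bookkeeping in isolation (dimensions are illustrative):

dim_z = 512
num_in_channels = 5                # stand-in for len(self.arch['in_channels'])
num_slots = num_in_channels + 1    # one chunk per block, plus the first linear
z_chunk_size = dim_z // num_slots  # 85
dim_z = z_chunk_size * num_slots   # 510: recalculated for even splitting
print(num_slots, z_chunk_size, dim_z)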