Example #1
    def forward(self, x):
        N, C, H, W = x.size()
        y = x.clone()
        # Run the two task-specific backbones in parallel on the same input.
        x = self.net1.base(x)
        y = self.net2.base(y)
        xs, ys = [], []
        for stage_id in range(self.num_stages):
            x = self.net1.stages[stage_id](x)
            y = self.net2.stages[stage_id](y)
            # Fuse the two feature streams with this stage's NDDR layer.
            if isinstance(x, list):
                x[0], y[0] = self.nddrs['nddrs'][stage_id](x[0], y[0])
            else:
                x, y = self.nddrs['nddrs'][stage_id](x, y)
            # Collect intermediate features for auxiliary supervision.
            if self.aux and self.training and stage_id in self.cfg.TRAIN.AUX_LAYERS:
                xs.append(x)
                ys.append(y)
        x = self.net1.head(x)
        y = self.net2.head(y)
        result = AttrDict({'out1': x, 'out2': y})

        if self.aux and self.training:
            # Upsample the stored intermediate features to the final output
            # size and concatenate them for the auxiliary heads.
            _, _, h, w = x.size()
            aux_x = torch.cat(
                [F.interpolate(_x, (h, w), mode='bilinear', align_corners=True)
                 for _x in xs[:-1]] + [xs[-1]],
                dim=1)
            aux_y = torch.cat(
                [F.interpolate(_y, (h, w), mode='bilinear', align_corners=True)
                 for _y in ys[:-1]] + [ys[-1]],
                dim=1)
            result.aux1 = self.aux_conv1(aux_x)
            result.aux2 = self.aux_conv2(aux_y)
        return result
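
Every example on this page returns its outputs wrapped in an AttrDict. The class itself is not shown here; a minimal sketch of the behaviour the examples rely on (attribute access mirroring key access) might look like the following. This is an assumption for illustration, not the project's actual implementation:

class AttrDict(dict):
    """Minimal attribute-style dict: d.key behaves like d['key']."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value


result = AttrDict({'out1': 'seg_logits', 'out2': 'normals_logits'})
result.aux1 = 'aux_seg_logits'         # attribute writes create dict keys
assert result.out1 == result['out1']   # attribute reads mirror key lookups
assert 'aux1' in result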
Example #2
def cfg_merge_dicts(dict_a, dict_b):
    """Merge dict_a into dict_b in place: every key in dict_a must already
    exist in dict_b, string values are parsed with literal_eval, and the
    resulting types must match the existing defaults."""
    from ast import literal_eval

    for key, value in dict_a.items():
        if key not in dict_b:
            raise KeyError('Invalid key in config file: {}'.format(key))
        if type(value) is dict:
            dict_a[key] = value = AttrDict(value)
        if isinstance(value, str):
            try:
                value = literal_eval(value)
            except BaseException:
                pass
        # the types must match, too
        old_type = type(dict_b[key])
        if old_type is not type(value) and value is not None:
            raise ValueError(
                'Type mismatch ({} vs. {}) for config key: {}'.format(
                    type(dict_b[key]), type(value), key))
        # recursively merge dicts
        if isinstance(value, AttrDict):
            try:
                cfg_merge_dicts(dict_a[key], dict_b[key])
            except BaseException:
                raise Exception('Error under config key: {}'.format(key))
        else:
            dict_b[key] = value
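
cfg_merge_dicts walks dict_a recursively, requires each key to already exist in dict_b, coerces string values with literal_eval, enforces matching types, and writes the result back into dict_b. A hedged usage sketch, assuming the minimal AttrDict sketched under Example #1 and the function exactly as defined above (the config keys below are made up for illustration):

# Merge a YAML-style override dict into a default config.
defaults = AttrDict({'NUM_GPUS': 1,
                     'TRAIN': AttrDict({'LR': 0.01, 'EPOCHS': 10})})
overrides = {'NUM_GPUS': '4', 'TRAIN': {'LR': '0.1'}}  # strings, as loaded from YAML

cfg_merge_dicts(overrides, defaults)

assert defaults.NUM_GPUS == 4       # '4' was parsed by literal_eval
assert defaults.TRAIN.LR == 0.1     # nested dicts are merged recursively
assert defaults.TRAIN.EPOCHS == 10  # untouched defaults are preserved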
Example #3
def cfg_merge_dicts(dict_a, dict_b):
    # Assign every value present in dict_a to the matching key in dict_b, i.e.
    # dict_b is updated to follow dict_a while dict_a itself stays unchanged.
    # Typical call: cfg_merge_dicts(yaml_config, __C)
    from ast import literal_eval

    for key, value in dict_a.items():
        # print('key, value',key, value)
        if key not in dict_b:
            raise KeyError('Invalid key in config file: {}'.format(key))
        if type(value) is dict:
            dict_a[key] = value = AttrDict(value)
        if isinstance(value, str):
            # isinstance checks whether value is a str
            try:
                value = literal_eval(value)
            except BaseException:
                pass
        # the types must match, too
        old_type = type(dict_b[key])
        if old_type is not type(value) and value is not None:
            raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(dict_b[key]), type(value), key))
        # recursively merge dicts
        if isinstance(value, AttrDict):
            try:
                # print('ren',dict_a[key],dict_b[key])
                cfg_merge_dicts(dict_a[key], dict_b[key])
            except BaseException:
                raise Exception('Error under config key: {}'.format(key))
        else:
            dict_b[key] = value
Example #4
    def forward(self, x):
        # Single shared trunk; only the two task-specific heads differ.
        x = self.net1.base(x)
        for stage_id in range(self.num_stages):
            x = self.net1.stages[stage_id](x)
        out1 = self.net1.head(x)
        out2 = self.net2.head(x)
        return AttrDict({'out1': out1, 'out2': out2})
Example #5
    def forward(self, x):
        N, C, H, W = x.size()
        y = x.clone()
        # Two fully independent networks; no cross-task feature sharing.
        x = self.net1.base(x)
        y = self.net2.base(y)
        for stage_id in range(self.num_stages):
            x = self.net1.stages[stage_id](x)
            y = self.net2.stages[stage_id](y)
        x = self.net1.head(x)
        y = self.net2.head(y)
        return AttrDict({'out1': x, 'out2': y})
Example #6
        self.power_when_cast = self.power()
        super(Vote, self).save(*largs, **kwargs)

    def power(self):
        # Follow the reverse delegation chain to discover how much power we have.
        p = 1

        return p

    def get_value(self):
        return self.power() * self.value


STATEMENT_TYPE = AttrDict({
    'REFERENCE': 0,
    'ASSUMPTION': 1,
    'STATEMENT': 2,
    'HEADER': 3,
})

STATEMENT_TYPE_CHOICES = tuple((v, k.title()) for k, v in STATEMENT_TYPE.items())

CP_TYPE = AttrDict({
    'DELETED': 1,
    'MOVED': 2,
    'CHANGED': 3,
    'ADDED': 4,
})

CP_TYPE_CHOICES = tuple((v, k.title()) for k, v in CP_TYPE.items())
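
The *_CHOICES tuples above turn each AttrDict of constants into Django-style (value, label) pairs. A quick sketch of what the comprehension produces, assuming the minimal AttrDict sketched under Example #1 (a plain dict subclass, so .items() behaves as usual):

STATEMENT_TYPE = AttrDict({
    'REFERENCE': 0,
    'ASSUMPTION': 1,
    'STATEMENT': 2,
    'HEADER': 3,
})
STATEMENT_TYPE_CHOICES = tuple((v, k.title()) for k, v in STATEMENT_TYPE.items())

assert set(STATEMENT_TYPE_CHOICES) == {
    (0, 'Reference'), (1, 'Assumption'), (2, 'Statement'), (3, 'Header')}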

Example #7
    def forward(self, x):
        N, C, H, W = x.size()
        y = x.clone()
        x = self.net1.base(x)
        y = self.net2.base(y)
        xs, ys = [], []
        for stage_id in range(self.num_stages):
            x = self.net1.stages[stage_id](x)
            y = self.net2.stages[stage_id](y)
            # Cache each stage's output so later stages can fuse features
            # across the two networks.
            if isinstance(x, list):
                xs.append(x[0])
                ys.append(y[0])
            else:
                xs.append(x)
                ys.append(y)

            # Look up which earlier stages feed this stage and fetch their
            # learned path weights.
            net1_path_ids = np.nonzero(
                self.net1_connectivity_matrix[stage_id])[0]
            net2_path_ids = np.nonzero(
                self.net2_connectivity_matrix[stage_id])[0]
            net1_path_weights = self.net1_alphas[stage_id][net1_path_ids]
            net2_path_weights = self.net2_alphas[stage_id][net2_path_ids]

            # Decide how path weights become connectivity gates, depending on
            # the training / evaluation mode.
            if self.training:
                if self.supernet:
                    connectivity = 'all'
                elif self.retraining:
                    connectivity = 'onehot'
                else:
                    if self.arch_training:  # Training architecture
                        if self.hard_arch_training:
                            connectivity = 'gumbel'
                        else:
                            connectivity = 'sigmoid'
                    else:  # Training weights
                        if self.hard_weight_training:
                            connectivity = 'gumbel'
                        else:
                            connectivity = 'sigmoid'
            else:
                if self.supernet:
                    connectivity = 'all'
                elif self.retraining:
                    connectivity = 'onehot'
                elif self.stochastic_evaluation:
                    assert not self.hard_evaluation
                    connectivity = 'bernoulli'
                elif self.hard_evaluation:
                    connectivity = 'onehot'
                else:
                    connectivity = 'sigmoid'

            # Dispatch to the selected gating function for both networks.
            connectivity_fn = {
                'gumbel': self.gumbel_connectivity,
                'sigmoid': self.sigmoid_connectivity,
                'all': self.all_connectivity,
                'bernoulli': self.bernoulli_connectivity,
                'onehot': self.onehot_connectivity,
            }[connectivity]
            net1_path_connectivity = connectivity_fn(net1_path_weights)
            net2_path_connectivity = connectivity_fn(net2_path_weights)

            if isinstance(x, list):
                net1_fusion_input = [x[0]]
                net2_fusion_input = [y[0]]
            else:
                net1_fusion_input = [x]
                net2_fusion_input = [y]

            # Gate the cached features coming from the other network and add
            # them as fusion inputs for this stage.
            for idx, input_id in enumerate(net1_path_ids):
                net1_fusion_input.append(net1_path_connectivity[idx] *
                                         ys[input_id])
            for idx, input_id in enumerate(net2_path_ids):
                net2_fusion_input.append(net2_path_connectivity[idx] *
                                         xs[input_id])

            if isinstance(x, list):
                x[0] = self.paths['net1_paths'][stage_id](net1_fusion_input)
                y[0] = self.paths['net2_paths'][stage_id](net2_fusion_input)
            else:
                x = self.paths['net1_paths'][stage_id](net1_fusion_input)
                y = self.paths['net2_paths'][stage_id](net2_fusion_input)

        x = self.net1.head(x)
        y = self.net2.head(y)
        return AttrDict({'out1': x, 'out2': y})
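
Example #7 calls several gating helpers (gumbel_connectivity, sigmoid_connectivity, all_connectivity, bernoulli_connectivity, onehot_connectivity) that are not shown on this page. A hedged sketch of what the sigmoid and one-hot variants might look like; these are assumptions about their behaviour (soft gates in (0, 1) while searching, a hard single-path selection for retraining/evaluation), not the project's actual code:

import torch


def sigmoid_connectivity(path_weights):
    # Soft gates: every candidate path contributes, scaled by sigmoid(weight).
    return torch.sigmoid(path_weights)


def onehot_connectivity(path_weights):
    # Hard selection: keep only the strongest path, zero out the rest.
    gates = torch.zeros_like(path_weights)
    gates[torch.argmax(path_weights)] = 1.0
    return gates


weights = torch.tensor([0.2, -1.0, 1.5])    # hypothetical per-path logits
print(sigmoid_connectivity(weights))        # ~tensor([0.5498, 0.2689, 0.8176])
print(onehot_connectivity(weights))         # tensor([0., 0., 1.])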
Example #8
Definition of all configuration options for training/testing the Timeception model on various datasets.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging
import sys

from core.utils import AttrDict

logger = logging.getLogger(__name__)

__C = AttrDict()
cfg = __C

# region Misc

__C.DEBUG = False  # is debugging
__C.NUM_GPUS = 1  # how many GPUs to use
__C.LOG_PERIOD = 10  # log period
__C.DATASET_NAME = str('')  # name of dataset

# endregion

# region Model

__C.MODEL = AttrDict()
__C.MODEL.CLASSIFICATION_TYPE = str(