示例#1
0
    def __init__(self,
                 num_inputs,
                 recurrent=False,
                 hidden_size=512,
                 map_width=20):
        """Fixed-map Micropolis base: a small conv trunk plus separate
        actor/critic heads sized for a map_width x map_width grid."""
        super(MicropolisBase_fixedmap, self).__init__(recurrent, hidden_size,
                                                      hidden_size)

        self.map_width = map_width
        # Number of times the map can be halved down to a single tile.
        self.num_maps = int(math.log(self.map_width, 2))

        # Trunk layers: dirac weights, small constant bias, relu gain.
        relu_init = lambda m: init(m, nn.init.dirac_,
                                   lambda x: nn.init.constant_(x, 0.1),
                                   nn.init.calculate_gain('relu'))

        self.skip_compress = relu_init(nn.Conv2d(num_inputs, 15, 1, stride=1))

        self.conv_0 = relu_init(nn.Conv2d(num_inputs, 64, 1, 1, 0))
        self.conv_1 = relu_init(nn.Conv2d(64, 64, 5, 1, 2))
        self.conv_2 = relu_init(nn.Conv2d(64, 64, 3, 1, 1))
        self.critic_compress = relu_init(nn.Conv2d(79, 64, 3, 1, 1))
        self.critic_downsize = relu_init(nn.Conv2d(64, 64, 3, 2, 1))
        self.critic_conv = relu_init(nn.Conv2d(64, 64, 3, 2, 1))

        # Output heads: dirac weights, zero bias, no gain.
        head_init = lambda m: init(m, nn.init.dirac_,
                                   lambda x: nn.init.constant_(x, 0))

        self.actor_compress = head_init(nn.Conv2d(79, 19, 3, 1, 1))
        self.critic_conv_1 = head_init(nn.Conv2d(64, 1, 1, 1, 0))
        #       self.critic_conv_2 = init_(nn.Conv2d(1, 1, 2, 1, 0)) # for 40x40 map
        self.train()
示例#2
0
    def __init__(self,
                 num_inputs,
                 recurrent=False,
                 hidden_size=256,
                 map_width=20,
                 num_actions=18):
        """Fully-convolutional base: 1x1 embed -> 5x5 -> 3x3 conv trunk,
        a dense value head over the flattened map, and a 1x1 conv action head.

        Args:
            num_inputs: number of input observation channels.
            recurrent: forwarded to the base class.
            hidden_size: forwarded to the base class.
            map_width: side length of the (square) input map.
            num_actions: per-tile action channels produced by `self.act`.
        """
        super(MicropolisBase_FullyConv, self).__init__(recurrent, hidden_size,
                                                       hidden_size)
        # NOTE(review): removed a no-op `num_actions = num_actions`
        # self-assignment that was dead code in the original.
        self.map_width = map_width
        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0.1), nn.init.calculate_gain('relu'))

        self.embed = init_(nn.Conv2d(num_inputs, 32, 1, 1, 0))
        self.k5 = init_(nn.Conv2d(32, 16, 5, 1, 2))
        self.k3 = init_(nn.Conv2d(16, 32, 3, 1, 1))
        # Flattened feature size feeding the dense value head.
        state_size = map_width * map_width * 32

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0))

        self.dense = init_(nn.Linear(state_size, 256))
        self.val = init_(nn.Linear(256, 1))

        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0))

        self.act = init_(nn.Conv2d(32, num_actions, 1, 1, 0))
示例#3
0
    def __init__(self,
                 num_inputs,
                 activation,
                 recurrent=False,
                 hidden_size=512):
        """CNN base whose f1 layer is initialized with a gain matching the
        chosen activation (0 -> relu, 1 -> tanh, anything else -> sigmoid).

        Args:
            num_inputs: input channels for the first conv layer.
            activation: selects the init gain for f1 (see above).
            recurrent: forwarded to the base class.
            hidden_size: width of the f1 feature layer.
        """
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)
        self.activation = activation
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), nn.init.calculate_gain('relu'))

        self.conv1 = init_(nn.Conv2d(num_inputs, 32, 8, stride=4))
        self.conv2 = init_(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = init_(nn.Conv2d(64, 32, 3, stride=1))

        # The f1 layer was duplicated in every branch of the original;
        # only the init gain differs, so pick the gain and build it once.
        if self.activation == 0:
            f1_gain = 'relu'
            print("Use relu activation for f1 layer")
        elif self.activation == 1:
            f1_gain = 'tanh'
            print("Use tanh activation for f1 layer")
        else:
            f1_gain = 'sigmoid'
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), nn.init.calculate_gain(f1_gain))
        self.f1 = init_(nn.Linear(11264, hidden_size))

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0))

        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
示例#4
0
    def __init__(self, chan, outputs, atoms):
        """CNN base mapping `chan`-channel frames to an `atoms`-dim feature
        vector, with a categorical distribution head over `outputs` actions
        and a scalar critic."""
        super(CNNBase, self).__init__()  #recurrent, hidden_size, hidden_size)
        self.actions = outputs
        self.chan = chan
        self.atoms = atoms
        self.new_size = 1
        self.dist = Cat(self.atoms, self.actions)

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        # For 80x60 input
        trunk = [
            relu_init(nn.Conv2d(chan, 32, kernel_size=5, stride=2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 32, kernel_size=5, stride=2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 32, kernel_size=4, stride=2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            Flatten(),
            relu_init(nn.Linear(1568, self.atoms)),
            nn.ReLU(),
        ]
        self.main = nn.Sequential(*trunk)

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        self.critic_linear = plain_init(nn.Linear(self.atoms, 1))

        self.train()
示例#5
0
    def __init__(self,
                 obs_shape,
                 num_actions,
                 base_kwargs=None,
                 extra_kwargs=None):
        """Flat actor network: Atari CNN trunk followed by a linear layer
        over the actions.

        Args:
            obs_shape: observation shape, channels first (C, ...).
            num_actions: size of the discrete action space.
            base_kwargs: unused here; kept for interface compatibility.
            extra_kwargs: optional dict; honors 'use_backpack' to wrap the
                model with backpack's `extend` for per-sample gradients.
        """
        super(Policy, self).__init__()
        # Robustness: the original crashed with a TypeError when
        # extra_kwargs was left at its default of None.
        extra_kwargs = extra_kwargs or {}
        self.use_backpack = extra_kwargs.get('use_backpack', False)
        self.recurrent_hidden_state_size = 1
        num_outputs = num_actions
        hidden_size = 512
        conv_init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                                    constant_(x, 0),
                                    nn.init.calculate_gain('relu'))
        lin_init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                                   constant_(x, 0))
        self.model = nn.Sequential(
            conv_init_(nn.Conv2d(obs_shape[0], 32, 8, stride=4)), nn.ReLU(),
            conv_init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            conv_init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
            # relu-gain init is deliberate here: this Linear feeds a ReLU.
            conv_init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU(),
            lin_init_(nn.Linear(hidden_size, num_outputs)))
        if self.use_backpack:
            extend(self.model)

        self.model.train()
示例#6
0
    def __init__(self, num_inputs, recurrent=False, hidden_size=128):
        """CNN base for 80x60 inputs: three strided convs with batch norm,
        flattened into a `hidden_size` feature vector; scalar critic head."""
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        # For 80x60 input
        trunk = [
            relu_init(nn.Conv2d(num_inputs, 32, kernel_size=5, stride=2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 32, kernel_size=5, stride=2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 32, kernel_size=4, stride=2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            Flatten(),
            relu_init(nn.Linear(32 * 7 * 5, hidden_size)),
            nn.ReLU(),
        ]
        self.main = nn.Sequential(*trunk)

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        self.critic_linear = plain_init(nn.Linear(hidden_size, 1))

        self.train()
示例#7
0
def make_corpus():
    """Initialize utils, then build and persist the interest corpus."""
    utils.init()
    all_interests = list(utils.get_all_interests())
    corpus = MyCorpus(all_interests)
    # Build mappings and the dictionary before writing anything out.
    corpus.build_interests_to_articles()
    corpus.build_dict()
    corpus.write_corpus()
示例#8
0
    def __init__(self):
        """Attention decoder: word- and section-level attention, a
        single-layer unidirectional LSTM, an optional pointer gate, and a
        vocabulary projection head. All sizes come from `config`."""
        super(Decoder, self).__init__()
        self.emb_dim = config.emb_dim
        self.hidden_dim = config.hidden_dim
        self.attn_word = WordAttention()
        self.attn_sec = SectionAttention()
        # Mixes the 2*hidden context with the token embedding back down to
        # embedding size before it is fed to the LSTM.
        self.combine_context = nn.Linear(self.hidden_dim * 2 + self.emb_dim,
                                         self.emb_dim)
        self.softmax = nn.Softmax(dim=1)
        self.rnn = nn.LSTM(self.emb_dim,
                           self.hidden_dim,
                           num_layers=1,
                           batch_first=True,
                           bidirectional=False)
        utils.init(self.rnn)

        if config.pointer:
            # Scalar gate over embedding + 4*hidden inputs -- presumably a
            # pointer-generator style copy switch; confirm against forward.
            self.sigmoid = nn.Sigmoid()
            self.linear_pointer = nn.Linear(self.emb_dim + self.hidden_dim * 4,
                                            1)

        self.feat = nn.Linear(self.hidden_dim * 2,
                              self.hidden_dim * 2,
                              bias=False)
        self.linear = nn.Linear(self.hidden_dim * 3, self.hidden_dim)
        # Near-zero normal init for the projection layers.
        self.linear.weight.data.normal_(std=1e-4)
        self.linear.bias.data.normal_(std=1e-4)

        self.vocab = nn.Linear(self.hidden_dim, config.vocab_size)
        self.vocab.weight.data.normal_(std=1e-4)
        self.vocab.bias.data.normal_(std=1e-4)
示例#9
0
    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        """Micropolis base with a conv trunk, paired 1-channel down/up conv
        stages, and separate actor/critic compression heads.

        Args:
            num_inputs: input observation channels.
            recurrent: forwarded to the base class.
            hidden_size: forwarded to the base class.
        """
        super(MicropolisBase, self).__init__(recurrent, hidden_size,
                                             hidden_size)

        # Dirac weights, zero bias, relu gain.
        # NOTE(review): removed an unused `import sys` left inside this
        # method (debugging leftover).
        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0), nn.init.calculate_gain('relu'))

        self.skip_compress = init_(nn.Conv2d(num_inputs, 15, 1, stride=1))

        self.conv_0 = init_(nn.Conv2d(num_inputs, 64, 1, 1, 0))
        self.conv_1 = init_(nn.Conv2d(64, 64, 5, 1, 2))
        self.conv_2 = init_(nn.Conv2d(1, 1, 3, 1, 0))
        self.conv_2_chan = init_(nn.ConvTranspose2d(1, 1, (1, 3), 1, 0))
        self.conv_3 = init_(nn.ConvTranspose2d(1, 1, 3, 1, 0))
        self.conv_3_chan = init_(nn.Conv2d(1, 1, (1, 3), 1, 0))

        self.actor_compress = init_(nn.Conv2d(79, 20, 3, 1, 1))

        # NOTE(review): kernel 1 with padding 1 pads the critic map by one
        # tile on each side -- confirm this is intentional.
        self.critic_compress = init_(nn.Conv2d(79, 8, 1, 1, 1))

        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0))

        # 20x20 kernel at stride 20: collapses the map to a single value.
        self.critic_conv_1 = init_(nn.Conv2d(8, 1, 20, 20, 0))
        self.train()
示例#10
0
    def generate(self, in_base_path, out_base_path):
        """Generate the static blog site from `in_base_path` into
        `out_base_path`: copy static assets, render 'sticky'/'posts'
        content, the home page, and the feed.

        Fixes: removed un-Pythonic trailing semicolons and the unused
        `proc` local.
        """
        self.in_base_path = in_base_path
        self.out_base_path = out_base_path

        utils.makedirs(out_base_path)
        imgutils.init(in_base_path)
        utils.init(in_base_path)

        # Site-wide metadata, exposed attribute-style via Struct.
        self.blog = Struct(
            json.load(utils.open_file(self.in_base_path + "/blog.json")))

        # copy static content
        # NOTE(review): paths are interpolated into a shell command; fine
        # for trusted local paths, unsafe for untrusted input.
        cmd = "cp -rf " + in_base_path + "/static/* " + out_base_path
        print("copy static content: " + cmd)
        utils.execute_shell(cmd)

        # 'dynamic' content: one accumulator list per category.
        for c in ["sticky", "posts"]:
            setattr(self, c, [])
            self.generate_content(c)

        # home page
        self.generate_home()

        # feed
        self.generate_feed()
0
    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        """CNN base over embedded discrete inputs: `self.emb` maps integer
        codes in [0, 10) to 8-dim vectors (presumably applied in the forward
        pass -- not visible here), giving num_inputs * 8 conv channels.

        Fixes: removed a dead triple-quoted block holding a commented-out
        fully-connected alternative to `self.main`.
        """
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), nn.init.calculate_gain('relu'))

        # 10 possible codes, embedded into 8 channels each.
        self.emb = nn.Embedding(10, 8)

        self.main = nn.Sequential(
            init_(nn.Conv2d(num_inputs * 8, 64, 3, stride=2, padding=1)),
            nn.ReLU(), init_(nn.Conv2d(64, 128, 3, stride=2, padding=1)),
            nn.ReLU(), init_(nn.Conv2d(128, 256, 3, stride=2, padding=1)),
            nn.ReLU(), Flatten(), init_(nn.Linear(256 * 1 * 1, hidden_size)),
            nn.ReLU())

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0))

        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
示例#12
0
    def __init__(self, num_inputs, activation=1, modulation=False, recurrent=False, hidden_size=512):
        """Atari-style CNN base; the critic optionally takes one extra
        modulation scalar alongside the hidden features."""
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)
        print("model modulation:", modulation)
        self.activation = activation
        self.modulation = modulation

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        self.conv1 = relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4))
        self.conv2 = relu_init(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = relu_init(nn.Conv2d(64, 32, 3, stride=1))

        # f1 gets a tanh gain when activation == 1, relu gain otherwise.
        f1_init = relu_init
        if self.activation == 1:
            f1_init = lambda m: init(m, nn.init.orthogonal_,
                                     lambda x: nn.init.constant_(x, 0),
                                     nn.init.calculate_gain('tanh'))
        self.f1 = f1_init(nn.Linear(32 * 7 * 7, hidden_size))

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        # One extra input column for the modulation scalar, if enabled.
        critic_in = hidden_size + 1 if self.modulation else hidden_size
        self.critic_linear = plain_init(nn.Linear(critic_in, 1))

        self.train()
    def __init__(self,
                 num_inputs,
                 recurrent=False,
                 hidden_size=512,
                 obs_mean=0.0,
                 obs_std=255.0):
        """CNN base with a LeakyReLU/BatchNorm trunk; stores observation
        normalization constants for use elsewhere."""
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))
        self.obs_mean = obs_mean
        self.obs_std = obs_std

        trunk = [
            relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4)),
            nn.LeakyReLU(0.2, inplace=True),
            nn.BatchNorm2d(32),
            relu_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.LeakyReLU(0.2, inplace=True),
            nn.BatchNorm2d(64),
            relu_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.LeakyReLU(0.2, inplace=True),
            nn.BatchNorm2d(64),
            Flatten(),
            relu_init(nn.Linear(64 * 7 * 7, hidden_size)),
            nn.BatchNorm1d(hidden_size),
        ]
        self.main = nn.Sequential(*trunk)

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        self.critic_linear = plain_init(nn.Linear(hidden_size, 1))

        self.train()
    def __init__(
        self, obs_shape, action_space, use_cuda
    ):  #use_cuda is unused; kept for interface compatibility (I2A passes it)
        """Four-conv Atari model with shared linear trunk and separate
        actor/critic output layers."""
        super(AtariModel, self).__init__()
        from i2a.utils import get_linear_dims_after_conv

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        in_channels = obs_shape[0]
        spatial_dims = obs_shape[1:]

        self.conv1 = relu_init(
            nn.Conv2d(in_channels, 16, kernel_size=3, stride=1, padding=0))
        self.conv2 = relu_init(
            nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=0))
        self.conv3 = relu_init(
            nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=0))
        self.conv4 = relu_init(
            nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=0))

        # Flattened feature count after the conv stack for this input size.
        self.linear_input_size = get_linear_dims_after_conv(
            [self.conv1, self.conv2, self.conv3, self.conv4], spatial_dims)

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        self.linear1 = plain_init(nn.Linear(self.linear_input_size, 256))

        self.critic_linear = plain_init(nn.Linear(256, 1))
        self.actor_linear = plain_init(nn.Linear(256, action_space))

        self.train()
示例#15
0
    def __init__(self):
        """Hierarchical encoder: a word-level LSTM feeding a section-level
        LSTM, plus linear bridges that halve the (2*hidden) bidirectional
        state back to `hidden_dim`. All sizes come from `config`."""
        super(Encoder, self).__init__()
        self.emb_dim = config.emb_dim
        self.hidden_dim = config.hidden_dim
        self.dropout = nn.Dropout(config.drop_out)

        # Word-level RNN over token embeddings.
        self.rnn_word = nn.LSTM(self.emb_dim,
                                self.hidden_dim,
                                num_layers=config.enc_layers,
                                bidirectional=config.enc_bidi,
                                batch_first=True)
        # Section-level RNN over word-level summaries.
        self.rnn_sec = nn.LSTM(self.hidden_dim,
                               self.hidden_dim,
                               num_layers=config.enc_layers,
                               bidirectional=config.enc_bidi,
                               batch_first=True)
        utils.init(self.rnn_word)
        utils.init(self.rnn_sec)

        # Projections from 2*hidden (bidirectional concat) down to hidden.
        self.sec = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
        self.linear_hidden = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
        # Near-zero normal init for the bridge layers.
        self.linear_hidden.weight.data.normal_(std=1e-4)
        self.linear_hidden.bias.data.normal_(std=1e-4)

        self.linear_cell = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
        self.linear_cell.weight.data.normal_(std=1e-4)
        self.linear_cell.bias.data.normal_(std=1e-4)
示例#16
0
    def __init__(self, num_inputs, use_gru):
        """Standard Atari CNN trunk; optionally adds an orthogonally
        initialized GRU cell over the 512-d features."""
        super(CNNBase, self).__init__()

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        self.main = nn.Sequential(
            relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4)),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            relu_init(nn.Conv2d(64, 32, 3, stride=1)),
            nn.ReLU(),
            Flatten(),
            relu_init(nn.Linear(32 * 7 * 7, 512)),
            nn.ReLU())

        if use_gru:
            self.gru = nn.GRUCell(512, 512)
            # Orthogonal recurrent weights, zero biases.
            for w in (self.gru.weight_ih, self.gru.weight_hh):
                nn.init.orthogonal_(w.data)
            for b in (self.gru.bias_ih, self.gru.bias_hh):
                b.data.fill_(0)

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        self.critic_linear = plain_init(nn.Linear(512, 1))

        self.train()
示例#17
0
def main():
    """Build and persist the article adjacency matrix over all interests."""
    utils.init()
    #interests = set(list(utils.get_all_interests())[:50])
    interests = utils.get_all_interests()
    # Pairwise adjacency between articles, then written to disk along with
    # the id -> index mapping.
    matrix = build_article_adjacencies(interests)
    write_matrix(matrix)
    write_ids_to_indexes()
示例#18
0
    def __init__(self,
                 num_inputs,
                 recurrent=False,
                 hidden_size=512,
                 map_width=20,
                 num_actions=18):
        """Fixed-recursion Micropolis base.

        NOTE(review): `num_actions` is accepted but never used in this
        method -- confirm whether the base class or a later edit needs it.
        """
        super(MicropolisBase_fixed, self).__init__(recurrent, hidden_size,
                                                   hidden_size)

        # Presumably the number of times the shared conv stage is applied
        # in the forward pass (forward not visible here) -- confirm.
        self.num_recursions = map_width

        # Trunk layers: dirac weights, small constant bias, relu gain.
        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0.1), nn.init.calculate_gain('relu'))

        self.skip_compress = init_(nn.Conv2d(num_inputs, 15, 1, stride=1))

        self.conv_0 = init_(nn.Conv2d(num_inputs, 64, 1, 1, 0))
        self.conv_1 = init_(nn.Conv2d(64, 64, 5, 1, 2))
        # range(1) creates a single numbered layer (conv_2_0); the loop form
        # suggests the count was meant to be configurable.
        for i in range(1):
            setattr(self, 'conv_2_{}'.format(i),
                    init_(nn.Conv2d(64, 64, 3, 1, 1)))
        self.critic_compress = init_(nn.Conv2d(79, 64, 3, 1, 1))
        for i in range(1):
            setattr(self, 'critic_downsize_{}'.format(i),
                    init_(nn.Conv2d(64, 64, 2, 2, 0)))

        # Output heads: dirac weights, zero bias, no gain.
        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0))

        self.actor_compress = init_(nn.Conv2d(79, 19, 3, 1, 1))
        self.critic_conv = init_(nn.Conv2d(64, 1, 1, 1, 0))
        self.train()
示例#19
0
    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        """Standard Atari CNN trunk producing `hidden_size` features, plus
        a scalar critic head."""
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        self.main = nn.Sequential(
            relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
            relu_init(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            relu_init(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(),
            Flatten(),
            relu_init(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())

        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        self.critic_linear = plain_init(nn.Linear(hidden_size, 1))

        self.train()
示例#20
0
def main():
    """Initialize utils, then build and persist the article adjacency data."""
    utils.init()
    all_interests = utils.get_all_interests()
    adjacencies = build_article_adjacencies(all_interests)
    write_matrix(adjacencies)
    write_ids_to_indexes()
示例#21
0
文件: model.py 项目: jeffleft/ece276c
    def __init__(self, num_inputs, use_gru, num_actions):
        """Unshared actor/critic: each head gets its own full Atari CNN
        trunk; GRU support is not implemented."""
        super(CNNBaseUnshare, self).__init__()

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))
        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        def make_head(out_features):
            # Fresh conv trunk ending in a linear head of `out_features`.
            return nn.Sequential(
                relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
                relu_init(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
                relu_init(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(),
                Flatten(),
                relu_init(nn.Linear(32 * 7 * 7, 512)), nn.ReLU(),
                plain_init(nn.Linear(512, out_features)))

        self.actor = make_head(num_actions)
        self.critic = make_head(1)

        if use_gru:
            raise NotImplementedError

        self.train()
示例#22
0
    def __init__(self,
                 img_num_features,
                 v_num_features,
                 recurrent=False,
                 hidden_size=512):
        """Two-stream base: a CNN over image features and an MLP over
        vector features, fused by a joint MLP; scalar critic head."""
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        relu_init = lambda m: init(m, nn.init.orthogonal_,
                                   lambda x: nn.init.constant_(x, 0),
                                   nn.init.calculate_gain('relu'))

        self.cnn_backbone = nn.Sequential(
            relu_init(nn.Conv2d(img_num_features, 32, 8, stride=4)),
            nn.ReLU(),
            relu_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            relu_init(nn.Conv2d(64, 32, 3, stride=1)),
            nn.ReLU(),
            Flatten(),
            relu_init(nn.Linear(32 * 7 * 7, hidden_size)),
            nn.ReLU())

        # Remaining layers use plain orthogonal init (no gain).
        plain_init = lambda m: init(m, nn.init.orthogonal_,
                                    lambda x: nn.init.constant_(x, 0))

        quarter, half = hidden_size // 4, hidden_size // 2
        self.fc_backbone = nn.Sequential(
            plain_init(nn.Linear(v_num_features, quarter)), nn.ReLU(),
            plain_init(nn.Linear(quarter, half)), nn.ReLU(),
            plain_init(nn.Linear(half, hidden_size)), nn.ReLU())

        self.fc_joint = nn.Sequential(
            plain_init(nn.Linear(hidden_size * 2, hidden_size * 2)), nn.ReLU(),
            plain_init(nn.Linear(hidden_size * 2, hidden_size)), nn.ReLU(),
            plain_init(nn.Linear(hidden_size, hidden_size)), nn.ReLU())

        self.critic_linear = plain_init(nn.Linear(hidden_size, 1))

        self.train()
示例#23
0
    def __init__(self,
                 num_inputs,
                 recurrent=False,
                 hidden_size=512,
                 map_width=20):
        """Multi-scale Micropolis base that repeatedly halves the map by
        `chunk_size`, with per-scale conv layers stored as numbered
        attributes ('prj_life_obs_{i}', 'cmp_life_obs_{i}', ...) --
        presumably looked up with getattr in the forward pass (not
        visible here).

        Fixes: removed an unused `linit_` lambda and commented-out layer
        definitions; collapsed an `if/else` whose two arms created
        identical `prj_life_val_{i}` layers.
        """
        super(MicropolisBase_squeeze, self).__init__(recurrent, hidden_size,
                                                     hidden_size)
        self.chunk_size = 2
        self.map_width = map_width
        # How many times the map can be halved: one set of layers per scale.
        self.num_maps = int(math.log(self.map_width, self.chunk_size))

        # Dirac weights with a large negative bias, relu gain.
        # NOTE(review): the -10 bias looks deliberate -- confirm.
        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, -10), nn.init.calculate_gain('relu'))

        self.cmp_in = init_(nn.Conv2d(num_inputs, 64, 1, stride=1, padding=0))
        for i in range(self.num_maps):
            setattr(self, 'prj_life_obs_{}'.format(i),
                    init_(nn.Conv2d(64, 64, 3, stride=1, padding=1)))
            setattr(self, 'cmp_life_obs_{}'.format(i),
                    init_(nn.Conv2d(128, 64, 3, stride=1, padding=1)))

        for i in range(self.num_maps):
            if i == 0:
                # Only one downsampler is created (dwn_0).
                setattr(self, 'dwn_{}'.format(i),
                        init_(nn.Conv2d(64, 64, 2, stride=2, padding=0)))
            setattr(
                self, 'expand_life_{}'.format(i),
                init_(nn.ConvTranspose2d(64 + 64, 64, 2, stride=2, padding=0)))
            setattr(self, 'prj_life_act_{}'.format(i),
                    init_(nn.Conv2d(64, 64, 3, stride=1, padding=1)))
            setattr(self, 'cmp_life_act_{}'.format(i),
                    init_(nn.Conv2d(128, 64, 3, stride=1, padding=1)))
            setattr(self, 'cmp_life_val_in_{}'.format(i),
                    init_(nn.Conv2d(128, 64, 3, stride=1, padding=1)))
            setattr(self, 'dwn_val_{}'.format(i),
                    init_(nn.Conv2d(64, 64, 2, stride=2, padding=0)))
            # The original branched on `i == self.num_maps - 1` but built
            # the same layer in both arms; collapsed to one statement.
            setattr(self, 'prj_life_val_{}'.format(i),
                    init_(nn.Conv2d(64, 64, 3, stride=1, padding=1)))

        self.cmp_act = init_(nn.Conv2d(128, 64, 3, stride=1, padding=1))

        # Output heads: dirac weights, small positive bias, no gain.
        init_ = lambda m: init(m, nn.init.dirac_, lambda x: nn.init.constant_(
            x, 0.1))

        self.act_tomap = init_(nn.Conv2d(64, 19, 5, stride=1, padding=2))
        self.cmp_val_out = init_(nn.Conv2d(64, 1, 1, stride=1, padding=0))
        self.train()
示例#24
0
def main():
    """Entry point: analyze the framework core JNI directory."""
    # Local imports keep this entry point self-contained.
    import utils
    from call_graph_analysis import process_jni_dir
    utils.init()
    init_arg_parser()
    logging.info(config.DEFAULT_AOSP_PATH)
    logging.info(config.framework_core_jni_path())
    # Run the call-graph analysis over every file in that directory.
    process_jni_dir(config.framework_core_jni_path())
示例#25
0
def main():
    """Long-polling loop: fetch updates and dispatch them (presumably a
    Telegram-style bot API -- confirm against get_updates)."""
    utils.init()
    # Offset of the next update to request; None fetches from the start.
    last_update_id = None
    features = Features()
    while True:
        updates = get_updates(last_update_id)
        if len(updates["result"]) > 0:
            # Advance past the newest update so it is not re-processed.
            last_update_id = get_last_update_id(updates) + 1
            features.music(updates)
            controlButtons(updates["result"])
        # Throttle the polling loop.
        time.sleep(0.5)
示例#26
0
    def __init__(self, num_inputs, hidden_size=512, num_actions=4,
                 use_duel=False, use_noisy_net=False):
        """IQN/C51 head over an Atari CNN trunk.

        Args:
            num_inputs: input channels (stacked 84x84 frames).
            hidden_size: width of the quantile fc layer.
            num_actions: number of discrete actions.
            use_duel: add a separate state-value quantile head.
            use_noisy_net: use NoisyLinear for the fc layers; explicit
                weight init is then skipped (NoisyLinear manages its own).

        Fixes: the original defined two byte-identical init lambdas
        (init_ / init2_) -- deduplicated; the dummy sizing tensor is now
        zero-filled instead of uninitialized memory.
        """
        super(IQN_C51, self).__init__()
        self.num_actions = num_actions
        self.use_duel = use_duel
        self.use_noisy_net = use_noisy_net
        self.quantile_embedding_dim = 64
        self.pi = np.pi

        # Kaiming-uniform weights, zero bias, relu gain.
        init_ = lambda m: init(m,
                               nn.init.kaiming_uniform_,
                               lambda x: nn.init.constant_(x, 0),
                               gain=nn.init.calculate_gain('relu'),
                               mode='fan_in')

        self.conv1 = init_(nn.Conv2d(num_inputs, 32, 8, stride=4))
        self.conv2 = init_(nn.Conv2d(32, 64, 4, stride=2))
        self.conv3 = init_(nn.Conv2d(64, 32, 3, stride=1))

        Linear = NoisyLinear if use_noisy_net else nn.Linear
        self.fc2 = Linear(hidden_size, num_actions * 1)

        # Infer the flattened conv output size with a dummy forward pass.
        dummy = torch.zeros(1, num_inputs, 84, 84)
        feat = self.conv3(self.conv2(self.conv1(dummy)))
        state_net_size = feat.view(feat.size(0), -1).size(1)
        del dummy, feat

        self.quantile_fc0 = nn.Linear(self.quantile_embedding_dim,
                                      state_net_size)
        self.quantile_fc1 = nn.Linear(state_net_size, hidden_size)
        if self.use_duel:
            self.quantile_fc_value = Linear(hidden_size, 1)

        # Param init (NoisyLinear layers initialize themselves).
        if not use_noisy_net:
            self.quantile_fc0 = init_(self.quantile_fc0)
            self.quantile_fc1 = init_(self.quantile_fc1)
            self.fc2 = init_(self.fc2)
            if self.use_duel:
                self.quantile_fc_value = init_(self.quantile_fc_value)
示例#27
0
    def __init__(self, *args, **kwargs):
        """Set up the mobot rpyc service: base framework state plus video
        streaming-thread bookkeeping. Extra args go to rpyc.Service."""
        # Inherit from basic framework
        MobotFramework.__init__(self)
        rpyc.Service.__init__(self, *args, **kwargs)
        # NOTE(review): `init` here appears to be a logging/announce
        # helper -- confirm against its definition.
        init("mobot service instance created")

        # Connection status flag.
        self._connected = False

        # Remote video streaming thread (event signals it to stop).
        self.videostop = threading.Event()
        self.videothd = None
示例#28
0
文件: model.py 项目: hlwang1124/dal
    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        """Small all-stride-1 CNN base over 6-channel map inputs.

        NOTE(review): `num_inputs` is overwritten with 6 below, so the
        parameter is effectively ignored -- confirm this is intended.

        Fixes: removed two large commented-out alternative architectures
        (dead code).
        """
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), nn.init.calculate_gain('relu'))

        # Hard-coded channel count overrides the argument (see note above).
        num_inputs = 6

        self.main = nn.Sequential(
            init_(nn.Conv2d(num_inputs, 16, 3, padding=1, stride=1)),
            nn.ReLU(), init_(nn.Conv2d(16, 32, 3, padding=1, stride=1)),
            nn.ReLU(), init_(nn.Conv2d(32, 32, 3, padding=1, stride=1)),
            nn.ReLU(), init_(nn.Conv2d(32, 16, 3, padding=1, stride=1)),
            nn.ReLU(), Flatten(), init_(nn.Linear(16 * 11 * 11, hidden_size)),
            nn.ReLU())

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0))

        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
示例#29
0
def black_box_eval(files, dataset, test_loader, epsilon, alpha):
    '''
    Run black-box transfer attacks between every ordered pair of checkpoints.

    Adversarial examples are crafted on a source model and evaluated on a
    target model; accumulated accuracies are pickled to
    "blackboxdataset<dataset>".

    :param files: checkpoint paths, each used as both attack source and target
    :param dataset: dataset identifier passed to init() and used in the output filename
    :param test_loader: loader providing the evaluation batches
    :param epsilon: perturbation budget for FGSM / initial epsilon for PGD
    :param alpha: PGD step size
    :return: None (results are written to disk)
    '''
    res = []

    # Two reusable model shells; checkpoint weights are swapped in per pair.
    model_s, optimizer_s, use_cuda_s = init(dataset, 0.001)
    model_t, optimizer_t, use_cuda_t = init(dataset, 0.001)
    for i, target in enumerate(files):
        # Placeholder row recorded if the attacks for this pair raise.
        # NOTE(review): the empty sub-lists are never filled — confirm intent.
        sres = [target, [], [], []]
        for j, source in enumerate(files):
            if j == i:
                continue  # skip the white-box (source == target) pairing
            print(f"attacking {target} from {source}")
            model_s, optimizer_s, _ = load_check_point(source, model_s,
                                                       optimizer_s)
            model_t, optimizer_t, _ = load_check_point(target, model_t,
                                                       optimizer_t)

            accs = [target]
            try:
                print("running fgsm")
                accs.append(
                    fgsm_attack(test_loader,
                                model_s,
                                True,
                                F.cross_entropy,
                                test_batch,
                                init_ep=epsilon,
                                max_range=1,
                                target_model=model_t))
                # PGD at two iteration budgets.
                for it in [20, 100]:
                    accs.append(
                        invoke_pgd_attack(test_loader,
                                          model_s,
                                          True,
                                          epsilon,
                                          alpha,
                                          F.cross_entropy,
                                          test_batch,
                                          iters=it,
                                          target_model=model_t))
                res.append(accs)
            except Exception as e:
                # Best-effort: record the placeholder row and continue with
                # the next source/target pair instead of aborting the sweep.
                print(f"Exception model: {e}")
                res.append(sres)
                continue
    print(f"res:\n {res}")
    with open(f"blackboxdataset{dataset}", "wb") as f:
        pickle.dump(res, f)
示例#30
0
def main():
    utils.init()
    for line in sys.stdin:
        tokens = line.split('\t')
        page_id_str = tokens[0]
        if hash(page_id_str) % int(1 / FRACTION_PAGES_TO_SUMMARIZE) != 0:
            continue
        results_str = tokens[-1]
        if len(page_id_str) < 2:
            print 'invalid page key: %s' % ` page_id_str `
        else:
            page_id = int(page_id_str[1:-1])  # drop open / close quotes
            features = tokens[1] if len(tokens) == 3 else ''
            summarize(page_id, results_str.strip(), features)
示例#31
0
def main():
    """Plot score series for a random sample of articles in an N_COLS x N_ROWS grid."""
    utils.init()

    articleIds = getArticleIds()
    random.shuffle(articleIds)
    plotted = 0
    # Renamed loop variable: `id` shadowed the builtin of the same name.
    for article_id in articleIds:
        if plotted >= N_COLS * N_ROWS:
            break  # the grid is full
        scores = getScores(article_id)[:MAX_VALUES]
        if scores:
            plotOne(article_id, scores, plotted)
            plotted += 1
    pyplot.show()
示例#32
0
def main():
    utils.init()
    for line in sys.stdin:
        tokens = line.split('\t')
        page_id_str = tokens[0]
        if hash(page_id_str) % int(1/FRACTION_PAGES_TO_SUMMARIZE) != 0:
            continue
        results_str = tokens[-1]
        if len(page_id_str) < 2:
            print 'invalid page key: %s' % `page_id_str`
        else:   
            page_id = int(page_id_str[1:-1])   # drop open / close quotes
            features = tokens[1] if len(tokens) == 3 else ''
            summarize(page_id, results_str.strip(), features)
示例#33
0
def main():
    """Plot score series for a random sample of articles in an N_COLS x N_ROWS grid."""
    utils.init()

    articleIds = getArticleIds()
    random.shuffle(articleIds)
    plotted = 0
    # Renamed loop variable: `id` shadowed the builtin of the same name.
    for article_id in articleIds:
        if plotted >= N_COLS * N_ROWS:
            break  # the grid is full
        scores = getScores(article_id)[:MAX_VALUES]
        if scores:
            plotOne(article_id, scores, plotted)
            plotted += 1
    pyplot.show()
示例#34
0
    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        """Two small MLP towers (actor / critic) with tanh activations,
        plus a scalar critic head."""
        super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)

        if recurrent:
            # The recurrent layer's output becomes the MLP input.
            num_inputs = hidden_size

        def init_(module):
            return init(module,
                        init_normc_,
                        lambda x: nn.init.constant_(x, 0))

        # Layers are created in the same order as before so any RNG-dependent
        # weight initialisation is reproduced exactly.
        actor_layers = [
            init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
        ]
        self.actor = nn.Sequential(*actor_layers)

        critic_layers = [
            init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh(),
        ]
        self.critic = nn.Sequential(*critic_layers)

        # Scalar value head.
        self.critic_linear = init_(nn.Linear(hidden_size, 1))

        self.train()
示例#35
0
def solve_random(n,m):
    """Try to solve the towers puzzle by random moves, logging accepted moves
    to output.txt.

    # assumes n = number of pieces, m = number of towers (per ut.init) — TODO confirm
    """
    file_out = open('output.txt', 'w')

    initial, current, final = ut.init(n,m)
    last_sol = m  # NOTE(review): only referenced by the commented-out bookkeeping below
    print('states: ', initial, current, final)
    counter = 0

    while not ut.is_final_state(current,n,m):
        # choose i, j (translated from Romanian: "alege i,j")
        # NOTE(review): `random(n)` calls whatever `random` is bound to in this
        # module; the stdlib random *module* is not callable — verify the import.
        tower_i, tower_j = random(n)
        print('towers: ', tower_i, tower_j)

        if ut.valid_transition(current, tower_i, tower_j) is True:
            print('is valid')
            current = ut.transition(current, tower_i, tower_j)


            s = '(%d, %d)\n' % (tower_i, tower_j)
            file_out.write(s)
            counter = 0  # reset stall counter after a successful move

            #if current[last_sol] == n:
                #last_sol -= 1

        if counter == 100:
            break  # give up after 100 consecutive non-productive iterations

        # NOTE(review): this applies the transition again, even when the move
        # was invalid or was already applied above — looks like a bug; confirm.
        current = ut.transition(current, tower_i, tower_j)
        counter += 1

    print('states: ', initial, current, final)
    file_out.close()
 def signal_handler(signal, frame):
     """Signal handler: remove every child of the election znode.

     Fix: the original had `return` inside the loop body, so only the first
     election child was ever deleted; the loop now runs to completion.
     """
     zk = utils.init()
     election_children = zk.get_children(ELECTION_PATH)
     for child in election_children:
         print("Deleting election child: " + str(child))
         zk.delete(ELECTION_PATH + "/" + child)
示例#37
0
def get_similar():
    utils.init()
    logging.info('loading interests from svd/interests.txt')
    interests = [
            utils.get_interest_by_id(int(line))
            for line in open('svd/interests.txt')
        ]
    index = gensim.similarities.docsim.Similarity.load('svd/lda_index.txt')
    for (i1, similarities) in enumerate(index):
        ordered = []
        for (i2, sim) in enumerate(similarities):
            ordered.append((sim, i2))
        ordered.sort()
        ordered.reverse()
        for (sim, i2) in ordered[:500]:
            print '%s=%s %s=%s %.7f' % (interests[i1].id, interests[i1].text, interests[i2].id, interests[i2].text, sim)
def search():
    """Render the search page on GET; look up the posted symbol on POST."""
    if request.method == "GET":
        if 'username' not in session:
            return render_template("search.html")
        return render_template("search.html", username=session["username"])

    # POST: fetch quote data for the requested symbol.
    symb = request.form['symb']
    quote = utils.init(symb)
    if 'username' not in session:
        return render_template("stocks.html", q=quote)
    return render_template("stocks.html", username=session["username"], q=quote)
示例#39
0
def describe_lda():
    """For a random sample of 50 interests, print each interest's LDA topics
    (best first) and the top-10 articles of every topic. (Python 2 script;
    requires the pre-trained model at svd/lda.txt.)"""
    utils.init()
    model = gensim.models.ldamodel.LdaModel.load('svd/lda.txt')
    def article_name(article_id):
        # Fall back to 'unknown' and strip non-ASCII so stdout writes never fail.
        name = utils.get_article_name(article_id)
        return name.encode('ascii', 'ignore') if name else 'unknown'

#    print 'information about topics:'
#    for i in random.sample(range(model.num_topics), 50):
#        print 'topic %d:' % i
#        topic = model.state.get_lambda()[i]
#        topic = topic / topic.sum() # normalize to probability dist
#        for id in numpy.argsort(topic)[::-1][:10]:
#            score = topic[id]
#            article_id = model.id2word[id]
#            print '\t%.6f: %s' % (score, article_name(article_id))

    dictionary = model.id2word
    interests = list(utils.get_all_interests())
    for i in random.sample(interests, 50):
        article_id1 = utils.get_article_id_for_interest(i)
        if not article_id1:
            continue  # interest has no associated article; skip it
        doc = make_doc(i, dictionary)

        # Rank this document's topics by score, highest first
        # (sort-then-reverse keeps a stable, reproducible tie order).
        doc_lda = model[doc]
        doc_lda.sort(key=lambda pair: pair[1])
        doc_lda.reverse()
        sys.stdout.write('topics for %s (article %s):\n' % (i.text, article_name(article_id1)))
        for (topic_id, topic_score) in doc_lda:
            sys.stdout.write('\t%.6f topic %d:' % (topic_score, topic_id))
            topic = model.state.get_lambda()[topic_id]
            topic = topic / topic.sum() # normalize to probability dist
            # Top-10 highest-weight words (article ids) for this topic.
            for id in numpy.argsort(topic)[::-1][:10]:
                score = topic[id]
                article_id = model.id2word[id]
                sys.stdout.write(', ' + article_name(article_id))
            sys.stdout.write('\n')
示例#40
0
 def studentCode(self, data):
     """Evaluate binom on the given arguments, recording the memo-cache size."""
     utils.init()
     result = q1.binom(*data)
     # Log how many entries the cache accumulated during this call.
     self.__postprocess.write('{}\n'.format(len(utils.cache())))
     return result
示例#41
0
        # return (0,0)
        # Error is the offset of the bottom trackpoint from middle of frame
        if len(self.trackingpts) == 0: return (0, 0)
        err = self.trackingpts[0][0] - V_WIDTH / 2

        # Proportion term
        pterm = err * self.p
        # Calculate the integral term
        self.errorsum += err
        iterm = self.errorsum * self.i
        # Calculate the derivative term
        dterm = (err - self.preverr) * self.d
        self.preverr = err
        # Drive is the difference in speed between two wheels
        # Drive is positive when turning left
        drive = int(pterm + iterm + dterm)

        lwheel = -(self.basespeed + drive)
        rwheel = -(self.basespeed - drive)

        # Cap the wheel speeds to [-255, 255]
        lwheel = max(min(lwheel, 255), -255)
        rwheel = max(min(rwheel, 255), -255)

        return (lwheel, rwheel)

if __name__ == "__main__":
    # Entry point: start the threaded RPC server hosting MobotService.
    init("initiating mobot server")
    server = ThreadedServer(MobotService, port = MOBOT_PORT)
    server.start()
示例#42
0
    def __init__(self, *args, **kwargs):
        """Set up the mobot RPC service: status state, CV/control parameters,
        worker threads, and the camera.

        NOTE(review): the threads are created here but not started in this
        method — confirm they are started elsewhere in the service lifecycle.
        """
        rpyc.Service.__init__(self, *args, **kwargs)
        init("mobot service instance created")

        # Status
        self.uptime = time.time()
        self.missionuptime = time.time()
        self._connected = False

        self.filterstate = 0

        # Tunable CV / control parameters, keyed by short code.
        self.values = {
            'BRIG': 0, 'CNST': 50, 'BLUR': 4,
            'THRS': 150, 'SIZE': 3, 'CERT': 0.7, 'PTS': 4, 'RADI': 30,
                'A': 0.6, 'B': 0.3, 'C': 0.1,
            'TCHS': 0.5, 'GATG': 14, 'MAXS': 100
        }

        # Reported status fields, keyed by short code.
        self.status = {
            'STAT': STAT_ONLINE, 'ACTT': 0, 'MIST': 0, 'DELY': 0,
            'GATC': 0, 'SPED': 0, 'PROT': 0, 'CVST': STAT_ONLINE,
            'BATT': 100, 'ADDR': LOCAL_ADDR
        }

        # Blank frame used until real footage has been processed.
        self.emptyfootage = np.zeros((V_HEIGHT, V_WIDTH), dtype = np.uint8)
        self.cntframe = self.emptyfootage
        self.trackingpts = []

        self.vL = 0 # left speed
        self.vR = 0 # right speed
        self.encL = 0 # left encoder
        self.encR = 0 # right encoder

        self.touchcount = 0

        # Main EV3 control threads (daemonized so they die with the process)
        self.loopstop = threading.Event()
        self.loopthd = threading.Thread(target=self.mainloop,
            args=(self.loopstop,))
        self.loopthd.daemon = True

        self.hardwarestop = threading.Event()
        self.hardwarethd = threading.Thread(target=self.update,
            args=(self.hardwarestop,))
        self.hardwarethd.daemon = True

        # Remote video streaming thread (created lazily)
        self.videostop = threading.Event()
        self.videothd = None

        # Enable camera
        self.stream = io.BytesIO()
        init("enabling camera")
        CAMERA.resolution = (V_WIDTH, V_HEIGHT)
        CAMERA.framerate = FRAMERATE
        CAMERA.start_preview()
        time.sleep(0.2)  # give the camera pipeline time to warm up
        # CAMERA.capture_continuous(self.stream, format='jpeg')

        # Image Processor Control Variables
        self.done = False # stops Image Processor
        self.lock = threading.Lock()
        self.pool = [] # Pool of Image Processors

        # PID Control Variable (hard-coded gains)
        self.basespeed = 200
        self.errorsum = 0
        self.preverr = 0
        self.p = 1.2
        self.i = 0
        self.d = 1
示例#43
0
文件: __init__.py 项目: altai/bunch
from lettuce import step, world
from nose.tools import assert_equals, assert_true, assert_false
import utils
import os
import bunch.special

# Resolve paths relative to this feature directory and load the test config.
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
utils.init(dir_path)  # side effect: initialises the test utilities
config_file = os.path.join(dir_path, "config.yaml")
config = utils.load_yaml_config(config_file)
bunch_working_dir = dir_path

def dump(obj):
  for attr in dir(obj):
    print "obj.%s = %s" % (attr, getattr(obj, attr))

# Database admin credentials taken from the loaded YAML config.
mysql_admin = config['db']['admin']
mysql_admin_pwd = config['db']['admin_pwd']

class step_assert(object):
    """Wraps nose assertions so failures report the lettuce step sentence."""

    def __init__(self, step):
        self.step = step

    def _failure_message(self):
        # Built at assertion time so the current step sentence is reported.
        return 'Step "%s" failed ' % self.step.sentence

    def assert_true(self, expr):
        assert_true(expr, self._failure_message())

    def assert_false(self, expr):
        assert_false(expr, self._failure_message())
示例#44
0
    def __init__(self):
        """Standalone mobot framework: same state/camera/thread setup as the
        RPC service, but with constants in place of client-tunable values."""
        # Note: This class utilizes pre-coded parameters
        # This class does not have the ability to interact with client
        # Core framework
        init('mobot basic framework initiating...')
        self.uptime = time.time()
        self.missionuptime = time.time()

        self.filterstate = 0

        # Tunable CV / control parameters, keyed by short code.
        self.values = {
            'BRIG': 0, 'CNST': 50, 'BLUR': CV_BLUR_FACTOR,
            'THRS': 150, 'SIZE': 3, 'CERT': 0.7, 'PTS': 4, 'RADI': 30,
                'A': 0.6, 'B': 0.3, 'C': 0.1,
            'TCHS': 0.5, 'GATG': 14, 'MAXS': 100
        }

        # Reported status fields, keyed by short code.
        self.status = {
            'STAT': STAT_ONLINE, 'ACTT': 0, 'MIST': 0, 'DELY': 0,
            'GATC': 0, 'SPED': 0, 'PROT': 0, 'CVST': STAT_ONLINE,
            'BATT': 100, 'ADDR': LOCAL_ADDR
        }

        # Blank frame used until real footage has been processed.
        self.emptyfootage = np.zeros((V_HEIGHT, V_WIDTH), dtype = np.uint8)
        self.cntframe = self.emptyfootage
        self.trackingpts = []

        self.vL = 0 # left speed
        self.vR = 0 # right speed
        self.encL = 0 # left encoder
        self.encR = 0 # right encoder

        self.touchcount = 0

        # Main EV3 control threads (daemonized so they die with the process)
        self.loopstop = threading.Event()
        self.loopthd = threading.Thread(target=self.mainloop,
            args=(self.loopstop,))
        self.loopthd.daemon = True

        self.hardwarestop = threading.Event()
        self.hardwarethd = threading.Thread(target=self.update,
            args=(self.hardwarestop,))
        self.hardwarethd.daemon = True

        # Remote video streaming thread (disabled in standalone mode)
        # self.videostop = threading.Event()
        # self.videothd = None

        # Enable camera
        self.stream = io.BytesIO()
        init("enabling camera")
        CAMERA.resolution = (V_WIDTH, V_HEIGHT)
        CAMERA.framerate = FRAMERATE
        CAMERA.start_preview()
        time.sleep(0.2)  # give the camera pipeline time to warm up
        # CAMERA.capture_continuous(self.stream, format='jpeg')

        # Image Processor Control Variables
        self.done = False # stops Image Processor
        self.lock = threading.Lock()
        self.pool = [] # Pool of Image Processors

        # PID Control Variable (gains come from module constants here)
        self.basespeed = MOTOR_BASESPEED
        self.errorsum = 0
        self.preverr = 0
        self.p = CTRL_P
        self.i = CTRL_I
        self.d = CTRL_D

        # Variable that stores inclination button state
        self.incline_btn_prev = False

        # Loop control state machine
        self.loopstate = processing.BifurcationState(V_WIDTH, V_HEIGHT,
            LOOP_CHOICES)
示例#45
0
        print '-s           Run standalone mode independent of client'
        print '-i           Invert the color seen (we are tracking white line)'
        print '-m <mode>    Select CV Mode being used'
        print '             -> alpha: new histogram model'
        print '             -> beta: obsolete probabilistic tracking model'
        print '------------------ALPHACV EXCLUSIVE OPTIONS------------------'
        print '-c           Save processed image (under alphacv)'
        print '-k           Row skip: allow skipping rows for grouping'
        print '-t           Choose thin: whether we shall favor thinner group'
        sys.exit(2)
    elif opt in ('-s', '--standalone'):
        STANDALONE = True

    elif opt in ('-m', '--mode'):
        if arg == 'alpha':
            init('system running on alpha histogram algorithm.')
            CV_MANUAL_MODE = 'alpha'
        elif arg == 'beta':
            init('system running on beta probabilistic algorithm.')
            CV_MANUAL_MODE = 'beta'
        else:
            warn('not a valid mode, falling back to alpha.')
            CV_MANUAL_MODE = 'alpha'

    elif opt in ('-i', '--inverted'):
        info('inverted cam mode enabled.')
        CV_MANUAL_IRNV = True

    elif opt in ('-k',):
        info('row skipping enabled.')
        ALPHA_CV_ROW_SKIP = True
示例#46
0
文件: worker.py 项目: EFgit/zk
	zk.set(self.worker_id, "non")
	print("Worker %s  created!" %(self.worker_id))
	#3.watch znode
	zk.DataWatch(self.path, self.assignment_change)   
    
    # do something upon the change on assignment
    def assignment_change(self, atask, stat):
        """Znode watch callback: run the assigned task and publish its result.

        :param atask: task id written to this worker's znode ("non" = idle)
        :param stat: znode stat structure (unused)

        Fixes: uses self.zk consistently (the original mixed the module-level
        `zk` with `self.zk`); tab-mangled indentation normalized to spaces.
        """
        if atask and not atask == "non":
            # 4./5. an assignment arrived: fetch the task's input from data/yyy
            data_path = DATA_PATH + atask
            if self.zk.exists(data_path):
                data = self.zk.get(data_path)
                # 6. execute task with data
                result = utils.task(data)
                task_path = TASKS_PATH + atask
                task_val = atask + "=" + str(result)
                # set result in task - task completion
                if self.zk.exists(task_path):
                    self.zk.set(task_path, task_val)
                    print("Worker completed task %s with result %s" % (atask, result))
                else:
                    print("Task %s not found, maybe the connection  was lost" % (task_path))
                # 7. delete assignment
                self.zk.set(self.path, "non")

if __name__ == '__main__':
    # Connect to ZooKeeper and keep the worker alive until killed.
    zk = utils.init()    
    worker = Worker(zk)
    while True:
        time.sleep(1)
#!/usr/bin/env python3
"""Load QDPT and EOM-IMSRG addition/removal energies into one DataFrame."""
import functools, os
import matplotlib.pyplot as plt
import pandas as pd
import utils

utils.init(__file__)

# Column renaming shared by both EOM data files.
_EOM_RENAME = {"shells": "num_shells", "filled": "num_filled",
               "ML": "ml", "omega": "freq"}

# QDPT addition/removal energies (comment-prefixed whitespace table).
reader = functools.partial(pd.read_csv, delim_whitespace=True)
d = utils.skip_comment_char(reader, "imsrg-qdpt/dat_arenergy_by_ml.txt")
d = d[["num_shells", "num_filled", "freq", "ml", "label", "energy"]]
d["method"] = "qdpt"
dq = d

# EOM-IMSRG attached (N+1) energies.
d = pd.read_csv("EOM_IMSRG_qd_attached.dat", delim_whitespace=True)
d["energy"] = d["E(N+1)-E(N)"]
d = d[["shells", "filled", "ML", "omega", "energy"]]
d = d.rename(columns=_EOM_RENAME)
d["label"] = "add"
d["method"] = "eom"
dea = d

# EOM-IMSRG removed (N-1) energies; sign flipped to match the convention above.
d = pd.read_csv("EOM_IMSRG_qd_removed.dat", delim_whitespace=True)
d["energy"] = -d["E(N-1)-E(N)"]
d = d[["shells", "filled", "ML", "omega", "energy"]]
d = d.rename(columns=_EOM_RENAME)
d["label"] = "rm"
d["method"] = "eom"
der = d

d = pd.concat([dq, dea, der])
示例#48
0
def stock_trade():
    """Execute a trade between the two portfolios in the posted JSON order."""
    order = request.json
    side_a = order['portofolio_1']
    side_b = order['portofolio_2']
    model.trade(side_a['user'], side_a['content'],
                side_b['user'], side_b['content'])
    
@app.route("/stockexchange/distribute", method="POST")
def stock_distribute():
    """Distribute the posted portfolio order to its owner."""
    order = request.json
    model.distribute(order['user'], order['content'])
    
if __name__ == "__main__":
    # Parse CLI options, dynamically load the market-model class, and serve.
    parser = optparse.OptionParser()
    parser.add_option('--mongo_db', default='naive_stock')
    parser.add_option('--mongo_host', default='localhost')
    parser.add_option('--model', default='transac_stockmarket.TransacStockmarket')
    parser.add_option('--server', default='tornado')
    parser.add_option('--debug', action='store_true', default=False)
    parser.add_option('--reset', action='store_true', default=False)
    options, remainder = parser.parse_args()
    global model  # NOTE(review): no-op at module scope; kept for fidelity
    # "--model pkg.Class": import the module, look up the class, instantiate it.
    [module, cls] = options.model.split('.', 1)
    m = __import__(module)
    c = getattr(m, cls)
    model = c()
    model.setMongo(options.mongo_host, options.mongo_db)
    if options.reset:
        utils.init(model)  # wipe/seed the database before serving
    if options.debug:
        debug(True)
    run(app=app, host="0.0.0.0", port=os.environ.get("PORT", 8080), reloader=True, server=options.server)
示例#49
0
 def studentCode(self, data):
     """Print the table for the given arguments and return the utils result."""
     utils.init()
     q1.printtable(*data)
     return utils.result()
示例#50
0
def test_sample_person_graph():
    # for u in [utils.get_user_by_id(3568), utils.get_user_by_id(16)]:
    # for u in [utils.get_user_by_id(16)]:
    for u in random.sample(utils.get_all_users(), 200):
        print "=" * 80
        print
        print "results for ", u, " ".join([i.text for i in u.interests])
        make_full_person_graph(u)
        print
        print
        print


def test_sample_interest_graph():
    for i in random.sample(utils.get_all_interests(), 100):
        print "=" * 80
        print
        print "results for ", i
        make_full_interest_graph(i)
        print
        print
        print


if __name__ == "__main__":
    # Configure logging, initialise the data layer, then run the sample test.
    logging.basicConfig(level=logging.INFO)
    # LOGGER.setLevel(logging.DEBUG)
    utils.init()
    # test_sample_interest_graph()
    test_sample_person_graph()