コード例 #1
0
 def create_timeline(self):
     """Fetch up to 200 direct messages and show them in a new buffer.

     Appends a "Direct Messages" buffer to the global buffer list and
     inserts its name at the end of the UI list control.
     """
     self.statuses = twitter.twitter.get_direct_messages(count=200)
     self.buffer = buffer.buffer("Direct Messages", self.statuses)
     buffer.buffers.append(self.buffer)
     # The buffer was just appended, so it is the last element: use the
     # idiomatic [-1] instead of buffers[len(buffers) - 1].
     interface.window.list.Insert(buffer.buffers[-1].name,
                                  interface.window.list.GetCount())
コード例 #2
0
 def create_timeline(self):
     """Fetch up to 200 liked tweets and show them in a new buffer.

     Appends a "Likes" buffer to the global buffer list and inserts its
     name at the end of the UI list control.
     """
     # tweet_mode='extended' returns the full, untruncated tweet text.
     self.statuses = twitter.twitter.get_favorites(count=200,
                                                   tweet_mode='extended')
     self.buffer = buffer.buffer("Likes", self.statuses)
     buffer.buffers.append(self.buffer)
     # The buffer was just appended, so it is the last element: use the
     # idiomatic [-1] instead of buffers[len(buffers) - 1].
     interface.window.list.Insert(buffer.buffers[-1].name,
                                  interface.window.list.GetCount())
コード例 #3
0
 def create_timeline(self):
     """Load the home timeline into a new buffer and open a user stream.

     The home buffer is placed first in the global buffer list and at
     the top of the UI list, then a streaming connection is started for
     live updates.
     """
     statuses = twitter.twitter.get_home_timeline(count=200,
                                                  tweet_mode='extended')
     self.statuses = statuses
     home_buffer = buffer.buffer("Home", statuses)
     self.buffer = home_buffer
     buffer.buffers.insert(0, home_buffer)
     interface.window.list.Insert(buffer.buffers[0].name, 0)
     stream = HomeStream(twitter.apikey, twitter.apisecret,
                         config.appconfig['general']['key'],
                         config.appconfig['general']['secret'])
     stream.user()
コード例 #4
0
 def create_timeline(self):
     """Fetch up to 200 mentions, show them, and stream new mentions.

     Appends a "Mentions" buffer to the global buffer list, inserts its
     name at the end of the UI list control, then opens a filtered
     stream tracking the current user's @screen-name.
     """
     self.statuses = twitter.twitter.get_mentions_timeline(
         count=200, tweet_mode='extended')
     self.buffer = buffer.buffer("Mentions", self.statuses)
     buffer.buffers.append(self.buffer)
     # The buffer was just appended, so it is the last element: use the
     # idiomatic [-1] instead of buffers[len(buffers) - 1].
     interface.window.list.Insert(buffer.buffers[-1].name,
                                  interface.window.list.GetCount())
     twitterstream = MentionsStream(twitter.apikey, twitter.apisecret,
                                    config.appconfig['general']['key'],
                                    config.appconfig['general']['secret'])
     twitterstream.statuses.filter(track="@" + twitter.screenname)
コード例 #5
0
def actor_run(actor_id, env_id, traj_length=100, log=False):
    """Run one actor loop forever: collect trajectories, ship to learner.

    Steps a gym environment with the current policy, stores transitions
    in a local buffer, sends the buffer to the learner over gRPC every
    ``traj_length`` steps, and pulls fresh parameters after each send.

    Args:
        actor_id: integer id, used to separate tensorboard log dirs.
        env_id: gym environment id string.
        traj_length: number of transitions per trajectory sent.
        log: when True, write per-episode rewards to tensorboard.

    Note: this function never returns (infinite collection loop).
    """
    episode = 0
    weight_reward = None  # exponential moving average of episode reward
    env = gym.make(env_id)
    actor_buffer = buffer()
    agent = actor_critic_agent(env, actor_buffer)
    writer = SummaryWriter('./log/actor_{}'.format(actor_id))
    channel = grpc.insecure_channel('localhost:43231')

    # NOTE(security): pickle.loads on bytes received over the network is
    # unsafe unless the learner endpoint is fully trusted.
    params = get_parameter(channel)
    params = pickle.loads(params)
    agent.load_state_dict(params)

    while True:
        obs = env.reset()
        total_reward = 0
        while True:
            action = agent.net.act(torch.FloatTensor(np.expand_dims(
                obs, 0))).item()
            behavior_policy, _ = agent.net.forward(
                torch.FloatTensor(np.expand_dims(obs, 0)))
            next_obs, reward, done, info = env.step(action)
            actor_buffer.store(obs, action, reward, done,
                               behavior_policy.squeeze(0).detach().numpy())
            total_reward += reward
            obs = next_obs
            if done:
                # BUGFIX: compare against None, not truthiness — an
                # average of exactly 0.0 would otherwise be discarded
                # and the EMA reset instead of decayed.
                if weight_reward is not None:
                    weight_reward = 0.99 * weight_reward + 0.01 * total_reward
                else:
                    weight_reward = total_reward
                episode += 1
                print('episode: {}  weight_reward: {:.2f}  reward: {:.2f}'.
                      format(episode, weight_reward, total_reward))
                if log:
                    writer.add_scalar('reward', total_reward, episode)
                    writer.add_scalar('weight_reward', weight_reward, episode)
            # Checked even when the episode just ended, so a trajectory
            # that fills up on the terminal step is still flushed before
            # the break below.
            if len(actor_buffer) == traj_length:
                traj_data = actor_buffer.get_json_data()
                send_trajectory(channel, trajectory=traj_data)
                params = get_parameter(channel)
                params = pickle.loads(params)
                agent.load_state_dict(params)
            if done:
                break
コード例 #6
0
 def __init__(self,
              env,
              learning_rate=1e-3,
              rho=1,
              c=1,
              gamma=0.99,
              entropy_weight=0.05,
              batch_size=32,
              capacity=1000,
              value_weight=0.5,
              save_freq=20,
              log=False,
              device='cpu'):
     """Set up the learner: replay buffer, agent, and tensorboard writer."""
     # Plain training hyperparameters kept on the instance.
     self.batch_size = batch_size
     self.save_freq = save_freq
     self.log = log
     # The agent shares the learner's replay buffer.
     self.buffer = buffer(capacity)
     self.agent = actor_critic_agent(env, self.buffer, learning_rate, rho,
                                     c, gamma, entropy_weight, value_weight,
                                     device)
     self.writer = SummaryWriter('./log/learner')
コード例 #7
0
    def __init__(self, flags):
        """Build the CycleGAN training setup from command-line *flags*.

        Creates both generators and both discriminators, moves them to
        the selected device, initializes their weights, sets up the
        fake-image pools and the GAN/cycle/identity criteria, and
        creates one Adam optimizer for the generators and one for the
        discriminators.
        """
        # Iteration counter and running totals of each loss term.
        self.nIter_ = 0
        self.avgLoss = {
            'dA': 0,
            'dB': 0,
            'gan_gA': 0,
            'gan_gB': 0,
            'cycle_forw': 0,
            'cycle_back': 0,
            #'gp_dA' : 0,
            #'gp_dB' : 0,
            'idnt_gA': 0,
            'idnt_gB': 0,
        }
        self.flags_ = flags
        self.device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.ganLossType_ = flags.glt

        # Identity loss can be used only if A and B have the same number of channels
        if flags.l_idnt > 0:
            assert (flags.ncA == flags.ncB)

        # gA consumes ncA-channel input and emits ncB channels; gB is the
        # reverse mapping (presumably domain A -> B and B -> A — confirm
        # against the training step).
        self.gA = generator(in_channels=flags.ncA,
                            out_channels=flags.ncB,
                            nBlocks=flags.nbl,
                            nChanFirstConv=flags.ncFirstConv,
                            dropout=flags.dropout)
        self.gB = generator(in_channels=flags.ncB,
                            out_channels=flags.ncA,
                            nBlocks=flags.nbl,
                            nChanFirstConv=flags.ncFirstConv,
                            dropout=flags.dropout)
        # Note the channel swap: dA takes ncB-channel images (gA's output
        # side), dB takes ncA-channel images.
        self.dA = discriminator(in_channels=flags.ncB,
                                nChanFirstConv=flags.ncFirstConv)
        self.dB = discriminator(in_channels=flags.ncA,
                                nChanFirstConv=flags.ncFirstConv)

        # Move to device before weight init so init happens in place.
        self.gA.to(self.device_)
        self.gB.to(self.device_)
        self.dA.to(self.device_)
        self.dB.to(self.device_)

        init_weights(self.gA, flags.init_type, flags.init_scale)
        init_weights(self.gB, flags.init_type, flags.init_scale)
        init_weights(self.dA, flags.init_type, flags.init_scale)
        init_weights(self.dB, flags.init_type, flags.init_scale)

        # Pools of previously generated fake images for discriminator updates.
        self.buffer_fakeA = buffer()
        self.buffer_fakeB = buffer()

        #TODO 'was' mode of gan loss needs implementation of gradient penalty as well

        if self.ganLossType_ == 'lse':  #Least square loss
            self.ganCriterion = nn.MSELoss()
        elif self.ganLossType_ == 'bce':  #binary cross entropy loss i.e. binary classification
            self.ganCriterion = nn.BCEWithLogitsLoss()
        elif self.ganLossType_ == 'was':  #Wasserstein GAN: no criterion module
            self.ganCriterion = None
        else:
            raise NotImplementedError(
                'Specified gan loss has not been implemented')

        self.cycleCriterion = nn.L1Loss()
        self.idntCriterion = nn.L1Loss()
        # One optimizer over both generators, one over both discriminators.
        self.optimizerG = torch.optim.Adam(itertools.chain(
            self.gA.parameters(), self.gB.parameters()),
                                           lr=flags.lr,
                                           betas=(0, 0.999))
        self.optimizerD = torch.optim.Adam(itertools.chain(
            self.dA.parameters(), self.dB.parameters()),
                                           lr=flags.lr,
                                           betas=(0, 0.999))
コード例 #8
0
ファイル: kernel.py プロジェクト: smurfix/twistfuse
	def from_param_head(cls, msg):
		"""Split *msg* into the fixed-size header, one NUL-terminated
		string parameter, and the remainder after the NUL (Python 2:
		uses the ``buffer`` builtin for zero-copy slices)."""
		hdr_len = cls.calcsize()
		nul = msg.index('\x00', hdr_len)
		return (cls(msg[:hdr_len]),
			buffer(msg, hdr_len, nul - hdr_len),
			buffer(msg, nul + 1))
コード例 #9
0
ファイル: kernel.py プロジェクト: smurfix/twistfuse
	def from_head(cls, msg):
		"""Split *msg* into the fixed-size header and the remaining
		payload as a zero-copy ``buffer`` view (Python 2)."""
		hdr_len = cls.calcsize()
		return cls(msg[:hdr_len]), buffer(msg, hdr_len)
コード例 #10
0
ファイル: kernel.py プロジェクト: smurfix/twistfuse
	def from_param2(cls, msg):
		"""Split *msg* into the fixed-size header and two consecutive
		NUL-terminated string parameters (Python 2 ``buffer`` views)."""
		hdr_len = cls.calcsize()
		first_nul = msg.index('\x00', hdr_len)
		second_nul = msg.index('\x00', first_nul + 1)
		return (cls(msg[:hdr_len]),
			buffer(msg, hdr_len, first_nul - hdr_len),
			buffer(msg, first_nul + 1, second_nul - first_nul - 1))
コード例 #11
0
    # Build the two exchange clients by looking up the exchange class on
    # ccxt by name.  getattr replaces the previous exec() on strings built
    # from config (a code-injection risk, and exec cannot bind function
    # locals in Python 3, so t1/t2 would be undefined below).
    t1 = getattr(ccxt, config.NAME1)({'apiKey': config.APIKEY1,
                                      'secret': config.SECKEY1})
    t2 = getattr(ccxt, config.NAME2)({'apiKey': config.APIKEY2,
                                      'secret': config.SECKEY2})

    # Tag each instance with its exchange name.
    t1.name = config.NAME1
    t2.name = config.NAME2

    # Bundle the traded currencies and related settings into one object.
    info_set = tools.information(config.CRYPTO_BASE, config.CRYPTO_ALT,
                                 config.PASSWORDS, config.BNBBUY,
                                 config.BIXBUY)

    # Decorate the exchange instances (fetch exchange info and apply the
    # per-exchange settings; see buffer.py).
    t1 = buffer.buffer(t1, info_set)
    t2 = buffer.buffer(t2, info_set)

    # Combine everything into a single wrapper; constructing it prints the
    # configuration and confirms balances are reachable via the API.
    ex = tools.exchange(t1, t2, info_set, l)

    # Arbitrage thresholds.
    thrd_up = config.threshold_up
    thrd_down = config.threshold_down

    # chrate decides which of the base/paired currencies to accumulate
    # (balance-rebalancing).  It feeds the third argument of order_up/down;
    # setting these to 0 accumulates only the base currency (the paired
    # currency then shrinks through fees, so some adjustment is needed).
    chrate_up = thrd_up - 1.001
    chrate_down = thrd_down - 1.001
コード例 #12
0
# Step 1: rasterize the ground truth shapefile, then convert to PNG
gt_tif_path = utils_shp.vector2raster(gt_path, map_tif_path)
gt_png_path = utils_shp.tif2png(gt_tif_path)

# Step 2: load the bounding box coordinates to remove the white borders of the map
# bbox.txt holds comma-separated integers; rows 0 and 1 are taken as the
# two corner points.
bbox_file = os.path.join(data_dir, 'bbox.txt')
points = np.loadtxt(bbox_file, dtype='int32', delimiter=',')
start_point, end_point = points[0], points[1]
print(start_point, end_point)

# Step 3: skeletonize the segmentation results
# NOTE(review): make_skeleton is a project helper — presumably returns a
# copy of the image and a boolean/0-1 skeleton mask; confirm its contract.
img_copy, pred_ske = make_skeleton(pred_path)
pred_ske = pred_ske.astype('uint')  # cast mask to unsigned integers

# Step 4: buffer the segmentation results and ground truth
# (buffer() presumably dilates the mask by buffer_size pixels to form a
# tolerance band — confirm in its definition)
pred_buffer = buffer(pred_ske, buffer_size=buffer_size)
gt_map = cv2.imread(gt_png_path, 0) / 255  # grayscale, scaled to [0, 1]
gt_buffer = buffer(gt_map, buffer_size=buffer_size)

# Step 5 calculate correctness and completeness
# Correctness (precision): predicted skeleton pixels that fall inside the
# buffered ground truth count as true positives, the rest as false positives.
overlap_map = gt_buffer * pred_ske
fp_map = pred_ske - overlap_map

tp = np.count_nonzero(overlap_map)
fp = np.count_nonzero(fp_map)

correctness = tp / (tp + fp)  # NOTE(review): raises ZeroDivisionError if the prediction is empty
print('correctness = ', correctness)

# Completeness numerator: ground-truth pixels covered by the buffered prediction.
overlap_comp_map = gt_map * pred_buffer