Example #1
def groupGeneratedLocators():
    log('selecting created locators')
    
    locs = cmds.ls(type="locator")   
    if locs:
        cmds.select(locs)
        cmds.group(name="debug_locs")
Example #2
def cleanGeneratedLocators():
    log('cleaning old locators')
    
    locs = cmds.ls("locator*")
    if locs:
        cmds.delete(locs)
    dbg = cmds.ls("debug_locs")
    if dbg:
        cmds.delete(dbg)
Example #3
def launch_proj():
    camera    = 'camera2'
    particles = 'Agisoft_Ptc_1'
    step      = 10
    scale     = 500.0
    
    print '*' * 80
    
    log('PARTICLES PROJECTION TEST')
    log('%s / %s (step: %d, scale: %f)' % (camera, particles, step, scale))
    cleanGeneratedLocators()
    
    points = projcam.getParticlesProjection(camera, particles, step)
    
    '''
    log('create preview locators')
    for point in points:
        point.x = (1 - point.x) - 1
        cmds.spaceLocator(position=[point.x * scale, 0, point.y * scale])
    
    log('select and group preview locators')
    groupGeneratedLocators()
    '''
    log('PROJECTION DONE !')
    print '*' * 80
Example #4
def walk_proj():
    camera    = 'camera2'
    particles = 'Agisoft_Ptc_1'
    step      = 100
    scale     = 500.0
    
    print '*' * 80    
    log('PARTICLES WALK TEST')
    
    coords = projcam.walkParticlesProjection(camera, particles, step)
    pid    = 0
    frm    = 5
    
    for plist in coords:
        log("got %d frames for point %d" % (len(plist), pid))
        log("position at frame %d : %f %f" % (frm, plist[frm].x, plist[frm].y))
        pid += 1
    
    log('WALK DONE !')
    print '*' * 80
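The snippets above rely on a module-level `log` helper and a project-specific `projcam` module; neither is part of Maya itself. As a minimal stand-in for trying these functions in the Maya script editor (an assumption, not the original helper), a print-based `log` is enough:

import maya.cmds as cmds

def log(message):
    # Minimal stand-in for the project's log() helper (assumption, not the original).
    print('[particles_projection] %s' % message)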
Example #5
    def test_test(self):
        """
        Test on testing set.
        """

        num_batches = int(
            math.ceil(self.test_images.shape[0] / self.args.batch_size))

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size,
                        self.test_images.shape[0])

            batch_images = common.torch.as_variable(
                self.test_images[b_start:b_end], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            # Important to get the correct codes!
            output_codes, output_logvar = self.encoder(batch_images)
            output_images = self.decoder(output_codes)
            e = self.reconstruction_loss(batch_images, output_images)
            self.reconstruction_error += e.data

            self.code_mean += torch.mean(output_codes).item()
            self.code_var += torch.var(output_codes).item()

            output_images = numpy.squeeze(
                numpy.transpose(output_images.cpu().detach().numpy(),
                                (0, 2, 3, 1)))
            self.pred_images = common.numpy.concatenate(
                self.pred_images, output_images)

            output_codes = output_codes.cpu().detach().numpy()
            self.pred_codes = common.numpy.concatenate(self.pred_codes,
                                                       output_codes)

            if b % 100 == 50:
                log('[Testing] %d' % b)

        assert self.pred_images.shape[0] == self.test_images.shape[
            0], 'computed invalid number of test images'
        if self.args.reconstruction_file:
            utils.write_hdf5(self.args.reconstruction_file, self.pred_images)
            log('[Testing] wrote %s' % self.args.reconstruction_file)

        if self.args.test_theta_file:
            assert self.pred_codes.shape[0] == self.test_images.shape[
                0], 'computed invalid number of test codes'
            utils.write_hdf5(self.args.test_theta_file, self.pred_codes)
            log('[Testing] wrote %s' % self.args.test_theta_file)

        threshold = 0.9
        percentage = 0
        # values = numpy.linalg.norm(pred_codes, ord=2, axis=1)
        values = numpy.max(numpy.abs(self.pred_codes), axis=1)

        while percentage < 0.9:
            threshold += 0.1
            percentage = numpy.sum(values <= threshold) / float(
                values.shape[0])
            log('[Testing] threshold %g percentage %g' %
                (threshold, percentage))
        log('[Testing] taking threshold %g with percentage %g' %
            (threshold, percentage))

        if self.args.output_directory and utils.display():
            # fit = 10
            # plot_file = os.path.join(self.args.output_directory, 'test_codes')
            # plot.manifold(plot_file, pred_codes[::fit], None, None, 'tsne', None, title='t-SNE of Test Codes')
            # log('[Testing] wrote %s' % plot_file)

            for d in range(1, self.pred_codes.shape[1]):
                plot_file = os.path.join(self.args.output_directory,
                                         'test_codes_%s' % d)
                plot.scatter(
                    plot_file,
                    self.pred_codes[:, 0],
                    self.pred_codes[:, d], (values <= threshold).astype(int),
                    ['greater %g' % threshold,
                     'smaller %g' % threshold],
                    title='Dimensions 0 and %d of Test Codes' % d)
                log('[Testing] wrote %s' % plot_file)

        self.reconstruction_error /= num_batches
        log('[Testing] reconstruction error %g' % self.reconstruction_error)
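The threshold search above raises the cut-off in steps of 0.1 until at least 90% of the per-sample maximum code magnitudes fall below it. A self-contained sketch of the same idea on synthetic data (array names here are illustrative, not taken from the original class):

import numpy

# Synthetic stand-in for self.pred_codes: 1000 samples, 10 latent dimensions.
pred_codes = numpy.random.randn(1000, 10)
values = numpy.max(numpy.abs(pred_codes), axis=1)

threshold = 0.9
percentage = 0.0
while percentage < 0.9:
    threshold += 0.1
    percentage = numpy.sum(values <= threshold) / float(values.shape[0])
print('threshold %g covers fraction %g of samples' % (threshold, percentage))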
Example #6
    def sendAll(self, protocol_obj):
        log(
            u'[try send]game[%s] exit player %s' %
            (self.roomId, self.exitPlayers), LOG_LEVEL_RELEASE)
        self.server.send(self.getOnlinePlayers(), protocol_obj)
        self.saveSendData(protocol_obj)
Example #7
    def sendOne(self, player, protocol_obj):
        if player.chair not in self.exitPlayers:
            log(u'[try send]account[%s]' % (player.nickname),
                LOG_LEVEL_RELEASE)
            self.server.sendOne(player, protocol_obj)
        self.saveSendData(protocol_obj, peer=player)
Example #8
    def balance(self, isEndGame=False, isSave=True):
        """
        Settle the round and end the game.
        """
        log(u'[on balance]room[%s] curGameCount[%s] gameTotalCount[%s] isEndGame[%s] isSave[%s].'\
                %(self.roomId, self.curGameCount, self.gameTotalCount, isEndGame, isSave), LOG_LEVEL_RELEASE)
        self.doBeforeBalance()
        if not self.setEndTime:
            self.setEndTime = self.server.getTimestamp()
        if not self.gameEndTime:
            self.gameEndTime = self.server.getTimestamp()

        if not self.isUseRoomCards and self.curGameCount == 1 and not self.isDebug and not self.isParty and isSave:
            self.server.useRoomCards(self)

        # Check whether the played round count means the whole game should end now
        if self.curGameCount + 1 > self.gameTotalCount:
            log(u'[on balance]room[%s] curGameCount[%s] > gameTotalCount[%s].'\
                %(self.roomId, self.curGameCount, self.gameTotalCount), LOG_LEVEL_RELEASE)
            isEndGame = True

        # Pack the per-round settlement data
        resp = poker_pb2.S_C_Balance()
        resp.isNormalEndGame = self.isGameEnd
        if isSave:
            for player in self.getPlayers():
                self.calcBalance(player)

        if self.stage != GAME_READY:
            self.fillCommonData(resp)
        for player in self.getPlayers():
            if self.stage != GAME_READY:  # single-round settlement data is not shown between rounds
                userData = resp.setUserDatas.add()
                pbBalanceData(player, userData)
                self.fillBalanceData(player, userData)
                player.upTotalUserData()
            if isEndGame:
                totalUserData = resp.gameUserDatas.add()
                pbBalanceData(player, totalUserData)
                self.fillTotalBalanceData(player, totalUserData)
                totalUserData.roomSetting = self.ruleDescs
        self.oldBalanceData = copy.deepcopy(resp)
        log(u'[on balance] resp[%s]' % (resp), LOG_LEVEL_RELEASE)
        self.sendAll(resp)

        # Persist each round's data
        if isSave:
            self.server.savePlayerBalanceData(self, resp.setUserDatas)
            saveResp = poker_pb2.S_C_RefreshData()
            saveResp.result = True
            self.server.tryRefresh(self, player, saveResp)
            self.replayRefreshData = saveResp.SerializeToString()
            self.isSaveGameData = True
            self.gamePlayedCount += 1
        if isEndGame:
            if self.isSaveGameData:
                # Persist the total (whole-game) data
                log(u'[on balance]room[%s] save all data.' % (self.roomId),
                    LOG_LEVEL_RELEASE)
                self.server.savePlayerTotalBalanceData(self,
                                                       resp.gameUserDatas)
            self.removeRoom()
        else:
            # Switch to the next round
            self.resetSetData()
            self.isEnding = True
            self.stage = GAME_READY
            # self.onSetStart(self.players[OWNNER_SIDE])
            self.setCounter([self.dealerSide], self.balanceCounterMs,
                            self.onGameStartTimeout)
Example #9
def print_status():
    log(global_data.log_path, 'info', end_time, '*' * 100)
    log(global_data.log_path, 'info', end_time, 'Download start time for this run: %s' % start_time)
    log(global_data.log_path, 'info', end_time, 'Download end time for this run: %s' % end_time)
    log(global_data.log_path, 'info', end_time, 'Number of tasks fetched this run: %s' % len(global_data.data_id_list))
    log(global_data.log_path, 'info', end_time, 'Number of successful downloads this run: %s' % len(global_data.data_id_success_list))
    log(global_data.log_path, 'info', end_time, 'Number of failed downloads this run: %s' % len(global_data.data_id_fail_list))
    log(global_data.log_path, 'info', end_time, 'Full task list:')
    log(global_data.log_path, 'info', end_time, global_data.data_id_list)
    log(global_data.log_path, 'info', end_time, 'Failed task list:')
    log(global_data.log_path, 'info', end_time, global_data.data_id_fail_list)
    log(global_data.log_path, 'info', end_time, 'Successful task list:')
    log(global_data.log_path, 'info', end_time, global_data.data_id_success_list)
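print_status (and the runner in Example #20 below) assumes a project-specific log(path, level, timestamp, message) helper. A hypothetical minimal stand-in with that signature, useful for running the snippet outside its original project, might look like this:

def log(path, level, timestamp, message):
    # Hypothetical stand-in for the project's log helper: append one line per call.
    with open(path, 'a') as f:
        f.write('[%s] [%s] %s\n' % (timestamp, level, message))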
Example #10
    def test_IlluminationCommandController(self):
        log('Start running the test cases')
        Request().send_request(apifile_dir + 'IlluminationCommandController.xlsx', 'Sheet1')  # only the xxx.xlsx file needs to be changed
Example #11
from peter.wang_test import WangTest

# data set selection
dataset_list = ["nasa-turbofan", "weather", "bmw-rex", "bmw-cells", "nasa-phm"]
dataset_list = ["bmw-cells"]

hy_param_perc_score_min_phm = 99.705997648  # percentile score based on C_min = -300 of wang et al. (2008) paper
hy_param_perc_score_max_phm = 2.61226534257  # percentile score based on C_max = -5 of wang et al. (2008) paper
hy_param_max_cycles_score_phm = 1.06741573034  # percentage/100 based on own testing
hy_param_min_num_ruls_candi = 15  #30
hy_param_min_num_ruls_out_1 = 10  #25
hy_param_scaling_curve_fit = 1.1207865168539326

## main ##
for dataset in dataset_list:
    log("#################################### loading data ##################################",
        force=True)
    if dataset == "nasa-turbofan":
        dataset_subtype = "all"
        train, test, final_test = DataSource.load_turbofan()
    elif dataset == "nasa-phm":
        dataset_subtype = "all"
        train, test, final_test = DataSource.load_phm()
    elif dataset == "bmw-rex":
        dataset_subtype = "test"
        train, test = DataSource.load_bmw_rex()
    elif dataset == "bmw-cells":
        dataset_subtype = "test"
        train, test = DataSource.load_bmw_cells()
    elif dataset == "weather":
        dataset_subtype = "test"
        train, test = DataSource.load_weather()
Example #12
    def __init__(self):
        Appium.init_driver()
        self.driver = Appium.get_driver()
        self.driver.implicitly_wait(10)  # implicit wait of 10 seconds
        self.logger = log()
Example #13
    def test(self):
        """
        Test the model.
        """

        self.model.eval()
        log('[Training] %d set classifier to eval' % self.epoch)
        assert self.model.training is False

        loss = error = perturbation_loss = perturbation_error = 0
        num_batches = int(
            math.ceil(self.args.test_samples / self.args.batch_size))

        for b in range(num_batches):
            perm = numpy.take(range(self.args.test_samples),
                              range(b * self.args.batch_size,
                                    (b + 1) * self.args.batch_size),
                              mode='clip')
            batch_images = common.torch.as_variable(self.test_images[perm],
                                                    self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.test_codes[perm],
                                                     self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_classes = self.model(batch_images)
            e = self.loss(batch_classes, output_classes)
            loss += e.item()
            a = self.error(batch_classes, output_classes)
            error += a.item()

            images = batch_images.data.cpu().numpy()
            perturbations = common.numpy.uniform_ball(
                images.shape[0],
                numpy.prod(images.shape[1:]),
                epsilon=self.args.epsilon,
                ord=self.norm)
            perturbations = perturbations.reshape(images.shape)

            perturbations = numpy.minimum(
                numpy.ones(images.shape) - images, perturbations)
            perturbations = numpy.maximum(
                numpy.zeros(images.shape) - images, perturbations)

            perturbations = perturbations.astype(numpy.float32)
            batch_perturbed_images = batch_images + common.torch.as_variable(
                perturbations, self.args.use_gpu)

            output_classes = self.model(batch_perturbed_images)

            e = self.loss(batch_classes, output_classes)
            perturbation_loss += e.item()

            e = self.error(batch_classes, output_classes)
            perturbation_error += e.item()

        loss /= num_batches
        error /= num_batches
        perturbation_loss /= num_batches
        perturbation_error /= num_batches
        log('[Training] %d: test %g (%g) %g (%g)' %
            (self.epoch, loss, error, perturbation_loss, perturbation_error))

        num_batches = int(
            math.ceil(self.train_images.shape[0] / self.args.batch_size))
        iteration = self.epoch * num_batches
        self.test_statistics = numpy.vstack((
            self.test_statistics,
            numpy.array([[
                iteration,  # iterations
                iteration * (1 + self.args.max_iterations) *
                self.args.batch_size,  # samples seen
                min(num_batches, iteration) * self.args.batch_size +
                iteration * self.args.max_iterations *
                self.args.batch_size,  # unique samples seen
                loss,
                error,
                perturbation_loss,  # perturbation loss
                perturbation_error,  # perturbation error (1-accuracy)
            ]])))
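The test above draws random perturbations with the project-specific common.numpy.uniform_ball and then clamps them so that every perturbed pixel stays in [0, 1]. A stand-alone sketch of that clamping with plain numpy, using a uniform L-infinity perturbation as an illustrative substitute for uniform_ball:

import numpy

epsilon = 0.1
# Toy batch standing in for batch_images: 4 images of shape 1x28x28 with values in [0, 1].
images = numpy.random.rand(4, 1, 28, 28).astype(numpy.float32)
perturbations = numpy.random.uniform(-epsilon, epsilon, size=images.shape)

# Clamp so that images + perturbations stays inside [0, 1] per pixel.
perturbations = numpy.minimum(numpy.ones(images.shape) - images, perturbations)
perturbations = numpy.maximum(numpy.zeros(images.shape) - images, perturbations)
perturbed = images + perturbations
assert perturbed.min() >= 0 and perturbed.max() <= 1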
Example #14
#!/usr/bin/env python
# encoding: utf-8
"""
@author: mayuyang
@time: 2020/11/20 15:34
@desc:
"""
import yaml
from common import log

with open("test.yaml",encoding="utf-8") as f:
    yaml_log = yaml.load(f)
    log.log().info("test yaml")
    log.log().info(yaml_log)

with open("customer/user.yaml",encoding="utf-8") as f:
    yaml_log = yaml.load(f)
    log.log().info("test user yaml")
    log.log().info(yaml_log)
Example #15
    def train(self, epoch):
        """
        Train for one epoch.

        :param epoch: current epoch
        :type epoch: int
        """

        assert self.encoder is not None and self.decoder is not None
        assert self.scheduler is not None

        self.auto_encoder.train()
        log('[Training] %d set auto encoder to train' % epoch)
        self.encoder.train()
        log('[Training] %d set encoder to train' % epoch)
        self.decoder.train()
        log('[Training] %d set decoder to train' % epoch)

        num_batches = int(math.ceil(self.train_images.shape[0]/self.args.batch_size))
        assert self.encoder.training is True

        permutation = numpy.random.permutation(self.train_images.shape[0])
        permutation = numpy.concatenate((permutation, permutation[:self.args.batch_size]), axis=0)

        for b in range(num_batches):
            self.scheduler.update(epoch, float(b)/num_batches)

            perm = permutation[b * self.args.batch_size: (b + 1) * self.args.batch_size]
            batch_images = common.torch.as_variable(self.train_images[perm], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_images, output_mu, output_logvar = self.auto_encoder(batch_images)
            reconstruction_loss = self.reconstruction_loss(batch_images, output_images)

            self.scheduler.optimizer.zero_grad()
            latent_loss = self.latent_loss(output_mu, output_logvar)
            loss = self.args.beta*reconstruction_loss + latent_loss
            loss.backward()
            self.scheduler.optimizer.step()
            reconstruction_loss = reconstruction_loss.item()
            latent_loss = latent_loss.item()

            reconstruction_error = self.reconstruction_error(batch_images, output_images)
            reconstruction_error = reconstruction_error.item()

            iteration = epoch*num_batches + b + 1
            self.train_statistics = numpy.vstack((self.train_statistics, numpy.array([
                iteration,
                iteration * self.args.batch_size,
                min(num_batches, iteration),
                min(num_batches, iteration) * self.args.batch_size,
                reconstruction_loss,
                reconstruction_error,
                latent_loss,
                torch.mean(output_mu).item(),
                torch.var(output_mu).item(),
                torch.mean(output_logvar).item(),
            ])))

            skip = 10
            if b%skip == skip//2:
                log('[Training] %d | %d: %g (%g) %g %g %g %g' % (
                    epoch,
                    b,
                    numpy.mean(self.train_statistics[max(0, iteration-skip):iteration, 4]),
                    numpy.mean(self.train_statistics[max(0, iteration-skip):iteration, 5]),
                    numpy.mean(self.train_statistics[max(0, iteration-skip):iteration, 6]),
                    numpy.mean(self.train_statistics[max(0, iteration-skip):iteration, 7]),
                    numpy.mean(self.train_statistics[max(0, iteration-skip):iteration, 8]),
                    numpy.mean(self.train_statistics[max(0, iteration-skip):iteration, 9]),
                ))
Example #16
    def main(self):
        """
        Main method.
        """

        theta = utils.read_hdf5(self.args.theta_file)
        log('[Data] read %s' % self.args.theta_file)

        if theta.shape[1] == 1:
            log('[Data] theta min: [%f]' % (
                numpy.min(theta[:, 0])))
            log('[Data] theta max: [%f]' % (
                numpy.max(theta[:, 0])))
        elif theta.shape[1] == 2:
            log('[Data] theta min: [%f, %f]' % (
                numpy.min(theta[:, 0]), numpy.min(theta[:, 1])))
            log('[Data] theta max: [%f, %f]' % (
                numpy.max(theta[:, 0]), numpy.max(theta[:, 1])))
        elif theta.shape[1] == 3:
            log('[Data] theta min: [%f, %f, %f]' % (
                numpy.min(theta[:, 0]), numpy.min(theta[:, 1]), numpy.min(theta[:, 2])))
            log('[Data] theta max: [%f, %f, %f]' % (
                numpy.max(theta[:, 0]), numpy.max(theta[:, 1]), numpy.max(theta[:, 2])))
        elif theta.shape[1] == 4:
            log('[Data] theta min: [%f, %f, %f, %f]' % (
                numpy.min(theta[:, 0]), numpy.min(theta[:, 1]), numpy.min(theta[:, 2]),
                numpy.min(theta[:, 3])))
            log('[Data] theta max: [%f, %f, %f, %f]' % (
                numpy.max(theta[:, 0]), numpy.max(theta[:, 1]), numpy.max(theta[:, 2]),
                numpy.max(theta[:, 3])))
        elif theta.shape[1] == 6:
            log('[Data] theta min: [%f, %f, %f, %f, %f, %f]' % (
                numpy.min(theta[:, 0]), numpy.min(theta[:, 1]), numpy.min(theta[:, 2]),
                numpy.min(theta[:, 3]), numpy.min(theta[:, 4]), numpy.min(theta[:, 5])))
            log('[Data] theta max: [%f, %f, %f, %f, %f, %f]' % (
                numpy.max(theta[:, 0]), numpy.max(theta[:, 1]), numpy.max(theta[:, 2]),
                numpy.max(theta[:, 3]), numpy.max(theta[:, 4]), numpy.max(theta[:, 5])))

        codes = utils.read_hdf5(self.args.codes_file)
        log('[Data] read %s' % self.args.codes_file)
        print(codes)
Example #17
    def genFishsT(self, genFishTimestamp):
        newFishs = []
        nowTime = int(time.time()) * 1000
        cooldownTimes = []
        startDate = datetime(2018, 2, 16).date()
        testDate = datetime(2018, 2, 9).date()
        endDate = datetime(2018, 2, 20).date()
        curDate = datetime.now().date()
        for level, levelData in enumerate(FISH_LEVELS_DATA):
            if level == 34:
                if curDate != testDate:
                    if curDate < startDate or curDate > endDate:
                        continue

            # Allocate spawn counts per fish level according to appearance probability
            count = self.level2appearCount[level]
            # if level in self.rmLevel2Count:  # add removed fish back
            # count = max(self.rmLevel2Count[level], count)
            # if count > levelData.limit_count:
            # count = levelData.limit_count
            # self.rmLevel2Count[level] -= count
            # self.rmLevel2Count[level] = max(self.rmLevel2Count[level], 0)
            fishDatas = []
            curCount = 0
            # if levelData.limit_count and (level in NEED_LIMIT_LEVELS) and (len(self.level2Fishs[level]) >= levelData.limit_count):
            # continue
            if not count:
                continue
            while True:
                # print(u"现在等级:%s" % level)
                tmpFishs = self.game.server.fishGenerator.getFishData(level)
                curCount += len(tmpFishs)
                if curCount > count:
                    fishDatas.append(tmpFishs[:curCount - count])
                    break
                else:
                    fishDatas.append(tmpFishs)
                    if curCount == count:
                        break
            newFishs.extend(fishDatas)

        random.shuffle(newFishs)

        totalCount = len(newFishs)
        sliceMs = FISH_GEN_BATCH_MS / totalCount
        genFishProto = S_C_GenerateFish()
        idx = 0
        #firstFish = None
        tempFish = []
        level2Count = {}
        index2limitCount = {}
        for fishs in newFishs:
            for fishData in fishs:
                level = fishData.level
                fishTime = genFishTimestamp + sliceMs * idx + fishData.timestampOffset
                fishLevelData = FISH_LEVELS_DATA[level]

                # Limit on how many of these fish can coexist
                isBeLimit = False
                for index, fishLevelsLimit in enumerate(
                        FISH_LEVELS_LIMIT_DATA):
                    fishLevels = fishLevelsLimit.fishLevels
                    min_limit_time = fishLevelsLimit.death_min_limit_time * 1000
                    if index in self.index2deathTimes:  # number of dead fish still in cooldown
                        for deathTime in self.index2deathTimes[index][:]:
                            if (fishTime - deathTime) >= min_limit_time:
                                self.index2deathTimes[index].remove(deathTime)
                        waitFishCount = len(self.index2deathTimes[index])
                    else:
                        waitFishCount = 0
                    if level in fishLevels:
                        if index not in index2limitCount:
                            index2limitCount[index] = 0
                        if fishLevelsLimit.limit_count and index2limitCount[
                                index] >= fishLevelsLimit.limit_count - waitFishCount:
                            isBeLimit = True
                            break
                        if not index2limitCount[index]:
                            lastAddTime = self.getLastAddTime4fishs(fishLevels)
                            min_limit_time = fishLevelsLimit.min_limit_time * 1000
                            max_limit_time = fishLevelsLimit.max_limit_time * 1000
                            if (fishTime -
                                    lastAddTime) < min_limit_time:  # wait time not long enough yet
                                # fishTime = min(genFishTimestamp + fishData.timestampOffset + FISH_GEN_BATCH_MS,\
                                # lastAddTime + min_limit_time)
                                # if (fishTime - lastAddTime) < min_limit_time:
                                isBeLimit = True
                                break
                            # if max_limit_time and (fishTime - lastAddTime) > max_limit_time:  # wait time exceeded
                            # fishTime = max(genFishTimestamp, fishTime - max_limit_time * 1000)
                        index2limitCount[index] += 1
                if isBeLimit:
                    break

                max_together_count = fishLevelData.max_together_count
                isTogether = False
                if level not in self.level2lastAddTime:
                    self.level2lastAddTime[level] = 0
                if max_together_count:  # several can spawn at the same time
                    if level not in level2Count:
                        level2Count[level] = 0
                    elif level2Count[level] + 1 < (random.random() *
                                                   max_together_count):
                        fishTime = self.level2lastAddTime[level] + 100
                        isTogether = True
                    level2Count[level] += 1
                if not isTogether:  # time limit between spawns
                    lastAddTime = self.level2lastAddTime[level]
                    min_limit_time = fishLevelData.min_limit_time * 1000
                    max_limit_time = fishLevelData.max_limit_time * 1000
                    if (fishTime - lastAddTime) < min_limit_time:  # wait time not long enough yet
                        # fishTime = min(genFishTimestamp + fishData.timestampOffset + FISH_GEN_BATCH_MS, lastAddTime + min_limit_time)
                        # if (fishTime - lastAddTime) < min_limit_time:
                        if level in level2Count:
                            level2Count[level] -= 1
                        break
                    # if max_limit_time and (fishTime - lastAddTime) > max_limit_time:  # wait time exceeded
                    # fishTime = max(genFishTimestamp, fishTime - max_limit_time * 1000)
                self.level2lastAddTime[level] = fishTime  # record the last spawn time for each fish level
                fish = Fish(self.genId(), fishData.idx, fishData.level, fishData.order, fishTime, fishData.initRot, \
                    fishData.x, fishData.y, fishData.duration, fishData.multi, fishData.rate, fishData.dice, fishData.route)
                #if not firstFish:
                #    firstFish = fish
                if fish.dice:
                    log(u'gen fish id[%s] idx[%s] dice[%s] level[%s]' %
                        (fish.id, fish.idx, fish.dice, fish.level))
                self.add(fish)
                tempFish.append(fish)
                pbAppendFishList(genFishProto.fishs, fish,
                                 self.game.server.showFishHitCoiunt)
            idx += 1
        # print(u"生成了鱼的数量:%s" % len(tempFish))
        self.game.sendAll(genFishProto)
Example #18
import datetime
from common.log import log
import xlrd, requests, unittest, time, json, pymysql

log = log().ll("./logs/api_log.log")

# now_time = datetime.datetime.now()  # get the current time
# t1 = (now_time + datetime.timedelta(seconds=+3)).strftime("%Y-%m-%d %H:%M:%S")  # current time + 3 seconds
# t2 = (now_time + datetime.timedelta(seconds=+15)).strftime("%Y-%m-%d %H:%M:%S")  # current time + 15 seconds


# API test class
class base():

    # Constructor
    def __init__(self, s_url, s_data, filepath, param_sheet, assert_sheet):
        """
        Constructor.
        :param self.dir_case: directory where the parameter files are stored
        :param s_url: login API url
        :param s_data: parameters for the login API
        :param url: request address
        :param filepath: name (with extension) of the Excel file holding the parameters
        :param param_sheet: index of the parameter sheet to iterate over
        :param assert_sheet: index of the assertion sheet to iterate over
        """
        self.dir_case = './parameter/' + filepath
        self.s_url = s_url
        self.s_data = s_data
        self.filepath = filepath
        self.param = param_sheet
Example #19
    def train(self):
        """
        Train adversarially.
        """

        num_batches = int(
            math.ceil(self.train_images.shape[0] / self.args.batch_size))
        permutation = numpy.random.permutation(self.train_images.shape[0])
        perturbation_permutation = numpy.random.permutation(
            self.train_images.shape[0])
        if self.args.safe:
            perturbation_permutation = perturbation_permutation[
                self.train_valid == 1]
        else:
            perturbation_permutation = permutation

        for b in range(num_batches):
            self.scheduler.update(self.epoch, float(b) / num_batches)

            self.model.eval()
            assert self.model.training is False
            objective = self.objective_class()
            split = self.args.batch_size // 2

            if self.args.full_variant:
                perm = numpy.concatenate(
                    (numpy.take(permutation,
                                range(b * self.args.batch_size,
                                      b * self.args.batch_size + split),
                                mode='wrap'),
                     numpy.take(perturbation_permutation,
                                range(b * self.args.batch_size + split,
                                      (b + 1) * self.args.batch_size),
                                mode='wrap')),
                    axis=0)
                batch_images = common.torch.as_variable(
                    self.train_images[perm], self.args.use_gpu)
                batch_classes = common.torch.as_variable(
                    self.train_codes[perm], self.args.use_gpu)
                batch_theta = common.torch.as_variable(self.train_theta[perm],
                                                       self.args.use_gpu)
                batch_images = batch_images.permute(0, 3, 1, 2)

                attack = self.setup_attack(self.model, batch_images[:split],
                                           batch_classes[:split])
                success, perturbations, _, _, _ = attack.run(
                    objective, self.args.verbose)
                batch_perturbations1 = common.torch.as_variable(
                    perturbations.astype(numpy.float32), self.args.use_gpu)
                batch_perturbed_images1 = batch_images[:split] + batch_perturbations1

                if isinstance(self.decoder, models.SelectiveDecoder):
                    self.decoder.set_code(batch_classes[split:])
                attack = self.setup_decoder_attack(self.decoder_classifier,
                                                   batch_theta[split:],
                                                   batch_classes[split:])
                attack.set_bound(torch.from_numpy(self.min_bound),
                                 torch.from_numpy(self.max_bound))
                decoder_success, decoder_perturbations, probabilities, norm, _ = attack.run(
                    objective, self.args.verbose)

                batch_perturbed_theta = batch_theta[
                    split:] + common.torch.as_variable(decoder_perturbations,
                                                       self.args.use_gpu)
                batch_perturbed_images2 = self.decoder(batch_perturbed_theta)
                batch_perturbations2 = batch_perturbed_images2 - batch_images[
                    split:]

                batch_input_images = torch.cat(
                    (batch_perturbed_images1, batch_perturbed_images2), dim=0)

                self.model.train()
                assert self.model.training is True

                output_classes = self.model(batch_input_images)

                self.scheduler.optimizer.zero_grad()
                perturbation_loss = self.loss(batch_classes[:split],
                                              output_classes[:split])
                decoder_perturbation_loss = self.loss(batch_classes[split:],
                                                      output_classes[split:])
                loss = (perturbation_loss + decoder_perturbation_loss) / 2
                loss.backward()
                self.scheduler.optimizer.step()
                loss = loss.item()
                perturbation_loss = perturbation_loss.item()
                decoder_perturbation_loss = decoder_perturbation_loss.item()

                gradient = torch.mean(
                    torch.abs(list(self.model.parameters())[0].grad))
                gradient = gradient.item()

                perturbation_error = self.error(batch_classes[:split],
                                                output_classes[:split])
                perturbation_error = perturbation_error.item()

                decoder_perturbation_error = self.error(
                    batch_classes[split:], output_classes[split:])
                decoder_perturbation_error = decoder_perturbation_error.item()

                error = (perturbation_error + decoder_perturbation_error) / 2
            else:
                perm = numpy.concatenate((
                    numpy.take(
                        perturbation_permutation,
                        range(b * self.args.batch_size + split + split // 2,
                              (b + 1) * self.args.batch_size),
                        mode='wrap'),
                    numpy.take(
                        permutation,
                        range(b * self.args.batch_size,
                              b * self.args.batch_size + split + split // 2),
                        mode='wrap'),
                ),
                                         axis=0)
                batch_images = common.torch.as_variable(
                    self.train_images[perm], self.args.use_gpu)
                batch_classes = common.torch.as_variable(
                    self.train_codes[perm], self.args.use_gpu)
                batch_theta = common.torch.as_variable(self.train_theta[perm],
                                                       self.args.use_gpu)
                batch_images = batch_images.permute(0, 3, 1, 2)

                attack = self.setup_attack(self.model,
                                           batch_images[split // 2:split],
                                           batch_classes[split // 2:split])
                success, perturbations, _, _, _ = attack.run(
                    objective, self.args.verbose)
                batch_perturbations1 = common.torch.as_variable(
                    perturbations.astype(numpy.float32), self.args.use_gpu)
                batch_perturbed_images1 = batch_images[
                    split // 2:split] + batch_perturbations1

                if isinstance(self.decoder, models.SelectiveDecoder):
                    self.decoder.set_code(batch_classes[:split // 2])
                attack = self.setup_decoder_attack(self.decoder_classifier,
                                                   batch_theta[:split // 2],
                                                   batch_classes[:split // 2])
                attack.set_bound(torch.from_numpy(self.min_bound),
                                 torch.from_numpy(self.max_bound))
                decoder_success, decoder_perturbations, probabilities, norm, _ = attack.run(
                    objective, self.args.verbose)

                batch_perturbed_theta = batch_theta[:split //
                                                    2] + common.torch.as_variable(
                                                        decoder_perturbations,
                                                        self.args.use_gpu)
                batch_perturbed_images2 = self.decoder(batch_perturbed_theta)
                batch_perturbations2 = batch_perturbed_images2 - batch_images[:split
                                                                              //
                                                                              2]

                batch_input_images = torch.cat(
                    (batch_perturbed_images2, batch_perturbed_images1,
                     batch_images[split:]),
                    dim=0)

                self.model.train()
                assert self.model.training is True

                output_classes = self.model(batch_input_images)

                self.scheduler.optimizer.zero_grad()
                loss = self.loss(batch_classes[split:], output_classes[split:])
                perturbation_loss = self.loss(batch_classes[split // 2:split],
                                              output_classes[split // 2:split])
                decoder_perturbation_loss = self.loss(
                    batch_classes[:split // 2], output_classes[:split // 2])
                l = (loss + perturbation_loss + decoder_perturbation_loss) / 3
                l.backward()
                self.scheduler.optimizer.step()
                loss = loss.item()
                perturbation_loss = perturbation_loss.item()
                decoder_perturbation_loss = decoder_perturbation_loss.item()

                gradient = torch.mean(
                    torch.abs(list(self.model.parameters())[0].grad))
                gradient = gradient.item()

                error = self.error(batch_classes[split:],
                                   output_classes[split:])
                error = error.item()

                perturbation_error = self.error(
                    batch_classes[split // 2:split],
                    output_classes[split // 2:split])
                perturbation_error = perturbation_error.item()

                decoder_perturbation_error = self.error(
                    batch_classes[:split // 2], output_classes[:split // 2])
                decoder_perturbation_error = decoder_perturbation_error.item()

            iterations = numpy.mean(
                success[success >= 0]) if numpy.sum(success >= 0) > 0 else -1
            norm = numpy.mean(
                numpy.linalg.norm(perturbations.reshape(
                    perturbations.shape[0], -1),
                                  axis=1,
                                  ord=self.norm))
            success = numpy.sum(success >= 0) / self.args.batch_size

            decoder_iterations = numpy.mean(
                decoder_success[decoder_success >= 0]) if numpy.sum(
                    decoder_success >= 0) > 0 else -1
            decoder_norm = numpy.mean(
                numpy.linalg.norm(decoder_perturbations, axis=1,
                                  ord=self.norm))
            decoder_success = numpy.sum(
                decoder_success >= 0) / self.args.batch_size

            iteration = self.epoch * num_batches + b + 1
            self.train_statistics = numpy.vstack((
                self.train_statistics,
                numpy.array([[
                    iteration,  # iterations
                    iteration * (1 + self.args.max_iterations) *
                    self.args.batch_size,  # samples seen
                    min(num_batches, iteration) * self.args.batch_size +
                    iteration * self.args.max_iterations *
                    self.args.batch_size,  # unique samples seen
                    loss,
                    error,
                    perturbation_loss,
                    perturbation_error,
                    decoder_perturbation_loss,
                    decoder_perturbation_error,
                    success,
                    iterations,
                    norm,
                    decoder_success,
                    decoder_iterations,
                    decoder_norm,
                    gradient
                ]])))

            if b % self.args.skip == self.args.skip // 2:
                log('[Training] %d | %d: %g (%g) %g (%g) %g (%g) [%g]' % (
                    self.epoch,
                    b,
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 3]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 4]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 5]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 6]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 7]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 8]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, -1]),
                ))
                log('[Training] %d | %d: %g (%g, %g) %g (%g, %g)' % (
                    self.epoch,
                    b,
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 9]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 10]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 11]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 12]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 13]),
                    numpy.mean(self.train_statistics[
                        max(0, iteration - self.args.skip):iteration, 14]),
                ))

        self.debug('clean.%d.png' % self.epoch,
                   batch_images.permute(0, 2, 3, 1))
        self.debug('perturbed.%d.png' % self.epoch,
                   batch_perturbed_images1.permute(0, 2, 3, 1))
        self.debug('perturbed2.%d.png' % self.epoch,
                   batch_perturbed_images2.permute(0, 2, 3, 1))
        self.debug('perturbation.%d.png' % self.epoch,
                   batch_perturbations1.permute(0, 2, 3, 1),
                   cmap='seismic')
        self.debug('perturbation2.%d.png' % self.epoch,
                   batch_perturbations2.permute(0, 2, 3, 1),
                   cmap='seismic')
Example #20
    log(global_data.log_path, 'info', end_time, global_data.data_id_list)
    log(global_data.log_path, 'info', end_time, 'Failed task list:')
    log(global_data.log_path, 'info', end_time, global_data.data_id_fail_list)
    log(global_data.log_path, 'info', end_time, 'Successful task list:')
    log(global_data.log_path, 'info', end_time, global_data.data_id_success_list)


def run(t_num=5):
    while True:
        task_list = functions.load_task(t_num, redis_key)
        if task_list:
            for task in task_list:
                task_id = task['video_id']
                global_data.data_id_list.append(task_id)
            DLD_FILE(task_list)
        else:
            break


if __name__ == '__main__':
    start_time = functions.get_time_now()
    log_name = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    log_name = ''.join([site_name, log_name, '.txt'])
    global_data.log_path = os.path.join(os.getcwd(), 'logs', log_name)
    log(global_data.log_path, 'info', start_time, 'Starting download tasks')
    run(t_num=5)
    end_time = functions.get_time_now()
    print_status()


Example #21
    def test(self):
        """
        Test the model.
        """

        self.model.eval()
        assert self.model.training is False
        log('[Training] %d set classifier to eval' % self.epoch)

        loss = error = 0
        num_batches = int(
            math.ceil(self.args.test_samples / self.args.batch_size))

        for b in range(num_batches):
            perm = numpy.take(range(self.args.test_samples),
                              range(b * self.args.batch_size,
                                    (b + 1) * self.args.batch_size),
                              mode='clip')
            batch_images = common.torch.as_variable(self.test_images[perm],
                                                    self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.test_codes[perm],
                                                     self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_classes = self.model(batch_images)
            e = self.loss(batch_classes, output_classes)
            loss += e.item()
            a = self.error(batch_classes, output_classes)
            error += a.item()

        perturbation_loss = perturbation_error = success = iterations = norm = 0
        num_batches = int(
            math.ceil(self.args.attack_samples / self.args.batch_size))
        assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[
            0]

        for b in range(num_batches):
            perm = numpy.take(range(self.args.attack_samples),
                              range(b * self.args.batch_size,
                                    (b + 1) * self.args.batch_size),
                              mode='clip')
            batch_images = common.torch.as_variable(self.test_images[perm],
                                                    self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.test_codes[perm],
                                                     self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            objective = self.objective_class()
            attack = self.setup_attack(self.model, batch_images, batch_classes)
            s, p, _, _, _ = attack.run(objective, False)

            batch_images = batch_images + common.torch.as_variable(
                p.astype(numpy.float32), self.args.use_gpu)
            output_classes = self.model(batch_images)

            e = self.loss(batch_classes, output_classes)
            perturbation_loss += e.item()

            e = self.error(batch_classes, output_classes)
            perturbation_error += e.item()

            iterations += numpy.mean(
                s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1
            norm += numpy.mean(
                numpy.linalg.norm(p.reshape(p.shape[0], -1),
                                  axis=1,
                                  ord=self.norm))
            success += numpy.sum(s >= 0) / self.args.batch_size

        decoder_perturbation_loss = decoder_perturbation_error = decoder_success = decoder_iterations = decoder_norm = 0
        num_batches = int(
            math.ceil(self.args.attack_samples / self.args.batch_size))
        assert self.args.attack_samples > 0 and self.args.attack_samples <= self.test_images.shape[
            0]

        for b in range(num_batches):
            perm = numpy.take(range(self.args.attack_samples),
                              range(b * self.args.batch_size,
                                    (b + 1) * self.args.batch_size),
                              mode='clip')
            batch_theta = common.torch.as_variable(self.test_theta[perm],
                                                   self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.test_codes[perm],
                                                     self.args.use_gpu)

            objective = self.objective_class()
            if isinstance(self.decoder, models.SelectiveDecoder):
                self.decoder.set_code(batch_classes)
            attack = self.setup_decoder_attack(self.decoder_classifier,
                                               batch_theta, batch_classes)
            attack.set_bound(torch.from_numpy(self.min_bound),
                             torch.from_numpy(self.max_bound))
            s, p, _, _, _ = attack.run(objective, False)

            perturbations = common.torch.as_variable(p, self.args.use_gpu)
            batch_perturbed_theta = batch_theta + perturbations
            batch_perturbed_images = self.decoder(batch_perturbed_theta)

            output_classes = self.model(batch_perturbed_images)
            e = self.loss(batch_classes, output_classes)
            decoder_perturbation_loss += e.item()
            a = self.error(batch_classes, output_classes)
            decoder_perturbation_error += a.item()

            decoder_iterations += numpy.mean(
                s[s >= 0]) if numpy.sum(s >= 0) > 0 else -1
            decoder_norm += numpy.mean(
                numpy.linalg.norm(p.reshape(p.shape[0], -1),
                                  axis=1,
                                  ord=self.norm))
            decoder_success += numpy.sum(s >= 0) / self.args.batch_size

        loss /= num_batches
        error /= num_batches
        perturbation_loss /= num_batches
        perturbation_error /= num_batches
        success /= num_batches
        iterations /= num_batches
        norm /= num_batches
        decoder_perturbation_loss /= num_batches
        decoder_perturbation_error /= num_batches
        decoder_success /= num_batches
        decoder_iterations /= num_batches
        decoder_norm /= num_batches
        log('[Training] %d: test %g (%g) %g (%g) %g (%g)' %
            (self.epoch, loss, error, perturbation_loss, perturbation_error,
             decoder_perturbation_loss, decoder_perturbation_error))
        log('[Training] %d: test %g (%g, %g) %g (%g, %g)' %
            (self.epoch, success, iterations, norm, decoder_success,
             decoder_iterations, decoder_norm))

        num_batches = int(
            math.ceil(self.train_images.shape[0] / self.args.batch_size))
        iteration = self.epoch * num_batches
        self.test_statistics = numpy.vstack((
            self.test_statistics,
            numpy.array([[
                iteration,  # iterations
                iteration * (1 + self.args.max_iterations) *
                self.args.batch_size,  # samples seen
                min(num_batches, iteration) * self.args.batch_size +
                iteration * self.args.max_iterations *
                self.args.batch_size,  # unique samples seen
                loss,
                error,
                perturbation_loss,
                perturbation_error,
                decoder_perturbation_loss,
                decoder_perturbation_error,
                success,
                iterations,
                norm,
                decoder_success,
                decoder_iterations,
                decoder_norm,
            ]])))
Example #22
    def doAfterDoCurAction(self, player, action, actionCards):
        log(
            u'[doAfterDoCurAction] room[%s] player[%s].' %
            (self.roomId, player.chair), LOG_LEVEL_RELEASE)

        self.nextProc(player)
Example #23
    def __init__(self, args=None):
        """
        Initialize.

        :param args: optional arguments if not to use sys.argv
        :type args: [str]
        """

        self.args = None
        """ Arguments of program. """

        parser = self.get_parser()
        if args is not None:
            self.args = parser.parse_args(args)
        else:
            self.args = parser.parse_args()

        self.train_images = None
        """ (numpy.ndarray) Images to train on. """

        self.test_images = None
        """ (numpy.ndarray) Images to test on. """

        self.train_codes = None
        """ (numpy.ndarray) Labels to train on. """

        self.test_codes = None
        """ (numpy.ndarray) Labels to test on. """

        if self.args.log_file:
            utils.makedir(os.path.dirname(self.args.log_file))
            Log.get_instance().attach(open(self.args.log_file, 'w'))

        log('-- ' + self.__class__.__name__)
        for key in vars(self.args):
            log('[Training] %s=%s' % (key, str(getattr(self.args, key))))

        utils.makedir(os.path.dirname(self.args.encoder_file))
        utils.makedir(os.path.dirname(self.args.decoder_file))
        utils.makedir(os.path.dirname(self.args.log_file))

        self.resolution = None
        """ (int) Resolution. """

        self.encoder = None
        """ (models.LearnedVariationalEncoder) Encoder. """

        self.decoder = None
        """ (models.LearnedDecoder) Decoder. """

        self.classifier = None
        """ (models.Classifier) Classifier. """

        self.encoder_scheduler = None
        """ (scheduler.Scheduler) Encoder schduler. """

        self.decoder_scheduler = None
        """ (scheduler.Scheduler) Decoder schduler. """

        self.classifier_scheduler = None
        """ (scheduler.Scheduler) Classifier schduler. """

        self.random_codes = None
        """ (numyp.ndarray) Random codes. """

        self.train_statistics = numpy.zeros((0, 15))
        """ (numpy.ndarray) Will hold training statistics. """

        self.test_statistics = numpy.zeros((0, 12))
        """ (numpy.ndarray) Will hold testing statistics. """

        self.results = dict()
        """ (dict) Results. """

        self.logvar = -2.5
        """ (float) Log-variance hyper parameter. """
Example #24
    def onExitGame(self,
                   player,
                   sendMessage=True,
                   byPlayer=False,
                   isEndGame=False):
        """
        Leave the game.
        """
        if not player or not player.game or player.game != self:
            return

        isOwnnerEnd = False  # game ended because the room owner left early
        isDrop = False  # whether to disconnect the player; only True when actually leaving the current game
        side = player.chair

        if (byPlayer and self.stage
                == WAIT_START) or isEndGame:  # client left before the game started, or the game has ended
            isDrop = True
            log(
                u'[on exit]nickname[%s] is exit room[%s] in wait time.' %
                (player.nickname, self.roomId), LOG_LEVEL_RELEASE)
            if sendMessage:
                exitResp = baseProto_pb2.S_C_ExitRoom()
                exitResp.info.side = side
                exitResp.info.nickname = player.nickname
                self.sendExclude((player, ), exitResp)
            self.players[side] = None
            self.playerCount -= 1
            # in this case no reconnect info is stored, so reconnection is impossible
            self.server.tryRmExitPlayerData(player, self)
            if not isEndGame and side == OWNNER_SIDE and not self.ownner:  # owner left before the start: kick everyone and dissolve the room
                log(
                    u'[try dissolve game]game is not start and master[%s] is exit.'
                    % (player.nickname), LOG_LEVEL_RELEASE)
                isOwnnerEnd = True
        else:  # leaving mid-game, reconnect info must be stored
            log(
                u'[on exit]nickname[%s] is exit room[%s] in game time.' %
                (player.nickname, self.roomId), LOG_LEVEL_RELEASE)
            self.exitPlayers.append(side)
            robot = self.getRobot()
            self.setPlayerCopy(robot, player)
            self.server.saveExitPlayer(player, self)

            # broadcast offline state
            # self.onLeaveGame(robot.chair)
            robot.isOnline = False
            _resp = baseProto_pb2.S_C_OnlineState()
            _resp.changeSide = player.chair
            _resp.isOnline = player.isOnline
            self.sendExclude((player, ), _resp)

        player.chair = consts.SIDE_UNKNOWN
        player.game = None

        self.server.userDBOnExitGame(player, self, isDrop)

        if side == self.controlPlayerSide:
            for otherPlayer in self.getPlayers((robot, )):
                self.onExitGame(otherPlayer, sendMessage=False)
        if byPlayer and self.stage == WAIT_START:
            resp = baseProto_pb2.S_C_ExitRoomResult()
            resp.result = True
            self.sendOne(player, resp)
        if isOwnnerEnd:
            self.endGame(isNotStart=True)
Example #25
    def train(self, epoch):
        """
        Train for one epoch.

        :param epoch: current epoch
        :type epoch: int
        """

        self.encoder.train()
        log('[Training] %d set encoder to train' % epoch)
        self.decoder.train()
        log('[Training] %d set decoder to train' % epoch)
        self.classifier.train()
        log('[Training] %d set classifier to train' % epoch)

        num_batches = int(
            math.ceil(self.train_images.shape[0] / self.args.batch_size))
        assert self.encoder.training is True

        permutation = numpy.random.permutation(self.train_images.shape[0])
        permutation = numpy.concatenate(
            (permutation, permutation[:self.args.batch_size]), axis=0)

        for b in range(num_batches):
            self.encoder_scheduler.update(epoch, float(b) / num_batches)
            self.decoder_scheduler.update(epoch, float(b) / num_batches)
            self.classifier_scheduler.update(epoch, float(b) / num_batches)

            perm = permutation[b * self.args.batch_size:(b + 1) *
                               self.args.batch_size]
            batch_images = common.torch.as_variable(self.train_images[perm],
                                                    self.args.use_gpu, True)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_mu, output_logvar = self.encoder(batch_images)
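            # Reparameterization trick: sample codes from mu and logvar so gradients flow through both.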
            output_codes = self.reparameterize(output_mu, output_logvar)
            output_images = self.decoder(output_codes)

            output_real_classes = self.classifier(batch_images)
            output_reconstructed_classes = self.classifier(output_images)

            latent_loss = self.latent_loss(output_mu, output_logvar)
            reconstruction_loss = self.reconstruction_loss(
                batch_images, output_images)
            decoder_loss = self.decoder_loss(output_reconstructed_classes)
            discriminator_loss = self.discriminator_loss(
                output_real_classes, output_reconstructed_classes)

            self.encoder_scheduler.optimizer.zero_grad()
            loss = latent_loss + self.args.beta * reconstruction_loss + self.args.gamma * decoder_loss + self.args.eta * torch.sum(
                torch.abs(output_logvar))
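            # retain_graph=True keeps the shared forward graph alive so the decoder and classifier updates below can also backpropagate through it.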
            loss.backward(retain_graph=True)
            self.encoder_scheduler.optimizer.step()

            self.decoder_scheduler.optimizer.zero_grad()
            loss = self.args.beta * reconstruction_loss + self.args.gamma * decoder_loss
            loss.backward(retain_graph=True)
            self.decoder_scheduler.optimizer.step()

            self.classifier_scheduler.optimizer.zero_grad()
            loss = self.args.gamma * discriminator_loss
            loss.backward()
            self.classifier_scheduler.optimizer.step()

            reconstruction_error = self.reconstruction_error(
                batch_images, output_images)
            iteration = epoch * num_batches + b + 1
            self.train_statistics = numpy.vstack(
                (self.train_statistics,
                 numpy.array([
                     iteration, iteration * self.args.batch_size,
                     min(num_batches, iteration),
                     min(num_batches, iteration) * self.args.batch_size,
                     reconstruction_loss.data, reconstruction_error.data,
                     latent_loss.data,
                     torch.mean(output_mu).item(),
                     torch.var(output_mu).item(),
                     torch.mean(output_logvar).item(),
                     decoder_loss.item(),
                     discriminator_loss.item(),
                     torch.mean(
                         torch.abs(list(
                             self.encoder.parameters())[0].grad)).item(),
                     torch.mean(
                         torch.abs(list(
                             self.decoder.parameters())[0].grad)).item(),
                     torch.mean(
                         torch.abs(list(
                             self.classifier.parameters())[0].grad)).item()
                 ])))

            skip = 10
            if b % skip == skip // 2:
                log('[Training] %d | %d: %g (%g) %g (%g, %g, %g)' % (
                    epoch,
                    b,
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 4]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 5]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 6]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 7]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 8]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 9]),
                ))
                log('[Training] %d | %d: %g %g (%g, %g, %g)' % (
                    epoch,
                    b,
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 10]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 11]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 12]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 13]),
                    numpy.mean(self.train_statistics[max(0, iteration -
                                                         skip):iteration, 14]),
                ))
Ejemplo n.º 26
0
    def sendExclude(self, excludePlayers, protocol_obj):
        log(
            u'[try send]game[%s] exit player %s' %
            (self.roomId, self.exitPlayers), LOG_LEVEL_RELEASE)
        self.server.send(self.getOnlinePlayers(excludePlayers), protocol_obj)
Ejemplo n.º 27
0
    def test(self, epoch):
        """
        Test the model.

        :param epoch: current epoch
        :type epoch: int
        """

        self.encoder.eval()
        log('[Training] %d set encoder to eval' % epoch)
        self.decoder.eval()
        log('[Training] %d set decoder to eval' % epoch)
        self.classifier.eval()
        log('[Training] %d set classifier to eval' % epoch)

        latent_loss = 0
        reconstruction_loss = 0
        reconstruction_error = 0
        decoder_loss = 0
        discriminator_loss = 0
        mean = 0
        var = 0
        logvar = 0
        pred_images = None
        pred_codes = None

        num_batches = int(
            math.ceil(self.test_images.shape[0] / self.args.batch_size))
        assert self.encoder.training is False

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size,
                        self.test_images.shape[0])
            batch_images = common.torch.as_variable(
                self.test_images[b_start:b_end], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_mu, output_logvar = self.encoder(batch_images)
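            # At evaluation time the mean code is decoded directly; no sampling.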
            output_images = self.decoder(output_mu)

            output_real_classes = self.classifier(batch_images)
            output_reconstructed_classes = self.classifier(output_images)

            # Latent loss.
            e = self.latent_loss(output_mu, output_logvar)
            latent_loss += e.item()

            # Reconstruction loss.
            e = self.reconstruction_loss(batch_images, output_images)
            reconstruction_loss += e.item()

            # Reconstruction error.
            e = self.reconstruction_error(batch_images, output_images)
            reconstruction_error += e.item()

            e = self.decoder_loss(output_reconstructed_classes)
            decoder_loss += e.item()

            # Adversarial loss.
            e = self.discriminator_loss(output_real_classes,
                                        output_reconstructed_classes)
            discriminator_loss += e.item()

            mean += torch.mean(output_mu).item()
            var += torch.var(output_mu).item()
            logvar += torch.mean(output_logvar).item()

            output_images = numpy.squeeze(
                numpy.transpose(output_images.cpu().detach().numpy(),
                                (0, 2, 3, 1)))
            pred_images = common.numpy.concatenate(pred_images, output_images)
            output_codes = output_mu.cpu().detach().numpy()
            pred_codes = common.numpy.concatenate(pred_codes, output_codes)

        utils.write_hdf5(self.args.reconstruction_file, pred_images)
        log('[Training] %d: wrote %s' % (epoch, self.args.reconstruction_file))

        if utils.display():
            png_file = self.args.reconstruction_file + '.%d.png' % epoch
            if epoch == 0:
                vis.mosaic(png_file, self.test_images[:225], 15, 5, 'gray', 0,
                           1)
            else:
                vis.mosaic(png_file, pred_images[:225], 15, 5, 'gray', 0, 1)
            log('[Training] %d: wrote %s' % (epoch, png_file))

        latent_loss /= num_batches
        reconstruction_loss /= num_batches
        reconstruction_error /= num_batches
        decoder_loss /= num_batches
        discriminator_loss /= num_batches
        mean /= num_batches
        var /= num_batches
        logvar /= num_batches
        log('[Training] %d: test %g (%g) %g (%g, %g, %g)' %
            (epoch, reconstruction_loss, reconstruction_error, latent_loss,
             mean, var, logvar))
        log('[Training] %d: test %g %g' %
            (epoch, decoder_loss, discriminator_loss))

        num_batches = int(
            math.ceil(self.train_images.shape[0] / self.args.batch_size))
        iteration = epoch * num_batches
        self.test_statistics = numpy.vstack(
            (self.test_statistics,
             numpy.array([
                 iteration, iteration * self.args.batch_size,
                 min(num_batches, iteration),
                 min(num_batches, iteration) * self.args.batch_size,
                 reconstruction_loss, reconstruction_error, latent_loss, mean,
                 var, logvar, decoder_loss, discriminator_loss
             ])))

        pred_images = None
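        # Sample (and cache) codes from a truncated standard normal and decode them to inspect samples from the prior.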
        if self.random_codes is None:
            self.random_codes = common.numpy.truncated_normal(
                (1000, self.args.latent_space_size)).astype(numpy.float32)
        num_batches = int(
            math.ceil(self.random_codes.shape[0] / self.args.batch_size))

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size,
                        self.random_codes.shape[0])
            if b_start >= b_end: break

            batch_codes = common.torch.as_variable(
                self.random_codes[b_start:b_end], self.args.use_gpu)
            output_images = self.decoder(batch_codes)

            output_images = numpy.squeeze(
                numpy.transpose(output_images.cpu().detach().numpy(),
                                (0, 2, 3, 1)))
            pred_images = common.numpy.concatenate(pred_images, output_images)

        utils.write_hdf5(self.args.random_file, pred_images)
        log('[Training] %d: wrote %s' % (epoch, self.args.random_file))

        if utils.display() and epoch > 0:
            png_file = self.args.random_file + '.%d.png' % epoch
            vis.mosaic(png_file, pred_images[:225], 15, 5, 'gray', 0, 1)
            log('[Training] %d: wrote %s' % (epoch, png_file))

        interpolations = None
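        # Linearly interpolate between random pairs of test codes and decode the resulting paths.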
        perm = numpy.random.permutation(numpy.array(range(
            pred_codes.shape[0])))

        for i in range(50):
            first = pred_codes[i]
            second = pred_codes[perm[i]]
            linfit = scipy.interpolate.interp1d([0, 1],
                                                numpy.vstack([first, second]),
                                                axis=0)
            interpolations = common.numpy.concatenate(
                interpolations, linfit(numpy.linspace(0, 1, 10)))

        pred_images = None
        num_batches = int(
            math.ceil(interpolations.shape[0] / self.args.batch_size))
        interpolations = interpolations.astype(numpy.float32)

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size,
                        interpolations.shape[0])
            if b_start >= b_end: break

            batch_codes = common.torch.as_variable(
                interpolations[b_start:b_end], self.args.use_gpu)
            output_images = self.decoder(batch_codes)
            output_images = numpy.squeeze(
                numpy.transpose(output_images.cpu().detach().numpy(),
                                (0, 2, 3, 1)))
            pred_images = common.numpy.concatenate(pred_images, output_images)

            if b % 100 == 50:
                log('[Testing] %d' % b)

        utils.write_hdf5(self.args.interpolation_file, pred_images)
        log('[Testing] wrote %s' % self.args.interpolation_file)

        if utils.display() and epoch > 0:
            png_file = self.args.interpolation_file + '.%d.png' % epoch
            vis.mosaic(png_file, pred_images[:100], 10, 5, 'gray', 0, 1)
            log('[Training] %d: wrote %s' % (epoch, png_file))
Ejemplo n.º 28
0
    def __init__(self, args=None):
        """
        Initialize.

        :param args: optional arguments if not to use sys.argv
        :type args: [str]
        """

        self.args = None
        """ Arguments of program. """

        parser = self.get_parser()
        if args is not None:
            self.args = parser.parse_args(args)
        else:
            self.args = parser.parse_args()

        self.train_images = None
        """ (numpy.ndarray) Images to train on. """

        self.test_images = None
        """ (numpy.ndarray) Images to test on. """

        self.train_codes = None
        """ (numpy.ndarray) Labels to train on. """

        self.test_codes = None
        """ (numpy.ndarray) Labels to test on. """

        self.resolution = None
        """ (int) Resolution. """

        self.encoder = None
        """ (models.LearnedEncoder) Encoder. """

        self.decoder = None
        """ (models.LearnedDecoder) Decoder. """

        self.reconstruction_error = 0
        """ (float) Reconstruction error. """

        self.code_mean = 0
        """ (float) Mean of the latent codes. """

        self.code_var = 0
        """ (float) Variance of the latent codes. """

        self.pred_images = None
        """ (numpy.ndarray) Test images reconstructed. """

        self.pred_codes = None
        """ (numpy.ndarray) Test latent codes. """

        self.results = dict()
        """ (dict) Results. """

        if self.args.log_file:
            utils.makedir(os.path.dirname(self.args.log_file))
            Log.get_instance().attach(open(self.args.log_file, 'w'))

        log('-- ' + self.__class__.__name__)
        for key in vars(self.args):
            log('[Testing] %s=%s' % (key, str(getattr(self.args, key))))
Ejemplo n.º 29
0
    def loop(self):
        """
        Main loop for training and testing, saving ...
        """

        auto_encoder_params = {
            'lr': self.args.base_lr,
            'lr_decay': self.args.base_lr_decay,
            'lr_min': 0.000000001,
            'weight_decay': self.args.weight_decay
        }

        classifier_params = {
            'lr': self.args.base_lr,
            'lr_decay': self.args.base_lr_decay,
            'lr_min': 0.000000001,
            'weight_decay': self.args.weight_decay
        }

        e = 0
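        # Resume from existing encoder/decoder/classifier checkpoints if all three are present; otherwise start with fresh optimizers and schedulers.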
        if os.path.exists(self.args.encoder_file) and os.path.exists(
                self.args.decoder_file) and os.path.exists(
                    self.args.classifier_file):
            state = State.load(self.args.encoder_file)
            log('[Training] loaded %s' % self.args.encoder_file)
            self.encoder.load_state_dict(state.model)
            log('[Training] loaded encoder')

            if self.args.use_gpu and not cuda.is_cuda(self.encoder):
                self.encoder = self.encoder.cuda()

            optimizer = torch.optim.Adam(list(self.encoder.parameters()),
                                         auto_encoder_params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.encoder_scheduler = ADAMScheduler(optimizer,
                                                   **auto_encoder_params)

            state = State.load(self.args.decoder_file)
            log('[Training] loaded %s' % self.args.decoder_file)
            self.decoder.load_state_dict(state.model)
            log('[Training] loaded decoder')

            if self.args.use_gpu and not cuda.is_cuda(self.decoder):
                self.decoder = self.decoder.cuda()

            optimizer = torch.optim.Adam(list(self.decoder.parameters()),
                                         auto_encoder_params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.decoder_scheduler = ADAMScheduler(optimizer,
                                                   **auto_encoder_params)

            state = State.load(self.args.classifier_file)
            log('[Training] loaded %s' % self.args.classifier_file)
            self.classifier.load_state_dict(state.model)
            log('[Training] loaded classifier')

            if self.args.use_gpu and not cuda.is_cuda(self.classifier):
                self.classifier = self.classifier.cuda()

            optimizer = torch.optim.Adam(list(self.classifier.parameters()),
                                         classifier_params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.classifier_scheduler = ADAMScheduler(optimizer,
                                                      **classifier_params)

            e = state.epoch + 1
            self.encoder_scheduler.update(e)
            self.decoder_scheduler.update(e)
            self.classifier_scheduler.update(e)
        else:
            if self.args.use_gpu and not cuda.is_cuda(self.encoder):
                self.encoder = self.encoder.cuda()
            if self.args.use_gpu and not cuda.is_cuda(self.decoder):
                self.decoder = self.decoder.cuda()
            if self.args.use_gpu and not cuda.is_cuda(self.classifier):
                self.classifier = self.classifier.cuda()

            self.encoder_scheduler = ADAMScheduler(
                list(self.encoder.parameters()), **auto_encoder_params)
            self.encoder_scheduler.initialize()  # !

            self.decoder_scheduler = ADAMScheduler(
                list(self.decoder.parameters()), **auto_encoder_params)
            self.decoder_scheduler.initialize()  # !

            self.classifier_scheduler = ADAMScheduler(
                list(self.classifier.parameters()), **classifier_params)
            self.classifier_scheduler.initialize()  # !

        log('[Training] model needs %gMiB' %
            (cuda.estimate_size(self.encoder) / (1024 * 1024)))

        while e < self.args.epochs:
            log('[Training] %s' % self.encoder_scheduler.report())
            log('[Training] %s' % self.decoder_scheduler.report())
            log('[Training] %s' % self.classifier_scheduler.report())

            testing = elapsed(functools.partial(self.test, e))
            training = elapsed(functools.partial(self.train, e))
            log('[Training] %gs training, %gs testing' % (training, testing))

            #utils.remove(self.args.encoder_file + '.%d' % (e - 1))
            #utils.remove(self.args.decoder_file + '.%d' % (e - 1))
            #utils.remove(self.args.classifier_file + '.%d' % (e - 1))
            State.checkpoint(self.encoder, self.encoder_scheduler.optimizer, e,
                             self.args.encoder_file + '.%d' % e)
            State.checkpoint(self.decoder, self.decoder_scheduler.optimizer, e,
                             self.args.decoder_file + '.%d' % e)
            State.checkpoint(self.classifier,
                             self.classifier_scheduler.optimizer, e,
                             self.args.classifier_file + '.%d' % e)

            log('[Training] %d: checkpoint' % e)
            torch.cuda.empty_cache()  # necessary?

            # Save statistics and plots.
            if self.args.training_file:
                utils.write_hdf5(self.args.training_file,
                                 self.train_statistics)
                log('[Training] %d: wrote %s' % (e, self.args.training_file))
            if self.args.testing_file:
                utils.write_hdf5(self.args.testing_file, self.test_statistics)
                log('[Training] %d: wrote %s' % (e, self.args.testing_file))

            #if utils.display():
            #    self.plot()

            e += 1  # !

        testing = elapsed(functools.partial(self.test, e))
        log('[Training] %gs testing' % (testing))

        #utils.remove(self.args.encoder_file + '.%d' % (e - 1))
        #utils.remove(self.args.decoder_file + '.%d' % (e - 1))
        #utils.remove(self.args.classifier_file + '.%d' % (e - 1))
        State.checkpoint(self.encoder, self.encoder_scheduler.optimizer, e,
                         self.args.encoder_file)
        State.checkpoint(self.decoder, self.decoder_scheduler.optimizer, e,
                         self.args.decoder_file)
        State.checkpoint(self.classifier, self.classifier_scheduler.optimizer,
                         e, self.args.classifier_file)

        self.results = {
            'training_statistics': self.train_statistics,
            'testing_statistics': self.test_statistics,
        }
        if self.args.results_file:
            utils.write_pickle(self.args.results_file, self.results)
            log('[Training] wrote %s' % self.args.results_file)
Ejemplo n.º 30
0
    def main(self):
        """
        Main which should be overwritten.
        """

        self.train_images = utils.read_hdf5(
            self.args.train_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.train_images.shape) < 4:
            self.train_images = numpy.expand_dims(self.train_images, axis=3)
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.train_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_codes = utils.read_hdf5(self.args.train_codes_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.train_codes_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_codes_file)

        self.train_codes = self.train_codes[:, self.args.label_index]
        self.test_codes = self.test_codes[:, self.args.label_index]

        if self.args.label >= 0:
            self.train_images = self.train_images[self.train_codes ==
                                                  self.args.label]
            self.test_images = self.test_images[self.test_codes ==
                                                self.args.label]

        log('[Testing] using %d input channels' % self.test_images.shape[3])
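        # Rebuild encoder and decoder with the same architecture options used for training so the saved weights can be loaded.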
        network_units = list(map(int, self.args.network_units.split(',')))
        self.encoder = models.LearnedVariationalEncoder(
            self.args.latent_space_size,
            0,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        self.decoder = models.LearnedDecoder(
            self.args.latent_space_size,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        log(self.encoder)
        log(self.decoder)

        assert os.path.exists(self.args.encoder_file) and os.path.exists(
            self.args.decoder_file)
        state = State.load(self.args.encoder_file)
        log('[Testing] loaded %s' % self.args.encoder_file)

        self.encoder.load_state_dict(state.model)
        log('[Testing] loaded encoder')

        state = State.load(self.args.decoder_file)
        log('[Testing] loaded %s' % self.args.decoder_file)

        self.decoder.load_state_dict(state.model)
        log('[Testing] loaded decoder')

        if self.args.use_gpu and not cuda.is_cuda(self.encoder):
            self.encoder = self.encoder.cuda()
        if self.args.use_gpu and not cuda.is_cuda(self.decoder):
            self.decoder = self.decoder.cuda()

        log('[Testing] model needs %gMiB' %
            ((cuda.estimate_size(self.encoder) +
              cuda.estimate_size(self.decoder)) / (1024 * 1024)))
        self.test()
Ejemplo n.º 31
0
    def load_data(self):
        """
        Load data.
        """

        self.train_images = utils.read_hdf5(
            self.args.train_images_file).astype(numpy.float32)
        log('[Training] read %s' % self.args.train_images_file)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Training] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.train_images.shape) < 4:
            self.train_images = numpy.expand_dims(self.train_images, axis=3)
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Training] no color images, adjusted size')
        self.resolution = self.train_images.shape[2]
        log('[Training] resolution %d' % self.resolution)

        self.train_codes = utils.read_hdf5(self.args.train_codes_file).astype(
            numpy.float32)
        log('[Training] read %s' % self.args.train_codes_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.float32)
        log('[Training] read %s' % self.args.test_codes_file)

        self.train_codes = self.train_codes[:, self.args.label_index]
        self.test_codes = self.test_codes[:, self.args.label_index]

        if self.args.label >= 0:
            self.train_images = self.train_images[self.train_codes ==
                                                  self.args.label]
            self.test_images = self.test_images[self.test_codes ==
                                                self.args.label]

        if self.args.max_samples > 0:
            self.train_images = self.train_images[:self.args.max_samples]
            self.train_codes = self.train_codes[:self.args.max_samples]
Ejemplo n.º 32
0
    def test_VehicleController(self):
        log('Start running the test cases')
        Request().send_request(apifile_dir + 'VehicleController.xlsx',
                               'Sheet1')  # only the xxx.xlsx file needs to be changed
Ejemplo n.º 33
0
    def train(self):
        """
        Training configuration.
        """

        def get_augmentation(crop=True, flip=True):
            augmenters = []
            if crop:
                augmenters.append(iaa.CropAndPad(
                    px=((0, 4), (0, 4), (0, 4), (0, 4)),
                    pad_mode='constant',
                    pad_cval=(0, 0),
                ))
            if flip:
                augmenters.append(iaa.Fliplr(0.5))

            return iaa.Sequential(augmenters)

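        # Write summaries either to a pickle-backed writer or to TensorBoard, depending on the flag.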
        writer = common.summary.SummaryPickleWriter('%s/logs/' % self.args.directory, max_queue=100)
        if self.args.tensorboard:
            writer = torch.utils.tensorboard.SummaryWriter('%s/logs/' % self.args.directory, max_queue=100)

        crop = False
        flip = False
        if self.args.dataset == 'svhn':
            crop = True
        elif self.args.dataset == 'cifar10':
            crop = True
            flip = True

        epochs = 200
        snapshot = 10

        model_file = '%s/classifier.pth.tar' % self.args.directory
        incomplete_model_file = find_incomplete_state_file(model_file)
        load_file = model_file
        if incomplete_model_file is not None:
            load_file = incomplete_model_file

        start_epoch = 0
        if os.path.exists(load_file):
            state = common.state.State.load(load_file)
            self.model = state.model
            start_epoch = state.epoch + 1
            log('loaded %s' % load_file)
        else:
            self.model = models.ResNet(10, [self.trainset.images.shape[3], self.trainset.images.shape[1], self.trainset.images.shape[2]],
                                       blocks=[3, 3, 3])
        if self.args.cuda:
            self.model = self.model.cuda()

        augmentation = get_augmentation(crop=crop, flip=flip)
        optimizer = torch.optim.SGD(self.model.parameters(), lr=0.075, momentum=0.9)
        scheduler = common.train.get_exponential_scheduler(optimizer, batches_per_epoch=len(self.trainloader),
                                                           gamma=0.97)
        trainer = common.train.NormalTraining(self.model, self.trainloader, self.testloader, optimizer, scheduler,
                                              augmentation=augmentation, writer=writer, cuda=self.args.cuda)

        self.model.train()
        for epoch in range(start_epoch, epochs):
            trainer.step(epoch)
            writer.flush()
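            # Checkpoint every epoch; keep only snapshots on the snapshot interval and delete the rest.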

            snapshot_model_file = '%s/classifier.pth.tar.%d' % (self.args.directory, epoch)
            common.state.State.checkpoint(snapshot_model_file, self.model, optimizer, scheduler, epoch)

            previous_model_file = '%s/classifier.pth.tar.%d' % (self.args.directory, epoch - 1)
            if os.path.exists(previous_model_file) and (epoch - 1) % snapshot > 0:
                os.unlink(previous_model_file)

        previous_model_file = '%s/classifier.pth.tar.%d' % (self.args.directory, epoch - 1)
        if os.path.exists(previous_model_file) and (epoch - 1) % snapshot > 0:
            os.unlink(previous_model_file)

        common.state.State.checkpoint(model_file, self.model, optimizer, scheduler, epoch)
Ejemplo n.º 34
0
    def test(self):
        """
        Test classifier to identify valid samples to attack.
        """

        self.model.eval()
        assert self.model.training is False
        assert self.perturbation_codes.shape[0] == self.perturbations.shape[0]
        assert self.test_codes.shape[0] == self.test_images.shape[0]
        assert len(self.perturbations.shape) == 4
        assert len(self.test_images.shape) == 4

        perturbations_accuracy = None
        num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])
            batch_perturbations = common.torch.as_variable(self.perturbations[b_start: b_end], self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.perturbation_codes[b_start: b_end], self.args.use_gpu)
            batch_perturbations = batch_perturbations.permute(0, 3, 1, 2)

            output_classes = self.model(batch_perturbations)
            values, indices = torch.max(torch.nn.functional.softmax(output_classes, dim=1), dim=1)
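            # errors is nonzero exactly where the predicted label differs from the true label.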
            errors = torch.abs(indices - batch_classes)
            perturbations_accuracy = common.numpy.concatenate(perturbations_accuracy, errors.data.cpu().numpy())

            for n in range(batch_perturbations.size(0)):
                log('[Testing] %d: original success=%d, transfer accuracy=%d' % (n, self.original_success[b_start + n], errors[n].item()))

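        # Attempts the target model still classifies correctly did not transfer; mark them -1, then reorder to (attempts, samples).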
        self.transfer_success[perturbations_accuracy == 0] = -1
        self.transfer_success = self.transfer_success.reshape((self.N_samples, self.N_attempts))
        self.transfer_success = numpy.swapaxes(self.transfer_success, 0, 1)

        utils.makedir(os.path.dirname(self.args.transfer_success_file))
        utils.write_hdf5(self.args.transfer_success_file, self.transfer_success)
        log('[Testing] wrote %s' % self.args.transfer_success_file)

        num_batches = int(math.ceil(self.test_images.shape[0] / self.args.batch_size))
        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size, self.test_images.shape[0])
            batch_images = common.torch.as_variable(self.test_images[b_start: b_end], self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.test_codes[b_start: b_end], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_classes = self.model(batch_images)
            values, indices = torch.max(torch.nn.functional.softmax(output_classes, dim=1), dim=1)
            errors = torch.abs(indices - batch_classes)

            self.transfer_accuracy = common.numpy.concatenate(self.transfer_accuracy, errors.data.cpu().numpy())

            if b % 100 == 0:
                log('[Testing] computing accuracy %d' % b)

        self.transfer_accuracy = self.transfer_accuracy == 0
        log('[Testing] original accuracy=%g' % (numpy.sum(self.original_accuracy)/float(self.original_accuracy.shape[0])))
        log('[Testing] transfer accuracy=%g' % (numpy.sum(self.transfer_accuracy)/float(self.transfer_accuracy.shape[0])))
        log('[Testing] accuracy difference=%g' % (numpy.sum(self.transfer_accuracy != self.original_accuracy)/float(self.transfer_accuracy.shape[0])))
        log('[Testing] accuracy difference on %d samples=%g' % (self.N_samples, numpy.sum(self.transfer_accuracy[:self.N_samples] != self.original_accuracy[:self.N_samples])/float(self.N_samples)))
        self.transfer_accuracy = numpy.logical_and(self.transfer_accuracy, self.original_accuracy)

        utils.makedir(os.path.dirname(self.args.transfer_accuracy_file))
        utils.write_hdf5(self.args.transfer_accuracy_file, self.transfer_accuracy)
        log('[Testing] wrote %s' % self.args.transfer_accuracy_file)
Ejemplo n.º 35
0
#          By:
# Description:
# **************************************************************************
from lxml import html
from random import sample
from string import ascii_letters, digits
from time import sleep
import os
import sys
sys.path.append('..')

from common.pool import ThreadPool
from common.log import log
from common.spider import SpiderMixin

logger = log('spider.log')


class Spider(SpiderMixin):
    def __init__(self, start_url, classify):
        self.start_url = start_url
        self.classify = classify
        self.prefix_url = 'http://210.29.97.21'

    def thread(self, href, title):
        path = 'download/{}'.format(self.classify)
        filename = 'download/{}/{}.pdf'.format(self.classify, title)
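        # Create the per-category download folder on first use and skip files that were already downloaded.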
        if not os.path.exists(path):
            os.mkdir(path)
        if not os.path.exists(filename):
            self.parse_download(self.request(href), title=title)