Example No. 1
    def __init__(self,
                 numTilings=1,
                 parameters=2,
                 rlAlpha=0.5,
                 rlLambda=0.9,
                 rlGamma=0.9,
                 cTableSize=0):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.tileWidths = list()
        self.parameters = parameters
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma

        self.prediction = None
        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.traceH = TraceHolder((self.numTilings**(self.parameters) + 1),
                                  self.rlLambda, 1000)
        self.F = [0 for item in range(self.numTilings)
                  ]  # the indices of the returned tiles will go in here
        self.theta = [
            0 for item in range((self.numTilings**(self.parameters + 1)) + 1)
        ]  # weight vector.
        self.cTable = CollisionTable(cTableSize, 'safe')  # look into this...
        self.verifier = Verifier(self.rlGamma)
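A minimal usage sketch (an assumption, since this example only shows the constructor): assuming the __init__ above belongs to a TD(λ)-style learner such as the TDLambdaLearner shown in later examples, and that TraceHolder, CollisionTable and Verifier come from the project's own modules, leaving the arguments blank builds a one-tiling learner with the defaults above.

    learner = TDLambdaLearner()  # hypothetical class name; numTilings=1, parameters=2, rlAlpha=0.5, ...
    prediction = learner.update([0.2, 0.7], target=1.0)  # one learning/prediction step on made-up features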
Example No. 2
    def __init__(self,
                 numTilings=2,
                 parameters=2,
                 rlAlpha=0.5,
                 rlLambda=0.9,
                 rlGamma=0.9,
                 cTableSize=0):
        self.numTilings = numTilings
        self.tileWidths = list()
        self.parameters = parameters
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma

        self.prediction = None
        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.F = [0 for item in range(self.numTilings)]
        self.F2 = [0 for item in range(self.numTilings)]
        self.theta = [
            0 for item in range((self.numTilings**(self.parameters + 1)) + 1)
        ]
        self.cTable = CollisionTable(cTableSize, 'safe')
        self.update(None, None)
        self.e = [
            0 for item in range((self.numTilings**(self.parameters + 1)) + 1)
        ]
        self.verifier = Verifier(self.rlGamma)
Example No. 3
    def verify_backup(self, host, dir_to_verify, logger):
        verifier_obj = Verifier(host=host,
                                dir_to_verify=dir_to_verify,
                                logger=logger)
        verifier_obj.setup()

        return verifier_obj.verify()
Example No. 4
    def __init__(self, binary):
        self.binary = os.path.abspath(binary)
        self.payloads = []

        self.bug_finder = BugFinder(self.binary)
        self.analyzer = Analyzer(self.binary)
        self.exploiter = Exploiter(self.binary)
        self.verifier = Verifier(self.binary)
Example No. 5
    def test_verify_timestamp(self):

        v = Verifier(None)
        v.tsRequest = os.path.join(
            settings.VERIFIED_ZIP,
            "1679091c5a880faf6fb5e6087eb1b2dc.tsrequest")
        v.tsResponse = os.path.join(settings.VERIFIED_ZIP,
                                    "1679091c5a880faf6fb5e6087eb1b2dc.p7s")
        self.assertTrue(v.verifyTimestamp())
	def __init__(this):
		Verifier.__init__(this)
		# Below we add two vectors: the feature vector of the ideal Booter and the
		# feature vector of the ideal non-Booter. Currently they are defined as all 1.0
		# or all 0.0; experimenting with different values (e.g. averages of the Booter
		# training dataset, or values more often associated with Booters) did not give
		# consistently better results. We therefore keep the [1.0, ..., 1.0] vector, as it
		# directly corresponds to each element's distance from its maximum value
		# (a small sketch of this distance idea follows this example).
		this.vector_booter     = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
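A small hypothetical sketch of the distance idea described in the comment above (not part of the original project): with a feature vector already scaled to [0.0, 1.0], comparing it against the all-ones ideal-Booter vector amounts to measuring how far each element falls from its maximum value.

    import math

    def distance_to_ideal_booter(features):
        # 'features' is assumed to be a list of values in [0.0, 1.0]
        ideal = [1.0] * len(features)  # the vector_booter kept above
        return math.sqrt(sum((i - f) ** 2 for i, f in zip(ideal, features)))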
Example No. 7
    def verify_backup(self, host, dir_to_verify, logger):
        verifier_obj = Verifier(host=host, dir_to_verify=dir_to_verify,
				logger=logger)

        backup_lock.acquire()
        try:
            verifier_obj.setup()
        finally:
            backup_lock.release()

        return verifier_obj.verify()
 def test_adv_attack(self):
     debug = True
     src = 'testing/tests/data/master2_4s_1080.mp4'
     rend_path = 'testing/tests/data/rend2_4s_1080_adv_attack.mp4'
     verifier = Verifier(
         10,
         'http://storage.googleapis.com/verification-models/verification-metamodel-2020-07-06.tar.xz',
         False, False, debug)
     verification_result = verifier.verify(src, [{'uri': rend_path}])
     print(verification_result)
     assert verification_result[0]['tamper'] == 1
Example No. 9
    def verify_backup(self, host, dir_to_verify, logger):
        verifier_obj = Verifier(host=host,
                                dir_to_verify=dir_to_verify,
                                logger=logger)

        backup_lock.acquire()
        try:
            verifier_obj.setup()
        finally:
            backup_lock.release()

        return verifier_obj.verify()
 def test_classification(self):
     source_dir = '../data/renditions/1080p/'
     rendition_dirs = [
         ('../data/renditions/720p_watermark/', True),
         ('../data/renditions/720p_60-24fps/', False),
     ]
     files = None
     debug = False
     n_samples = 10
     gpu = False
     src_videos = sorted(glob.glob(source_dir + '/*'))
     results = []
     verifier = Verifier(
         n_samples,
         'http://storage.googleapis.com/verification-models/verification-metamodel-2020-07-06.tar.xz',
         gpu, False, debug)
     for src in tqdm.tqdm(src_videos):
         filename = src.split(os.path.sep)[-1]
         if files is not None and not filename in files:
             continue
         i = 0
         for rendition_dir, tamper in rendition_dirs:
             rendition_name = rendition_dir.strip(os.path.sep).split(
                 os.path.sep)[-1]
             rend_path = rendition_dir + os.path.sep + filename
             if not os.path.exists(rend_path):
                 continue
             np.random.seed(123)
             random.seed(123)
             verification_result = verifier.verify(src, [{
                 'uri': rend_path
             }])
             score_meta = float(verification_result[0]["tamper"])
             score_ul = float(verification_result[0]["tamper_ul"])
             score_sl = float(verification_result[0]["tamper_sl"])
             res = {
                 'score': score_meta,
                 'score_ul': score_ul,
                 'score_sl': score_sl
             }
             res['master_filename'] = filename
             res['rendition_type'] = rendition_name
             res['is_tamper'] = tamper
             results.append(res)
     df_res: pd.DataFrame = pd.DataFrame(results)
     df_res.set_index(['master_filename', 'rendition_type'], inplace=True)
     df_res.sort_index(inplace=True)
     df_res['prediction'] = df_res['score'] > 0
     print(df_res)
     # assert accuracy
     assert np.sum(
         df_res.prediction == df_res.is_tamper) / len(df_res) >= 0.8
	def __init__(this):
		Verifier.__init__(this)
		
		this.p_booter 	  = 0.1001
		this.p_non_booter = 0.8999

		# calculated: see 'naive_bayes_probabilities.txt'
		this.p_booter_characteristics     = [
    		0.97, 0.93, 0.94, 0.37, 0.89, 0.89, 0.38, 0.72, 0.85, 0.74, 0.92, 0.52, 0.22, 0.44, 0.82
		]
		this.p_non_booter_characteristics = [
			0.23, 0.80, 0.67, 0.06, 0.14, 0.62, 0.28, 0.18, 0.35, 0.16, 0.20, 0.19, 0.19, 0.36, 0.66
		]
 def __init__(this):
     Verifier.__init__(this)
     # Below we add two vectors: the feature vector of the ideal Booter and the
     # feature vector of the ideal non-Booter. Currently they are defined as all 1.0
     # or all 0.0; experimenting with different values (e.g. averages of the Booter
     # training dataset, or values more often associated with Booters) did not give
     # consistently better results. We therefore keep the [1.0, ..., 1.0] vector, as it
     # directly corresponds to each element's distance from its maximum value.
     this.vector_booter = [
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.0, 1.0
     ]
    def __init__(self, numTilings = 1, num_bins = 2, rlAlpha = 0.5, rlLambda = 0.9, rlGamma = 0.9, cTableSize=0):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.num_bins = num_bins
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
    
        self.mem_size = 1048576 # 16,384 or 8,192 or 1,048,576 or 8,388,608 or 16,777,216 or 33,554,432
        self.prediction = None
        self.current_prediction = 0
        self.delta = 0
        self.lastS = None
        self.previous_tiles = [0 for item in range(self.numTilings)]
#         self.previous_state = [None for item in range(self.numTilings*(self.num_bins)**10)]
        self.previous_prediction = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.traceH = TraceHolder(self.mem_size, 0.01, 1000) # TraceHolder(mem, minT, maxN)
        self.F = [0 for item in range(self.numTilings)] # the indices of the returned tiles will go in here
        self.theta = [0 for item in range(self.mem_size)] # weight vector.
#         self.weights = [0 for item in range(self.numTilings*(self.num_bins)**10)]
#         self.e_trace = [0 for item in range(self.numTilings*(self.num_bins)**10)] # added by Ann
        self.cTable = CollisionTable(cTableSize, 'super safe') # look into this...
#         stats = self.cTable.stats()
#         print stats
        self.verifier = Verifier(self.rlGamma)
Example No. 14
    def test_verification_speed(self):
        """
        Sanity test to ensure that verification is significantly faster than transcoding
        @return:
        """
        np.random.seed(123)
        random.seed(123)

        n_samples = 10
        n_tests = 10
        codec = 'libx264'

        v = Verifier(
            n_samples,
            'http://storage.googleapis.com/verification-models/verification-metamodel-fps2.tar.xz',
            False, False, False)
        res_2s = self.get_verification_and_transcoding_speed(
            v, 'testing/tests/data/master_2s_1080.mp4',
            'testing/tests/data/rend_2s_720_bw.mp4', n_tests, codec)
        res_4s = self.get_verification_and_transcoding_speed(
            v, 'testing/tests/data/master_4s_1080.mp4',
            'testing/tests/data/rend_4s_720_bw.mp4', n_tests, codec)

        print(f'Verification vs transcoding for 2s video (1080 to 720):')
        print(res_2s)

        print(f'Verification vs transcoding for 4s video (1080 to 720):')
        print(res_4s)

        assert res_2s['best_verification_time'] < res_2s[
            'best_transcoding_time']
        assert res_4s['best_verification_time'] < res_4s[
            'best_transcoding_time']
Example No. 15
def init(conf):
    """Creates all the required object to run the program

    Args:
        conf: Config object.

    Returns:
        ControlServer object.
    """
    # Setup logger
    logPath = path.join(conf['dataDir'], 'logs/server.log')
    logFile = DailyLogFile.fromFullPath(logPath)
    log.startLogging(logFile)

    # Create DHT Server
    server = yield initServer(conf['serverPort'], conf['bootStrapServer'],
                              8468)
    dht = DHTServer(server)

    # Create key and cert management objects
    keyStore = keystore.KeyStore(conf['dataDir'])
    keys = KeyManager(dht, keyStore)
    certs = CertManager(dht, keyStore)
    aclDir = path.join(conf['dataDir'], 'acl')
    verifier = Verifier(certs, keyStore, aclDir, conf["searchDepth"])

    # Return value so it doesn't get garbage collected
    returnValue(
        ControlServer(conf['localPort'], dht, keys, certs, verifier, keyStore,
                      reactor))
Example No. 16
    def __init__(self, actions, numTilings = 1, parameters = 2, rlAlpha = 0.5, rlLambda = 0.9,
                 rlGamma = 0.9, rlEpsilon = 0.1, cTableSize=0, action_selection = 'softmax'):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.tileWidths = list()
        self.parameters = parameters
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
        self.rlEpsilon = rlEpsilon
        self.action_selection = action_selection

        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.lastAction = None
        self.currentAction = None

        self.actions = actions # the number of actions we can select from
        self.traceH = TraceHolder((self.numTilings**(self.parameters)+1), self.rlLambda, 1000)
        self.F = [[0 for item in range(self.numTilings)] for i in range(actions)] # the indices of the returned tiles will go in here
        self.q_vals = [0 for i in range(actions)]
        self.q = {} # per-action weight vectors
        for action in range(actions):
            self.q[action] = [0 for item in range((self.numTilings**(self.parameters+1))+1)] # action and weight vec
        self.cTable = CollisionTable(cTableSize, 'safe') # look into this...
        self.verifier = Verifier(self.rlGamma)
Example No. 17
    def __init__(self, data_loader, config, val_loader):

        # Data loader
        self.data_loader = data_loader
        self.verifier = Verifier(val_loader, config)

        # Model hyper-parameters
        self.imsize = config.imsize
        self.parallel = config.parallel
        self.arch = config.arch

        # tensorboard
        self.writer = SummaryWriter('runs/training' + '_' + self.arch)

        self.epochs = config.epochs
        self.batch_size = config.batch_size
        self.num_workers = config.num_workers
        self.total_iters = self.epochs * len(self.data_loader)

        self.classes = config.classes
        self.g_lr = config.g_lr
        self.momentum = config.momentum
        self.weight_decay = config.weight_decay
        self.pretrained_model = config.pretrained_model  # int type
        self.indicator = False if self.pretrained_model > 0 else True

        self.img_path = config.img_path
        self.label_path = config.label_path
        self.model_save_path = config.model_save_path
        self.sample_path = config.sample_path
        self.sample_step = config.sample_step
        self.tb_step = config.tb_step

        # Path
        self.sample_path = osp.join(config.sample_path, self.arch)
        self.model_save_path = osp.join(config.model_save_path, self.arch)

        self.build_model()

        # Start with trained model
        if self.pretrained_model:
            self.load_pretrained_model()

        self.lr_scheduler = WarmupPolyLR(
            self.g_optimizer, max_iters=self.total_iters, power=0.9,
            warmup_factor=1.0 / 3, warmup_iters=500,
            warmup_method='linear')
Example No. 18
    def __init__(self, connection_string):
        super(VerifyTalent, self).__init__('VerifyTalent', connection_string)
        self.register_function('verify', self.__verify)
        self.register_function('sign', self.__sign)
        self.register_function('signerdid', self.__signerdid)
        self.verifier = Verifier()

        jwk_raw = os.getenv('JWK')

        if jwk_raw and jwk_raw != '':
            try:
                self.key = jwk.JWK.from_json(jwk_raw)
                self.logger.info('Successfully stored Key from JWK: {}'.format(
                    self.key))
            # pylint: disable=broad-except
            except Exception as ex:
                self.logger.error('Could not import JWK: {}'.format(ex))
Example No. 19
def main():
    while True:
        while True:
            try:
                inputFile = input("input file path: ")
                ifhand = open(inputFile)
            except FileNotFoundError:
                print("File not Found.")
                continue
            break
        message = ifhand.readline()
        genFunc = ifhand.readline()
        ## handle malformed inputs, e.g. extra spaces or newline characters
        message = message.replace(" ", "")
        message = message.replace("\n", '')
        genFunc = genFunc.replace(" ", "")
        genFunc = genFunc.replace("\n", "")
        print(
            "Commands:\n  Generator only -> 1\n  Generator-verifier -> 2\n  Generator-Verifier-Alter-Verifier->3\n"
        )
        command = input("Enter command number:")
        if command == '1':
            print("Transmitted message: " + generator(message, genFunc))
        elif command == '2':
            transmittedMessage = generator(message, genFunc)
            print("transmitted message: " + transmittedMessage)
            Verifier(transmittedMessage, genFunc)
        elif command == '3':
            transmittedMessage = generator(message, genFunc)
            print("transmitted message: " + transmittedMessage)
            Verifier(transmittedMessage, genFunc)
            bit_position = input("Which bit to alter? ")
            altered_msg = Alter(transmittedMessage, bit_position)
            Verifier(altered_msg, genFunc)
        else:
            print("Choose a valid command number")
            continue
        exit = input("exit?Y/N\n")
        if exit == 'y' or exit == 'Y':
            break
        else:
            print("__________________________\n\n")
            continue
Example No. 20
class VerifyTalent(FunctionTalent):
    def __init__(self, connection_string):
        super(VerifyTalent, self).__init__('VerifyTalent', connection_string)
        self.register_function('verify', self.__verify)
        self.register_function('sign', self.__sign)
        self.register_function('signerdid', self.__signerdid)
        self.verifier = Verifier()

        jwk_raw = os.getenv('JWK')

        if jwk_raw and jwk_raw != '':
            try:
                self.key = jwk.JWK.from_json(jwk_raw)
                self.logger.info('Successfully stored Key from JWK: {}'.format(
                    self.key))
            # pylint: disable=broad-except
            except Exception as ex:
                self.logger.error('Could not import JWK: {}'.format(ex))

    # pylint: disable=unused-argument
    def __verify(self, payload, ev, evtctx):
        self.logger.info(
            'Received verification request for value {}'.format(payload))
        result = self.verifier.verify(payload)

        # If error message
        if isinstance(result, str):
            self.logger.error('Verify failed: {}'.format(result))
            return result
        else:
            return self.verifier.verify(payload).decode('utf-8')

    # pylint: disable=unused-argument
    async def __sign(self, payload, ev, evtctx):
        if self.key is None or self.key == '':
            return 'no key given'

        return await self.verifier.sign(payload, self.key)

    # pylint: disable=unused-argument
    async def __signerdid(self, payload, ev, evtctx):
        return await self.verifier.signer_did(payload)
Example No. 21
class AEGG(object):
    def __init__(self, binary):
        self.binary = os.path.abspath(binary)
        self.payloads = []

        self.bug_finder = BugFinder(self.binary)
        self.analyzer = Analyzer(self.binary)
        self.exploiter = Exploiter(self.binary)
        self.verifier = Verifier(self.binary)

    def _save(self, payload, file_name):
        with open(file_name, 'w') as f:
            f.write(payload)

    def exploit_gen(self, path):
        analysis = self.analyzer.analyze(path)
        for payload in self.exploiter.generate(path, analysis):
            if not payload:
                break
            if self.verifier.verify(payload):
                self.payloads.append(payload)
                l.info('Generated!')
                return True
        l.info('Can not generate any payload.')
        return False

    def hack(self, n=None, paths=None):
        """
        n: number paths want to check
        paths: angr path object
        """
        n = 1 if n is None else n
        paths = [] if paths is None else paths

        l.info('Start hacking ...')
        while len(paths) < n:
            found_paths = self.bug_finder.find()
            if found_paths is None:
                break
            paths.extend(found_paths)
        for path in paths:
            self.exploit_gen(path)
        l.info('Completed.')

    def save(self, file_name=None):
        file_name = self.binary if file_name is None else file_name
        if len(self.payloads) == 1:
            ext = 'py' if self.payloads[0].ptype == 'script' else 'exp'
            self._save(self.payloads[0].content, '%s.%s' % (file_name, ext))
        else:
            for i in range(len(self.payloads)):
                ext = 'py' if self.payloads[i].ptype == 'script' else 'exp'
                self._save(self.payloads[i].content,
                           '%s-%d.%s' % (file_name, i, ext))
Example No. 22
    def __init__(self,
                 actions,
                 numTilings=1,
                 parameters=2,
                 rlAlpha=0.5,
                 rlLambda=0.9,
                 rlGamma=0.9,
                 rlEpsilon=0.1,
                 cTableSize=0,
                 action_selection='softmax'):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.tileWidths = list()
        self.parameters = parameters
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
        self.rlEpsilon = rlEpsilon
        self.action_selection = action_selection

        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.lastAction = None
        self.currentAction = None

        self.actions = actions  # the number of actions we can select from
        self.traceH = TraceHolder((self.numTilings**(self.parameters) + 1),
                                  self.rlLambda, 1000)
        self.F = [[0 for item in range(self.numTilings)]
                  for i in range(actions)
                  ]  # the indices of the returned tiles will go in here
        self.q_vals = [0 for i in range(actions)]
        self.q = {}  # per-action weight vectors
        for action in range(actions):
            self.q[action] = [
                0
                for item in range((self.numTilings**(self.parameters + 1)) + 1)
            ]  # action and weight vec
        self.cTable = CollisionTable(cTableSize, 'safe')  # look into this...
        self.verifier = Verifier(self.rlGamma)
Example No. 23
class VerifiedTxDb(BaseTxDb):
    def __init__(self, model, config):
        super(VerifiedTxDb, self).__init__(model, config)
        self.verifier = Verifier(self.model.get_blockchain_state())
        self.confirmed_txs = set()

    def identify_tx_status(self, txhash):
        if txhash in self.confirmed_txs:
            return TX_STATUS_CONFIRMED
        try:
            verified = self.verifier.verify_merkle(txhash)
        except HTTPError:
            verified = False
        if verified:
            confirmations = self.verifier.get_confirmations(txhash)
            if confirmations == 0:
                return TX_STATUS_UNCONFIRMED
            else:
                self.confirmed_txs.add(txhash)
                return TX_STATUS_CONFIRMED
        else:
            return TX_STATUS_INVALID
Example No. 24
class VerifiedTxDb(BaseTxDb):
    def __init__(self, model, config):
        super(VerifiedTxDb, self).__init__(model, config)
        self.verifier = Verifier(self.model.get_blockchain_state())
        self.confirmed_txs = set()

    def identify_tx_status(self, txhash):
        if txhash in self.confirmed_txs:
            return TX_STATUS_CONFIRMED
        try:
            verified = self.verifier.verify_merkle(txhash)
        except HTTPError:
            verified = False
        if verified:
            confirmations = self.verifier.get_confirmations(txhash)
            if confirmations == 0:
                return TX_STATUS_UNCONFIRMED
            else:
                self.confirmed_txs.add(txhash)
                return TX_STATUS_CONFIRMED
        else:
            return TX_STATUS_INVALID
Example No. 25
def main():
    print("Programa desarrollado en Python 3.7 por:")
    print("David Armando Rodríguez Varón - 20181020041")
    print("Juan Sebastián Sánchez Tabares - 20181020008")
    print("")
    print("Bienvenido al verificador de expresiones lógicas.")
    print(
        "Este programa realiza la verificación de si una expresión lógica se encuentra bien escrita."
    )
    print("Para lo cual debe tener en cuenta lo siguiente:")
    print(
        "1. No debe utilizar los siguientes paréntesis para encerrar '{' '}'")
    print("2. De igual forma tampoco los siguientes '[' ']'")
    print("3. Por ende únicamente es válido el uso de los paréntesis. '(' ')'")
    print("4. Como ejemplo de la manera en la que debe escribir la expresión:")
    print("((p and q) then (q iff (r or not s)))")
    print(
        "5. Como se puede observar todo va entre parentesis de forma ordenada."
    )
    print(
        "6. No son considerados válidos los espacios antes o despues de los paréntesis."
    )
    print(
        "Si se incumple cualquiera de las condiciones anteriores se considerará como una expresión no válida."
    )
    print("")
    print("EMPECEMOS")
    print("Ingrese la expresión lógica a ser verificada:")
    expr = input()
    ver = Verifier()
    valid, chars, variables, conds = ver.evaluate(expr)
    if valid == True:
        print("La expresión es válida")
        print("Los términos son: \n{}".format(chars))
        print("Las variables usadas son: ", variables)
        print("Los condicionales usados son: ", conds)
    else:
        print("La expresión NO es válida")
Example No. 26
    def test_zip_verifier(self):

        token = AccessToken.objects.get(id=4)

        zipName = Download.objects.get(tokenID=token).folder
        v = Verifier(token)

        v.createZIPtoVerify()

        #now the DB should be updated
        dUp = Download.objects.get(tokenID=token)
        self.assertTrue(dUp.verificationZIP)

        #now create the same zip and verify the signatures
        tempZip = os.path.join(settings.VERIFIED_ZIP, zipName + ".zip.test")
        newVerifier = Verifier(token)
        newVerifier.createZIP(".zip.test")

        # add path for crypto
        cryptoPath = os.path.join(os.path.dirname(settings.BASE_DIR), "finder")

        if not cryptoPath in sys.path:
            sys.path.insert(1, cryptoPath)
            del cryptoPath

        import crypto

        #verify the signature
        h = crypto.sha256File(tempZip)
        res = crypto.verifyRSAsignatureSHA256(h, dUp.verificationZIPSignature,
                                              settings.PUB_KEY)
        self.assertTrue(res)

        #generate a timestamp request
        v.createTimestampRequest()
        requestCreated = os.path.isfile(
            os.path.join(settings.VERIFIED_ZIP, dUp.folder + ".tsrequest"))
        self.assertTrue(requestCreated)
Example No. 27
    def __init__(self, *args, **kwargs):
        super(Multijet, self).__init__(*args, **kwargs)
        self.dp = None
        '''
        the verifiers is a thread dict of cpid=>verify_thread
        '''
        self.verifiers = {}
        # f = FlowServer()
        # f.start()
        t = TriggerServer(self.on_trigger)
        t.start()

        self.msg_buf = {}
        '''
        add more cps here to start multi verify threads
        '''
        self.cps = [100]

        self.queue = Queue()
        for cp in self.cps:
            verifier = Verifier(int(cp), queue=self.queue)
            self.verifiers[int(cp)] = verifier
            verifier.start()
Example No. 28
def set_up_scene():
    global simulation
    gui.clear_scene()
    gui.textEdits[0].clear()
    s = gui.get_field(0)
    scene = read_scene.read_scene(s)
    verifier = Verifier(*scene)

    gui_robots = []
    gui_objectives = []
    gui_obstacles = []
    points = []

    draw_grid(100)

    blue = Color("blue")
    colors = list(blue.range_to(Color("red"), len(scene[0])))
    colors = [QColor(color.get_hex()) for color in colors]

    for i, robot in enumerate(scene[0]):
        color = colors[i % len(colors)]
        gui_robots.append(
            gui.add_polygon(unit_square(robot),
                            line_color=QtCore.Qt.transparent,
                            fill_color=color))
        points.append(tuple(robot))

    for obstacle in scene[2]:
        color = QtCore.Qt.gray
        gui_obstacles.append(
            gui.add_polygon(unit_square(obstacle),
                            line_color=QtCore.Qt.transparent,
                            fill_color=color))
        points.append(tuple(obstacle))

    for i, objective in enumerate(scene[1]):
        color = colors[i % len(colors)]
        gui_objectives.append(
            gui.add_polygon(unit_square(objective),
                            line_color=QColor(color),
                            fill_color=QtCore.Qt.transparent))
        points.append(tuple(objective))

    simulation = Simulation(gui_robots, gui_objectives, gui_obstacles,
                            verifier)
    center = bounding_rect_center(points)
    gui.graphicsView.translate(center[0], center[1])
    gui.graphicsView.centerOn(center[0], center[1])
Example No. 29
 def __init__(self, numTilings = 1, parameters = 2, rlAlpha = 0.5, rlLambda = 0.9, rlGamma = 0.9, cTableSize=0):
     """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
     self.numTilings = numTilings
     self.tileWidths = list()
     self.parameters = parameters
     self.rlAlpha = rlAlpha
     self.rlLambda = rlLambda
     self.rlGamma = rlGamma
 
     self.prediction = None
     self.lastS = None
     self.lastQ = None
     self.lastPrediction = None
     self.lastReward = None
     self.traceH = TraceHolder((self.numTilings**(self.parameters)+1), self.rlLambda, 1000)
     self.F = [0 for item in range(self.numTilings)] # the indices of the returned tiles will go in here
     self.theta = [0 for item in range((self.numTilings**(self.parameters+1))+1)] # weight vector.
     self.cTable = CollisionTable(cTableSize, 'safe') # look into this...
     self.verifier = Verifier(self.rlGamma)
 def __init__(self, numTilings = 2, num_bins = 2, rlAlpha = 0.5, rlLambda = 0.9, rlGamma = 0.9, cTableSize=0):
     self.numTilings = numTilings
     self.tileWidths = list()
     self.num_bins = num_bins
     self.rlAlpha = rlAlpha
     self.rlLambda = rlLambda
     self.rlGamma = rlGamma
 
     self.prediction = None
     self.lastS = None
     self.lastQ = None
     self.lastPrediction = None
     self.lastReward = None
     self.F = [0 for item in range(self.numTilings)]
     self.F2 = [0 for item in range(self.numTilings)]
     self.theta = [0 for item in range((self.numTilings**(self.num_bins+1))+1)]
     self.cTable = CollisionTable(cTableSize, 'safe')
     self.update(None, None)
     self.e = [0 for item in range((self.numTilings**(self.num_bins+1))+1)]
     self.verifier = Verifier(self.rlGamma)
    def test_verifier(self):
        verifier = Verifier(self.crs)

        self.assertEqual(verifier.verify(self.ciphertexts, self.proof),
                         (True, True, True))

        prevshuffle = self.proof['shuffled_ciphertexts']
        g1 = self.crs.lff.gen1()
        g2 = self.crs.lff.gen2()
        self.proof['shuffled_ciphertexts'][3] = ((g1, g1, g1), (g2, g2, g2))

        self.assertEqual(verifier.verify(self.ciphertexts, self.proof),
                         (True, False, False))

        self.proof['shuffled_ciphertexts'] = prevshuffle
        self.crs.g1alpha = self.crs.g1alpha * 2
        verifier = Verifier(self.crs)

        self.assertEqual(verifier.verify(self.ciphertexts, self.proof),
                         (False, False, False))
Example No. 32
	def test_zip_verifier(self):

		token = AccessToken.objects.get(id=4)

		zipName = Download.objects.get(tokenID=token).folder
		v = Verifier(token)

		v.createZIPtoVerify()

		#now the DB should be updated
		dUp = Download.objects.get(tokenID=token)
		self.assertTrue(dUp.verificationZIP)
		
		#now create the same zip and verify the signatures
		tempZip = os.path.join(settings.VERIFIED_ZIP,zipName+".zip.test")
		newVerifier = Verifier(token)
		newVerifier.createZIP(".zip.test")

		# add path for crypto
		cryptoPath = os.path.join(os.path.dirname(settings.BASE_DIR), "finder")

		if not cryptoPath in sys.path:
			sys.path.insert(1, cryptoPath)
			del cryptoPath

		import crypto

		#verify the signature
		h = crypto.sha256File(tempZip)
		res = crypto.verifyRSAsignatureSHA256(h,dUp.verificationZIPSignature,settings.PUB_KEY)
		self.assertTrue(res)
	
		#generate a timestamp request
		v.createTimestampRequest()
		requestCreated = os.path.isfile(os.path.join(settings.VERIFIED_ZIP,dUp.folder+".tsrequest"))
		self.assertTrue(requestCreated)
Example No. 33
def create_app():
    app = Flask(__name__, instance_relative_config=True)

    app.config.from_pyfile(os.path.join(os.path.dirname(__file__),
                                        'config.py'),
                           silent=False)

    JWTManager(app)
    CORS(app)

    import auth
    import student
    import teacher
    import admin

    app.extensions['student_reset_password_verifier'] = Verifier(
        'Ось код для відновлення паролю:', 'Відновлення паролю')
    app.extensions['teacher_reset_password_verifier'] = Verifier(
        'Ось код для відновлення паролю:', 'Відновлення паролю')
    app.extensions['student_account_changes_verifier'] = Verifier(
        'Ось код для підтвердження змін данних аккаунту:',
        'Підтвердження змін')
    app.extensions['teacher_account_changes_verifier'] = Verifier(
        'Ось код для підтвердження змін данних аккаунту:',
        'Підтвердження змін')
    app.extensions['teacher_email_verifier'] = Verifier(
        'Ось код для підтвердження електронної пошти:', 'Підтвердження пошти')
    app.extensions['student_email_verifier'] = Verifier(
        'Ось код для підтвердження електронної пошти:', 'Підтвердження пошти')

    @app.errorhandler(HTTPException)
    def handle_exception(e):
        return jsonify({
            "code": e.code,
            "name": e.name,
            "description": e.description,
        }), e.code

    app.register_blueprint(auth.bp, url_prefix="/api/auth")
    app.register_blueprint(student.bp, url_prefix="/api/student")
    app.register_blueprint(teacher.bp, url_prefix="/api/teacher")
    app.register_blueprint(admin.bp, url_prefix="/api/admin")

    return app
Example No. 34
    def verify_backup(self, host, dir_to_verify, logger):
        verifier_obj = Verifier(host=host, dir_to_verify=dir_to_verify,
				logger=logger)
        verifier_obj.setup()

        return verifier_obj.verify()
Example No. 35
File: kin.py Project: Karumi/Kin
def check_errors(file):
    verifier = Verifier()
    errors = verifier.check_errors(file)
    return errors
Example No. 36
	def __init__(this, use_weights = True):
		Verifier.__init__(this)
		this.LoadData(use_weights)
Example No. 37
class Q_learning(Learner):


    def __init__(self, actions, numTilings = 1, parameters = 2, rlAlpha = 0.5, rlLambda = 0.9,
                 rlGamma = 0.9, rlEpsilon = 0.1, cTableSize=0, action_selection = 'softmax'):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.tileWidths = list()
        self.parameters = parameters
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
        self.rlEpsilon = rlEpsilon
        self.action_selection = action_selection

        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.lastAction = None
        self.currentAction = None

        self.actions = actions # the number of actions we can select from
        self.traceH = TraceHolder((self.numTilings**(self.parameters)+1), self.rlLambda, 1000)
        self.F = [[0 for item in range(self.numTilings)] for i in range(actions)] # the indices of the returned tiles will go in here
        self.q_vals = [0 for i in range(actions)]
        self.q = {} # per-action weight vectors
        for action in range(actions):
            self.q[action] = [0 for item in range((self.numTilings**(self.parameters+1))+1)] # action and weight vec
        self.cTable = CollisionTable(cTableSize, 'safe') # look into this...
        self.verifier = Verifier(self.rlGamma)


    def chooseAction(self, features):
        for action in range(self.actions):
            self.loadFeatures(featureVector=self.F[action], stateVars=features)
            self.q_vals[action] = self.computeQ(action)
        return self.eGreedy()


    def eGreedy(self):
        if random.random() < self.rlEpsilon:
            return random.randrange(self.actions) # random action
        else:
            max_index, max_value = max(enumerate(self.q_vals), key=operator.itemgetter(1))
            return max_index # best action


    def update(self, features, target=None):
        # learning step
        if features != None:
            self.learn(features, target, self.currentAction)

        self.lastAction = self.currentAction
        self.currentAction = self.chooseAction(features)

        # action selection step
        return self.currentAction


    def learn(self, features, reward, action):
        self.loadFeatures(features, self.F)
        currentq = self.computeQ(action)

        if self.lastS != None and self.lastAction != None: # if we're past the first step
            delta = reward - self.lastQ
            delta += self.rlGamma * currentq
            amt = delta * (self.rlAlpha / self.numTilings)

            for i in self.traceH.getTraceIndices():
                self.theta[i] += amt * self.traceH.getTrace(i)

            max_action, max_value = max(enumerate(self.q_vals), key=operator.itemgetter(1))
            if action == max_action:
                self.traceH.decayTraces(self.rlGamma*self.rlLambda)
            else:
                self.traceH.decayTraces(0)
            self.traceH.replaceTraces(self.F[action])

        self.lastQ = currentq
        self.lastS = features
        self.num_steps+=1
        self.verifier.updateReward(reward)
        self.verifier.updatePrediction(self.prediction)


    def loadFeatures (self, stateVars, featureVector):
        loadtiles(featureVector, 0, self.numTilings, self.numTilings**(self.parameters), stateVars)
        print "featureVector " + str(len(self.theta))
        """
        As provided in Rich's explanation
               tiles                   ; a provided array for the tile indices to go into
               starting-element        ; first element of "tiles" to be changed (typically 0)
               num-tilings             ; the number of tilings desired
               memory-size             ; the number of possible tile indices
               floats                  ; a list of real values making up the input vector
               ints)                   ; list of optional inputs to get different hashings
        """

    def computeQ (self, a):
        "compute value of action for current F and theta"
        q = 0
        for i in self.F[a]:
            q += self.theta[i]
        return q
Example No. 38
class TDLambdaLearner(Learner):
    """
    Note: the TileCoder is Rich's Python version, which is still in Alpha.
    See more at: http://webdocs.cs.ualberta.ca/~sutton/tiles2.html#Python%20Versions
    
        Collision Table notes:
            cTableSize is the size the collision table will be instantiated to; it must be a power of two.
            In calls to get tiles, the collision table is passed instead of memory_size, since it already holds it.
        (A usage sketch follows this class.)
    
    """
    def __init__(self, numTilings = 1, parameters = 2, rlAlpha = 0.5, rlLambda = 0.9, rlGamma = 0.9, cTableSize=0):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.tileWidths = list()
        self.parameters = parameters
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
    
        self.prediction = None
        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.traceH = TraceHolder((self.numTilings**(self.parameters)+1), self.rlLambda, 1000)
        self.F = [0 for item in range(self.numTilings)] # the indices of the returned tiles will go in here
        self.theta = [0 for item in range((self.numTilings**(self.parameters+1))+1)] # weight vector.
        self.cTable = CollisionTable(cTableSize, 'safe') # look into this...
        self.verifier = Verifier(self.rlGamma)


    def update(self, features, target=None):
        if features != None:
            self.learn(features, target)
            return self.prediction
        else: return None


    def learn(self, state, reward):
        self.loadFeatures(state, self.F)
        currentq = self.computeQ()
        if self.lastS != None:
            delta = reward - self.lastQ
            delta += self.rlGamma * currentq
            amt = delta * (self.rlAlpha / self.numTilings)
            for i in self.traceH.getTraceIndices():
                self.theta[i] += amt * self.traceH.getTrace(i)
            self.traceH.decayTraces(self.rlGamma)
            self.traceH.replaceTraces(self.F)
        self.lastQ = currentq
        self.lastS = state
        self.prediction = currentq
        self.num_steps+=1
        self.verifier.updateReward(reward)
        self.verifier.updatePrediction(self.prediction)
        

    def computeQ(self):
        q = 0
        for i in self.F:
            q += self.theta[i]
        return q


    def loadFeatures(self, stateVars, featureVector):
        loadtiles(featureVector, 0, self.numTilings, self.numTilings**(self.parameters), stateVars)
        print "featureVector " + str(len(self.theta))
        """ 
        As provided in Rich's explanation
               tiles                   ; a provided array for the tile indices to go into
               starting-element        ; first element of "tiles" to be changed (typically 0)
               num-tilings             ; the number of tilings desired
               memory-size             ; the number of possible tile indices
               floats                  ; a list of real values making up the input vector
               ints)                   ; list of optional inputs to get different hashings
        """


    def loss(self, x, r, prev_state=None):
        """
        Returns the TD error assuming reward r given for 
        transition from prev_state to x
        If prev_state is None will use leftmost element in exp_queue
        """
        if prev_state is None:
            if len(self.exp_queue) < self.horizon:
                return None
            else:
                prev_state = self.exp_queue[0][0]

        vp = r + self.gamma * self.value(x)
        v = self.value(prev_state)
        delta = vp - v
        return delta


    def predict (self,x):
        self.loadFeatures(x, self.F)
        return self.computeQ()
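A minimal usage sketch for the class above (hedged: Learner, TraceHolder, CollisionTable, loadtiles and Verifier are the project's own modules and are assumed importable; the data is made up, and cTableSize is a power of two as the docstring requires).

    learner = TDLambdaLearner(numTilings=4, parameters=2, rlAlpha=0.1, cTableSize=2048)
    for state, reward in [([0.1, 0.7], 1.0), ([0.2, 0.6], 0.0)]:
        prediction = learner.update(state, reward)  # returns the current value estimate
    print(learner.predict([0.15, 0.65]))  # value of a new state, without learning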
Example No. 39
 def __init__(self, model, config):
     super(VerifiedTxDb, self).__init__(model, config)
     self.verifier = Verifier(self.model.get_blockchain_state())
     self.confirmed_txs = set()
Example No. 40
class Trainer(object):
    """Training pipline"""

    def __init__(self, data_loader, config, val_loader):

        # Data loader
        self.data_loader = data_loader
        self.verifier = Verifier(val_loader, config)

        # Model hyper-parameters
        self.imsize = config.imsize
        self.parallel = config.parallel
        self.arch = config.arch

        # tensorboard
        self.writer = SummaryWriter('runs/training' + '_' + self.arch)

        self.epochs = config.epochs
        self.batch_size = config.batch_size
        self.num_workers = config.num_workers
        self.total_iters = self.epochs * len(self.data_loader)

        self.classes = config.classes
        self.g_lr = config.g_lr
        self.momentum = config.momentum
        self.weight_decay = config.weight_decay
        self.pretrained_model = config.pretrained_model  # int type
        self.indicator = False if self.pretrained_model > 0 else True

        self.img_path = config.img_path
        self.label_path = config.label_path
        self.model_save_path = config.model_save_path
        self.sample_path = config.sample_path
        self.sample_step = config.sample_step
        self.tb_step = config.tb_step

        # Path
        self.sample_path = osp.join(config.sample_path, self.arch)
        self.model_save_path = osp.join(config.model_save_path, self.arch)

        self.build_model()

        # Start with trained model
        if self.pretrained_model:
            self.load_pretrained_model()

        self.lr_scheduler = WarmupPolyLR(
            self.g_optimizer, max_iters=self.total_iters, power=0.9,
            warmup_factor=1.0 / 3, warmup_iters=500,
            warmup_method='linear')

    def train(self):
        if self.pretrained_model:
            start = self.pretrained_model + 1
        else:
            start = 0

        criterion = CriterionAll()
        criterion.cuda()
        best_miou = 0

        # Data iterator
        for epoch in range(start, self.epochs):
            self.G.train()

            for i_iter, batch in enumerate(self.data_loader):
                i_iter += len(self.data_loader) * epoch
                # lr = adjust_learning_rate(self.g_lr,
                #                           self.g_optimizer, i_iter, self.total_iters)

                imgs, labels, edges = batch
                size = labels.size()
                imgs = imgs.cuda()
                labels = labels.cuda()

                if self.arch in __BA__:
                    edges = edges.cuda()
                    preds = self.G(imgs)
                    c_loss = criterion(preds, [labels, edges])
                    labels_predict = preds[0][-1]
                else:
                    labels_predict = self.G(imgs)
                    c_loss = cross_entropy2d(
                        labels_predict, labels.long(), reduction='mean')

                self.reset_grad()
                c_loss.backward()
                # Note: for simplicity, the optimizer state is not checkpointed here!!!
                self.g_optimizer.step()
                self.lr_scheduler.step(epoch=None)

                # info on tensorboard
                if (i_iter + 1) % self.tb_step == 0:
                    # scalr info on tensorboard
                    self.writer.add_scalar(
                        'cross_entrophy_loss', c_loss.data, i_iter)
                    self.writer.add_scalar(
                        'learning_rate', self.g_optimizer.param_groups[0]['lr'], i_iter)

                    # image info on tensorboard
                    labels = labels[:, :, :].view(size[0], 1, size[1], size[2])
                    oneHot_size = (size[0], self.classes, size[1], size[2])
                    labels_real = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
                    labels_real = labels_real.scatter_(1, labels.data.long().cuda(), 1.0)

                    label_batch_predict = generate_label(labels_predict, self.imsize)
                    label_batch_real = generate_label(labels_real, self.imsize)
                    img_combine = imgs[0]
                    real_combine = label_batch_real[0]
                    predict_combine = label_batch_predict[0]
                    for i in range(1, self.batch_size):
                        img_combine = torch.cat([img_combine, imgs[i]], 2)
                        real_combine = torch.cat([real_combine, label_batch_real[i]], 2)
                        predict_combine = torch.cat([predict_combine, label_batch_predict[i]], 2)
                    all_combine = torch.cat([denorm(img_combine.cpu().data), real_combine, predict_combine], 1)
                    self.writer.add_image('imresult/img-gt-pred', all_combine, i_iter)
                    # self.writer.add_image('imresult/img', (img_combine.data + 1) / 2.0, i_iter)
                    # self.writer.add_image('imresult/real', real_combine, i_iter)
                    # self.writer.add_image('imresult/predict', predict_combine, i_iter)

                # Sample images in folder
                if (i_iter + 1) % self.sample_step == 0:
                    # labels_sample = generate_label(labels_predict, self.imsize)
                    compare_predict_color = generate_compare_results(imgs, labels_real, labels_predict, self.imsize)
                    # save_image((labels_sample.data), osp.join(self.sample_path, '{}_predict.png'.format(i_iter + 1)))
                    save_image((compare_predict_color.data), osp.join(self.sample_path, '{}_predict.png'.format(i_iter + 1)))

                print('Train iter={} of {} completed, loss={}'.format(
                    i_iter, self.total_iters, c_loss.data))
            print('----- Train epoch={} of {} completed -----'.format(epoch+1, self.epochs))

            # miou = self.verifier.validation(self.G)
            score = self.verifier.validation(self.G)
            # oacc = score["Overall Acc: \t"]
            miou = score["Mean IoU : \t"]
            print("----------------- Total Performance --------------------")
            for k, v in score.items():
                print(k, v)
            print("---------------------------------------------------")
            if miou > best_miou:
                best_miou = miou
                torch.save(self.G.state_dict(), osp.join(
                    self.model_save_path, '{}_{}_G.pth'.format(str(epoch), str(round(best_miou, 4)))))

    def build_model(self):
        self.G = get_model(self.arch, pretrained=self.indicator).cuda()

        if self.parallel:
            self.G = nn.DataParallel(self.G)
        # Loss and optimizer
        self.g_optimizer = torch.optim.SGD(filter(
            lambda p: p.requires_grad, self.G.parameters()), self.g_lr, self.momentum, self.weight_decay)

    def load_pretrained_model(self):
        self.G.load_state_dict(torch.load(osp.join(
            self.model_save_path, '{}_G.pth'.format(self.pretrained_model))))
        print('Loaded trained models (step: {})...!'.format(self.pretrained_model))

    def reset_grad(self):
        self.g_optimizer.zero_grad()
Example No. 41
 def setUp(self):
     self.verifier = Verifier(MODEL_PATH, ARCHITECTURE, MODEL_WEIGHTS)
Example No. 42
	def test_verify_timestamp(self):

		v = Verifier(None)
		v.tsRequest = os.path.join(settings.VERIFIED_ZIP,"1679091c5a880faf6fb5e6087eb1b2dc.tsrequest")
		v.tsResponse = os.path.join(settings.VERIFIED_ZIP,"1679091c5a880faf6fb5e6087eb1b2dc.p7s")
		self.assertTrue(v.verifyTimestamp())
Example No. 43
def create_verifier():
    verifier = Verifier(config.VERIFICATION_MAX_SAMPLES, config.VERIFICATION_MODEL_URI, False, False, False)
    return verifier
class True_Online_TD2(TDLambdaLearner):
    """
        True online TD implementation
            * Has Dutch traces
        (A textbook reference sketch of the update follows this class.)
    """
    def __init__(self, numTilings = 2, num_bins = 2, rlAlpha = 0.5, rlLambda = 0.9, rlGamma = 0.9, cTableSize=0):
        self.numTilings = numTilings
        self.tileWidths = list()
        self.num_bins = num_bins
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
    
        self.prediction = None
        self.lastS = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.F = [0 for item in range(self.numTilings)]
        self.F2 = [0 for item in range(self.numTilings)]
        self.theta = [0 for item in range((self.numTilings**(self.num_bins+1))+1)]
        self.cTable = CollisionTable(cTableSize, 'safe')
        self.update(None, None)
        self.e = [0 for item in range((self.numTilings**(self.num_bins+1))+1)]
        self.verifier = Verifier(self.rlGamma)
        
    def update(self, features, target=None):
        if features != None:
            self.learn(features, target)
            return self.prediction
        else: return None
    
    def learn(self, state, reward):
        
        self.loadFeatures(state, self.F)
        self.currentq = 0
        for i in self.F: # create V(s)
            self.currentq += self.theta[i] 
        
        if self.lastS != None:
            delta = reward + self.rlGamma * self.currentq - self.lastQ # create delta
            
            self.loadFeatures(self.lastS, self.F2) 
            lastQ_2 = 0
            for i in self.F2:
                lastQ_2 += self.theta[i] # create new 
            
            for i in range(len(self.e)):
                self.e[i] *= self.rlGamma*self.rlGamma
            ephi = 0
            for i in self.F2:
                ephi += self.e[i]
                
            for i in self.F2:
                self.e[i] += self.rlAlpha*(1-self.rlGamma*self.rlLambda*ephi)
            
            for i in self.F2:
                self.theta[i] += self.rlAlpha*(self.lastQ - lastQ_2)
                
            for i in range(len(self.theta)):
                self.theta[i] += delta*self.e[i]
                
        self.lastQ = self.currentq
        self.lastS = state
        self.prediction = self.currentq
        self.num_steps+=1
        self.verifier.updateReward(reward)  
        self.verifier.updatePrediction(self.prediction)
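For comparison only (not the project's code): a compact sketch of the textbook true online TD(λ) update with dutch traces for linear function approximation, of which the learn() loop above is a variant.

    import numpy as np

    def true_online_td_step(w, z, x, x_next, reward, v_old, alpha, gamma, lam):
        # w: weights, z: dutch trace, x / x_next: feature vectors, v_old: previous value estimate
        v = w @ x
        v_next = w @ x_next
        delta = reward + gamma * v_next - v
        z = gamma * lam * z + (1.0 - alpha * gamma * lam * (z @ x)) * x  # dutch trace update
        w = w + alpha * (delta + v - v_old) * z - alpha * (v - v_old) * x
        return w, z, v_next  # v_next becomes v_old on the next step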
Example No. 45
    parser.add_argument("--infile", default="in.csv", help="csv file to calculate diff features.")
    parser.add_argument("--calctype", default=0, type=int, help="csv file to calculate diff features.")
    args = parser.parse_args()

    #infile = args.infile
    infile = "data/test_data.csv"
    srcdir = args.srcdir
    renddir = args.renddir
    calctype = args.calctype
    #calctype: 0-diff feature 1-accuracy 2-negative (skip or zero)
    
    max_samples = 10
    debug = False
    logcount = 0
    #'http://storage.googleapis.com/verification-models/verification-metamodel-fps2.tar.xz'    
    verifier = Verifier(10, 'http://storage.googleapis.com/verification-models/verification-metamodel-2020-07-06.tar.xz', False, False, debug)   
    
    outcsv = "accuracy" + datetime.datetime.now().strftime("%m%d%H%M%S") + ".csv"
    fileout = open(outcsv, 'w', newline='')
    wr = csv.writer(fileout)
    wr.writerow(['source', 'rendition', 'infertarget'])

    with open(infile, newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:            
            if row['target'] == '0':
                wrow = []
                srcfile = srcdir + "/" + row['source']
                rendfile = renddir + "/" + row['id']
                result = verifier.verify(srcfile, [{'uri': rendfile}])
                wrow.append(row['id'])
class TDLambdaLearner(Learner):
    """
    Note: the TileCoder is Rich's Python version, which is still in Alpha.
    See more at: http://webdocs.cs.ualberta.ca/~sutton/tiles2.html#Python%20Versions
    
        Collision Table notes:
            cTableSize is the size the collision table will be instantiated to; it must be a power of two.
            In calls to get tiles, the collision table is passed instead of memory_size, since it already holds it.
    
    """
    def __init__(self, numTilings = 1, num_bins = 2, rlAlpha = 0.5, rlLambda = 0.9, rlGamma = 0.9, cTableSize=0):
        """ If you want to run an example of the code, simply just leave the parameters blank and it'll automatically set based on the parameters. """
        self.numTilings = numTilings
        self.num_bins = num_bins
        self.rlAlpha = rlAlpha
        self.rlLambda = rlLambda
        self.rlGamma = rlGamma
    
        self.mem_size = 1048576 # 16,384 or 8,192 or 1,048,576 or 8,388,608 or 16,777,216 or 33,554,432
        self.prediction = None
        self.current_prediction = 0
        self.delta = 0
        self.lastS = None
        self.previous_tiles = [0 for item in range(self.numTilings)]
#         self.previous_state = [None for item in range(self.numTilings*(self.num_bins)**10)]
        self.previous_prediction = None
        self.lastQ = None
        self.lastPrediction = None
        self.lastReward = None
        self.traceH = TraceHolder(self.mem_size, 0.01, 1000) # TraceHolder(mem, minT, maxN)
        self.F = [0 for item in range(self.numTilings)] # the indices of the returned tiles will go in here
        self.theta = [0 for item in range(self.mem_size)] # weight vector.
#         self.weights = [0 for item in range(self.numTilings*(self.num_bins)**10)]
#         self.e_trace = [0 for item in range(self.numTilings*(self.num_bins)**10)] # added by Ann
        self.cTable = CollisionTable(cTableSize, 'super safe') # look into this...
#         stats = self.cTable.stats()
#         print stats
        self.verifier = Verifier(self.rlGamma)
    
#     def Ann_update(self, current_state, numstates, reward=None):
#         if current_state != None:
#             self.Ann_learn(current_state, reward, numstates)
#             return self.current_prediction
#         else: 
#             return None
#      
#     def Ann_learn(self, current_state, reward, numstates):   
#         active_tiles = simple_tiles(self.numTilings, self.numTilings*self.num_bins, current_state, numstates) # returns index of active features
#         print "active tiles = " + str(active_tiles)
#         print "previous tiles = " + str(self.previous_tiles)
#         if self.previous_prediction != None:
# #             self.current_prediction = 0
#             for index in active_tiles:
#                 print 'index = ' + str(index)
#                 self.current_prediction = self.weights[index] # not sure if this is right
# #                 print 'weights[index] = ' + str(self.weights[index])     
#             self.delta = reward + self.rlGamma * self.current_prediction - self.previous_prediction
#             print 'self.delta = ' + str(self.delta)
#             if self.previous_state != None:
#                 self.previous_state = [0 for item in range(self.numTilings*(self.num_bins)**10)]
#                 for index in self.previous_tiles:
#                     self.previous_state[index] = 1 
# #                     print 'previous state = ' + str(self.previous_state)
#                     self.e_trace = [x + y for x, y in zip(self.previous_state, [i * self.rlLambda * self.rlGamma for i in self.e_trace])]
# #                     print 'e_trace = ' + str(self.e_trace)
#             self.weights = [x + y for x, y in zip(self.weights, [i * self.rlAlpha * self.delta for i in self.e_trace])] # alpha needs to be divided by N
# #             print 'weights = ' + str(self.weights)
#         self.previous_tiles = active_tiles
#         self.previous_prediction = self.current_prediction
#         print 'current prediction = ' + str(self.current_prediction)
#         self.verifier.updateReward(reward)
#         self.verifier.updatePrediction(self.current_prediction)
#         self.normalized_prediction = self.current_prediction * (1-self.rlGamma)
#         print 'normalized prediction = ' + str(self.normalized_prediction)
     
    def update(self, features, target=None):
        if features != None:
            self.learn(features, target)
            return self.prediction
        else: return None
      
    def learn(self, state, reward):
        self.loadFeatures(state, self.F)
        currentq = self.computeQ() # computeQ returns w*x' (current prediction)
        if self.lastS != None:
#             print 'reward = ' + str(reward)
            delta = reward + self.rlGamma*currentq - self.lastQ # delta = r + gamma*w*x' - w*x
            for i in self.traceH.getTraceIndices():
                self.theta[i] += delta * (self.rlAlpha / self.numTilings) * self.traceH.getTrace(i) # delta * alpha/N * e
#                 print 'traces = ' + str(self.traceH.getTrace(i))
            self.traceH.decayTraces(self.rlGamma)
            self.traceH.replaceTraces(self.F)
#             self.e_trace = min(rlLambda*self.e_trace + x, 1) # added by Ann
#             print 'delta = ' + str(delta)
#             print 'trace indices = ' + str(self.traceH.getTraceIndices())
#             print 'theta' + str(self.theta)
#             print 'self.F'+ str(self.F)
#             print 'lastS' + str(self.lastS)
        self.lastQ = currentq
        self.lastS = state
        self.prediction = currentq
        self.num_steps+=1
        self.verifier.updateReward(reward)
        self.verifier.updatePrediction(self.prediction)
        self.normalized_prediction = self.prediction * (1 - self.rlGamma)
        

    def computeQ(self):
        q = 0 
        for i in self.F:
            q += self.theta[i]
        return q
    
    def loadFeatures(self, stateVars, featureVector):
#         loadtiles(featureVector, 0, self.numTilings, self.num_bins, stateVars)
#         active_tiles = loadtiles([0], 0, self.numTilings*self.num_bins, 1024, stateVars)
#         buffer = [0] # array of length numtilings
#         tiles(1,512,stateVars)
#         active_tiles = loadtiles([0], 0, self.numTilings, self.num_bins, stateVars)
        self.F = active_tiles = tiles(self.numTilings,self.mem_size,stateVars)
#         print 'tiles = ' + str(active_tiles)
#         active_tiles = simple_tiles(self.numTilings, self.numTilings*self.num_bins, stateVars)
#         simple_tiles(self.numTilings, self.numTilings*self.num_bins, stateVars)
#         print "featureVector " + str(len(self.theta))
#         print 'active tiles = ' + str(active_tiles)
#         print 'numTilings = ' + str(self.numTilings)
#         print 'stateVars = ' + str(stateVars)
        """ 
        As provided in Rich's explanation
               tiles                   ; a provided array for the tile indices to go into
               starting-element        ; first element of "tiles" to be changed (typically 0)
               num-tilings             ; the number of tilings desired
               memory-size             ; the number of possible tile indices
               floats                  ; a list of real values making up the input vector
               ints)                   ; list of optional inputs to get different hashings
        """
    
    def loss(self, x, r, prev_state=None):
        """
        Returns the TD error assuming reward r given for 
        transition from prev_state to x
        If prev_state is None will use leftmost element in exp_queue
        """
        if prev_state is None:
            if len(self.exp_queue) < self.horizon:
                return None
            else:
                prev_state = self.exp_queue[0][0]

        vp = r + self.gamma * self.value(x)
        v = self.value(prev_state)
        delta = vp - v
        return delta 
    
    
    def predict(self, x):
        self.loadFeatures(x, self.F)
        return self.computeQ()
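A minimal usage sketch for TDLambdaLearner, assuming the Learner base class, Verifier, TraceHolder, CollisionTable and Rich Sutton's tiles() are importable as in the rest of this listing; the state and reward values below are made up for illustration.

    learner = TDLambdaLearner(numTilings=4, num_bins=8, rlAlpha=0.1,
                              rlLambda=0.9, rlGamma=0.9, cTableSize=4096)  # power-of-two table size
    trajectory = [([0.2, 1.3], 0.0), ([0.4, 1.1], 1.0), ([0.5, 0.9], 0.0)]  # (state, reward) pairs
    for state, reward in trajectory:
        prediction = learner.update(state, reward)  # running estimate of the discounted return
    print(learner.predict([0.5, 0.9]))              # value of a state under the current weights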
Exemplo n.º 47
0
def check_errors(file):
    verifier = Verifier()
    errors = verifier.check_errors(file)
    return errors