Example No. 1
    def progress_req(self, data):
        logger.debug('progress req:%s'%str(data))
        logger.info("progress req:%s"%data)
        if 'id' not in data:
            self.request.send(jsontool.encode(REQ_INVALID_ID))
            return
        id = data.get('id')

        if id in request_list:
            self.request.send(jsontool.encode(TASK_NOT_START))
            return

        elif id in task_list:
            task = task_list.get(id)
            try:
                #p = media_engine.getprogress(task.handle)
                p = self.getengine(data).getprogress(task.handle)
            except AttributeError:
                self.request.send(jsontool.encode(TASK_NOT_START))
                # return here so we never send TASK_PROGRESS with an undefined p
                return
            self.request.send(jsontool.encode(TASK_PROGRESS%p))
            return

        elif id in history_list:
            # in history_list, data has been converted back to the original dict
            data = history_list.get(id)
            assert 'ret' in data
            if data['ret'] == 0:
                self.request.send(jsontool.encode(TASK_FINISHED))
            else:
                self.request.send(jsontool.encode(TASK_FAILED % data['ret']))
            return

        else:
            self.request.send(jsontool.encode(REQ_INVALID_ID))
Example No. 2
    def paste_text(self, clipboard='none', verify=None, block=False):
        if not self.enter_text('DUMMYTEXT'):
            raise Exception('Failed to paste text on Secure Web App: Cannot enter dummy text')
        if not self.get_content_object().select_text():
            raise Exception('Failed to paste text on Secure Web App: Cannot select dummy text')
        if not click_clipboard_command(ui.Android.get('Paste')):
            raise Exception('Failed to paste text on Secure Web App: Cannot choose Paste command')
        self.paste_from_clipboard(clipboard)
        if verify:
            if not self.is_webview_content():
                edit = self.get_content_object().get_child('/EditText')
                if edit.get_content_description() == verify:
                    if block:
                        raise Exception('Failed to paste text on Secure Web App: Text was pasted unexpectedly --- should NOT be pasted')
                else:
                    if not block:
                        raise Exception('Failed to paste text on Secure Web App: Text was NOT pasted successfully')
            else:
                logger.info(''' >> WARNING: Cannot verify the pasted text programmatically.
Check the screen capture to see whether its text is %sequal to %s''' % (('NOT ' if block else ''), verify))
        if self.is_android_error():
            raise Exception('Failed to paste text on Secure Web App: Android error')
        if not self.is_app_foreground():
            raise Exception('Failed to paste text on Secure Web App: Eventually app is not on screen')
        return self
Example No. 3
def main():
    app = ServiceImp(stdout = out_f, 
                     stderr = out_f)
    drunner = convertservice.ConvertService(app)
    try:
        drunner.do_action()
    except Exception as e:
        logger.info("%s" % e)
Example No. 4
    def config(self, key, value):
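        # cfg.get_method maps a config key to its setter; None means the key is unknown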
        method = self.cfg.get_method(key)
        if method is None:
            logger.error("No method for key %s" % key)
            return False

        logger.info("Setting option %s to value %s" % (key, value))
        return method(value)
Example No. 5
def send_email(to_address, email_subject, email_template, context, cc_addresses=None):
    # avoid a mutable default argument: a shared default list would keep
    # accumulating the sender address across calls
    if cc_addresses is None:
        cc_addresses = []

    if to_address is not None:
        email_template += MAIL_FOOTER
        context['adminEmail'] = config.get("MAIL_CONF","mail_admin_request")
        email_message = email_template.format(context)
        cc_addresses.append(config.get("MAIL_CONF","mail_sender"))
        logger.info("Email message is::"+str(email_message))
        push_email(to_address, email_subject, email_message, [], cc_addresses)
Example No. 6
def add_or_update_user_memberships(user_id, roles, update_session):

    current_roles = current.db((user_id == current.db.user_membership.user_id) 
                               & (current.db.user_membership.group_id == current.db.user_group.id)).select(current.db.user_group.role).as_list()

    logger.info("users current roles: %s", current_roles)
    if current_roles != roles:
        current.db(current.db.user_membership.user_id == user_id).delete()
        for role in roles:
            add_membership_db(user_id, role, update_session)
Example No. 7
    def _convert_doc(self, data):
        opts = None
        folder, filename = os.path.split(data['src'])
        basename, extname = os.path.splitext(filename)
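        # destination: same folder and basename as the source, with a .pdf extension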
        data['dst'] = os.path.join(folder, "%s.%s"%(basename,'pdf'))
        logger.info(doc_engine._makeunoconvcmd(data['src'], data['dst'], opts))

        # call engine 
        h = doc_engine.convertdoc(data['src'], data['dst'], opts)
        return h
Example No. 8
def send_email_to_admin(email_subject, email_message, email_type):
    if email_type == 'report_bug':
        email_address = config.get("MAIL_CONF","mail_admin_bug_report")
    elif email_type == 'request':
        email_address = config.get("MAIL_CONF","mail_admin_request")
    elif email_type == 'complaint':
        email_address = config.get("MAIL_CONF","mail_admin_complaint")
    else:
        # guard against an unbound email_address for an unknown type
        raise ValueError("Unknown email_type: %s" % email_type)
    user_email_address = auth.user.email
    logger.info("MAIL ADMIN: type:"+email_type+", subject:"+email_subject+", message:"+email_message+", from:"+user_email_address)
    push_email(email_address, email_subject, email_message, user_email_address)
Example No. 9
def schedule_task(fields, _id):

    #Add entry into task_queue_event
    if fields['task_type'] in (CONTAINER_TASK_CREATE, CONTAINER_START, CONTAINER_STOP, CONTAINER_SUSPEND, CONTAINER_RESUME, CONTAINER_DELETE, CONTAINER_RESTART, CONTAINER_RECREATE):
        cont_id = fields['parameters']['cont_id'] if 'cont_id' in fields['parameters'] else None
        cont_name = db.container_data[cont_id].name if cont_id else ""
        task_event_id = db.task_queue_event.insert(task_id = _id,
                            task_type = fields['task_type'],
                            cont_id = cont_id,
                            vm_name = cont_name,
                            requester_id = fields['requester_id'],
                            parameters = fields['parameters'],
                            status = TASK_QUEUE_STATUS_PENDING)
    elif fields['task_type'] == Object_Store_TASK_CREATE:
        task_event_id = db.task_queue_event.insert(task_id = _id,
                            task_type = fields['task_type'],
                            vm_name = "Object_Store",
                            requester_id = fields['requester_id'],
                            parameters = fields['parameters'],
                            status = TASK_QUEUE_STATUS_PENDING)
    else:
        vm_id = fields['parameters']['vm_id'] if 'vm_id' in fields['parameters'] else None
        vm_name = db.vm_data[vm_id].vm_name if vm_id else ""
        task_event_id = db.task_queue_event.insert(task_id = _id,
                            task_type = fields['task_type'],
                            vm_id = vm_id,
                            vm_name = vm_name,
                            requester_id = fields['requester_id'],
                            parameters = fields['parameters'],
                            status = TASK_QUEUE_STATUS_PENDING)
    #Schedule the task in the scheduler 
    if fields['task_type'] == VM_TASK_CLONE:
        # In case of clone vm, Schedule as many task as the number of clones
        for clone_vm_id in fields['parameters']['clone_vm_id']:
            vm_scheduler.queue_task('clone_task', 
                                    pvars = dict(task_event_id = task_event_id, vm_id = clone_vm_id),
                                    start_time = request.now, 
                                    timeout = 30 * MINUTES, 
                                    group_name = 'vm_task')
    else:
        sch_task_name = "vm_task"
        
        if fields['task_type'] == Object_Store_TASK_CREATE:
            logger.info("\n ENTERING OBJECT_TASK	........")
            sch_task_name = 'object_task'
    
        elif fields['task_type'] in (CONTAINER_TASK_CREATE, CONTAINER_START, CONTAINER_STOP, CONTAINER_SUSPEND, CONTAINER_RESUME, CONTAINER_DELETE, CONTAINER_RESTART):
            logger.info("\n ENTERING CONTAINER_TASK	........")
            sch_task_name = 'container_task'
    
        vm_scheduler.queue_task(sch_task_name ,
                                pvars = dict(task_event_id = task_event_id),
                                start_time = request.now, 
                                timeout = 30 * MINUTES, 
                                group_name = 'vm_task')
Example No. 10
def send_email_to_user_manual(email_subject, email_message, vm_id):
    vm_users = []
    context = dict(adminEmail = config.get("MAIL_CONF","mail_admin_request"))

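    # collect every user mapped to this VM, then mail each one individually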
    for user in db(db.user_vm_map.vm_id == vm_id).select(db.user_vm_map.user_id):
        vm_users.append(user['user_id'])
    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        if user_info[1] != None:
            logger.info("MAIL USER: User Name: "+ user_info[0])
            send_email(user_info[1], email_subject, email_message, context)
Example No. 11
    def load(self):
        path = self.cfg_file
        if not os.path.isfile(path):
            # no config file yet: persist the current defaults
            with open(path, 'w', encoding='utf-8') as fd:
                json.dump(self.cfg, fd)
            return

        with open(path, encoding='utf-8') as fd:
            cfg = json.load(fd)
        logger.info("Loaded cfg:\n" + str(cfg))
        for k, v in cfg.items():
            self.cfg[k] = v
Example No. 12
def initiate_android(a):
    global _args
    initiate(a)

    serial_number = settings.device.android_serial
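    # a serial number passed on the command line overrides the settings default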
    if _args.serial_number:
        serial_number = _args.serial_number
    android_device.attach(serial_number)
    logger.info('DEVICE=%s' % android_device.get_serial())
    ui.initialize(android_device.get_language())
    uiautomator.initialize(settings.local.UiA_JAR_folder,
                           settings.local.UiA_JAR_name,
                           settings.local.UiA_packbund)
Example No. 13
 def startup(self, param=None):
     retry_count = 0
     retry_max = 2
     while retry_count < retry_max:
         try:
             self.startup_once(param=param)
             break
         except Exception:
             if retry_count >= retry_max - 1:
                 # out of retries: re-raise with the original traceback
                 raise
             logger.info(' > WARNING: Failed to launch app. Trying again with re-install.')
             self.prep(policy=None, reinstall=True)
             retry_count += 1
     return self
Example No. 14
 def stop(self):
     """
     kill the convert process
     """
     for data in task_list.values():
         #logger.debug('kill %s'%str(data))
         if hasattr(data, 'handle'):
             try:
                 data.handle.kill()
                 data.handle.retvalue()
                 logger.debug("kill pid %d src file %s"%(data.handle.pid, data.get('src','N/A')))
                 logger.info("kill pid %d src file %s"%(data.handle.pid, data.get('src','N/A')))
             except Exception as e:
                 logger.debug("except while kill %s %s"%(data['src'], str(e)))
Example No. 15
    def _convert_media(self, data):
        opts = _default_opt[data.get('engine','media')].copy()
        opts.update(data.get('opt',{}))

        # the dst file goes in the same dir as the src file,
        # with a _low suffix and the extension changed to .webm
        folder, filename = os.path.split(data['src'])
        basename, extname = os.path.splitext(filename)
        data['dst'] = os.path.join(folder, "%s_low.%s"%(basename,'webm'))
        logger.info(media_engine._makeffmpegcmd(data['src'], data['dst'], opts))

        # call engine 
        h = media_engine.convertmedia(data['src'], data['dst'], opts)
        return h
Example No. 16
    def save(self, start = 0, end = INT_MAX):
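        # end defaults to the INT_MAX sentinel, meaning "save through the current end"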
        if end == INT_MAX:
            end = self.size()

        if start < 0 or end > self.size() or start >= end:
            return

        # this is not very efficient, but it should
        # be called rarely in real use
        self.ps_history.delete_all()

        hist = self.history[start:end]
        for entry in hist:
            self.ps_history.save_entry(entry)

        logger.info("Saved history")
Example No. 17
 def _handle_request(self):
     """
     get req from request_list
     run convert synchronous and put handles in task_list
     """
     while True:
         # wait for task finished
         sema_wait(_semaphore_task) 
         # wait request
         sema_wait(_semaphore_req) 
         id, d = request_list.popitem()
         data = DictWrapper(d)
         try:
             logger.debug('recv data: %s'%str(data))
             data.handle = self.engine_function[data.get('engine')](self,data)
         except Exception as e:
             logger.info(e)
         task_list[id] = data
Example No. 18
def send_shutdown_email_to_all():
    vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED)).select()
    for vm_data in vms:
        owner_info = get_user_details(vm_data.owner_id)
        context = dict(vmName = vm_data.vm_name,
                       userName = owner_info[0],
                       vmIp = vm_data.private_ip)
        
        cc_user_list = []
        for user in db(db.user_vm_map.vm_id == vm_data.id).select(db.user_vm_map.user_id):
            if user.user_id != vm_data.owner_id:
                user_info = get_user_details(user.user_id)
                cc_user_list.append(user_info[1])

        logger.info("Sending mail to:: " + str(owner_info[1]))
        send_email(owner_info[1], BAADAL_SHUTDOWN_SUBJECT, BAADAL_SHUTDOWN_BODY, context, cc_user_list)
        import time
        time.sleep(30)
Example No. 19
    def login(self, username=None, password=None, negative=False):
        if not username:
            username = self._username
        if not password:
            password = self._password

        if not self.is_login_prompted():
            return self
        retry = 0
        retry_on_error = 4
        while retry <= retry_on_error:
            UiObj('/EditText[@instance=0]').set_text(username)
            UiObj('/EditText[@instance=1]').set_text(password)
            UiObj('/Button[@text=%s]' % ui.WrapLib.get('Login')).click()
            if not self.wait_for_complete():
                raise WrappedLoginException('Failed to login: Timeout after entering credential')
            early_failure = self.is_login_failed(hide=False)
            errmsg2 = UiObj('/TextView[@package=%s][contains(@text,%s)]' % (self._packbund,
                                                                            ui.WrapLib.get('in the input box to continue')))
            if early_failure or errmsg2.exists():
                if negative:
                    return self  # success
                logger.debug('Wrapped.login: Login failed due to unknown reason.')
                if retry >= retry_on_error:
                    break
                logger.debug('Wrapped.login: Retrying @ %s' % (retry + 1))
                if not early_failure:
                    UiObj('/EditText[@instance=0]').set_text('continue')
                UiObj('/Button[@text=%s]' % ui.WrapLib.get('OK')).click()
            elif self.is_android_error():
                logger.debug('Wrapped.login: Android error.')
                if retry >= retry_on_error:
                    break
                self.hide_android_error()
                if not self.is_login_prompted():
                    raise WrappedLoginException('Failed to login: Unexpected Android error with no login prompt any longer')
                logger.info(' > WARNING: Android error on login -- retrying @ %s' % (retry + 1))
            else:
                if negative:
                    raise WrappedLoginException('Failed to login: Login was expected to fail but succeeded')
                return self  # success
            retry += 1
        raise WrappedLoginException('Finally failed to login due to unknown reason')
Example No. 20
 def run(self):
     try:
         self.start()
         ua = TAoneUtil().prep()
         wa = WorkMail().prep('xTA - default').startup()
         android_device.send_key(android_device.KEY_HOME)
         ua.startup()
         logger.info('Waiting one-minute idle time...')
         time.sleep(60)
         wa.initiate(restart=False)
         wa.switch_pane('email')
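         # the one-minute idle must not log the user out of the wrapped app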
         if wa.is_login_prompted():
             raise Exception('Login is prompted unexpectedly after one-minute idle time')
         self.complete()
     except Exception as e:
         self.abend(e, take_log=True, take_screenshot=True)
         self.post_error()
     WorkMail().stop()
     TAoneUtil().stop()
Example No. 21
def _free_task_list():
    logger.debug('task_list.key : %s' % task_list.keys())
    # iterate over a snapshot of the keys: finished tasks are
    # popped from task_list inside the loop
    for k in list(task_list.keys()):
        handle = task_list.get(k).handle
        # handle is returned by the engine and should have an isfinished method
        assert hasattr(handle, 'isfinished')
        if handle.isfinished():
            logger.info('%s finished, return %d' % (
                        task_list[k].get('src'),
                        handle.retvalue()))
            tmp = dict(task_list.pop(k))
            tmp['ret'] = handle.retvalue()
            history_list[k] = tmp
            sema_signal(_semaphore_task)
            # TODO: here only save dict to file
            #       prefer history like this:
            #           write_to_history( id, engine, src, status, time)
            history.save(history_list)
Example No. 22
 def run(self):
     try:
         self.start()
         ua = TAoneUtil().prep()
         wa = TAoneApp().prep('xTA - reauth - one minute').startup()
         android_device.send_key(android_device.KEY_HOME)
         ua.startup()
         logger.info('Waiting one-minute idle time...')
         time.sleep(60)
         wa.initiate(restart=False)
         wa.click_function('copytext2')
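         # after one idle minute this app must require re-authentication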
         if not wa.is_login_prompted():
             raise Exception('Login is NOT prompted unexpectedly after one-minute idle time')
         self.complete()
     except Exception as e:
         self.abend(e, take_log=True, take_screenshot=True)
         self.post_error()
     TAoneApp().stop()
     TAoneUtil().stop()
Example No. 23
def prep_wrapped_apps(install=False):
    for key in settings.info.apps:
        logger.info(key)
        if not connection.assure_app(name=key, policy=None):
            logger.info('Failed to prep app %s on server' % key)
        elif install:
            packbund = settings.info.apps[key]['metadata']['bundle-identifier']
            apk_file = connection.download_app(key, settings.tempfile(packbund + '.apk'))
            if not apk_file:
                logger.info('Failed to download app "%s" from server' % key)
            else:
                if not device.install_app(packbund, apk_file, True):
                    logger.info('Failed to install app "%s" (%s) on device' % (key, packbund))
Example No. 24
def schedule_task(fields, _id):
    #Add entry into task_queue_event
    vm_id = fields['parameters']['vm_id'] if 'vm_id' in fields['parameters'] else None
    if fields['task_type'] != Object_Store_TASK_CREATE:
        vm_name = db.vm_data[vm_id].vm_name if vm_id else ""
    else:
        vm_name = ""
        vm_id = -1

    task_event_id = db.task_queue_event.insert(task_id = _id,
                            task_type = fields['task_type'],
                            vm_id = vm_id,
                            vm_name = vm_name,
                            requester_id = fields['requester_id'],
                            parameters = fields['parameters'],
                            status = TASK_QUEUE_STATUS_PENDING)
    #Schedule the task in the scheduler 
    if fields['task_type'] == VM_TASK_CLONE:
        # In case of clone vm, Schedule as many task as the number of clones
        for clone_vm_id in fields['parameters']['clone_vm_id']:
            vm_scheduler.queue_task('clone_task', 
                                    pvars = dict(task_event_id = task_event_id, vm_id = clone_vm_id),
                                    start_time = request.now, 
                                    timeout = 30 * MINUTES, 
                                    group_name = 'vm_task')

    elif fields['task_type'] == Object_Store_TASK_CREATE:
        logger.info("\n ENTERING OBJECT_TASK	........")
        vm_scheduler.queue_task('object_task' ,
                                pvars = dict(task_event_id = task_event_id),
                                start_time = request.now, 
                                timeout = 30 * MINUTES, 
                                group_name = 'vm_task')

    else:
        vm_scheduler.queue_task('vm_task', 
                                pvars = dict(task_event_id = task_event_id),
                                start_time = request.now, 
                                timeout = 30 * MINUTES, 
                                group_name = 'vm_task')
Example No. 25
    def build_graph(self, save_model_path):
        """
        Build a CNN training graph and save to disk for reloading
        """
        if os.path.exists("{}.meta".format(save_model_path)):
            logger.info("Graph existed, ready to be reloaded...")
        else:
            logger.info("No graph can be loaded, so create a new graph...")
            tf.reset_default_graph()
            # placeholders
            x = self.neural_net_image_input((32, 32, 3))
            y = self.neural_net_label_input(10)
            keep_prob = self.neural_net_keep_prob_input()

            # model
            logits_out = self.conv_net(x, keep_prob)

            # Name logits_out
            logits_out = tf.identity(logits_out, name='logits')

            # loss and optimizer
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
                logits=logits_out, labels=y),
                                  name='cost')
            optimizer = tf.train.AdamOptimizer(name='optimizer').minimize(loss)

            # Accuracy
            correct_pred = tf.equal(tf.argmax(y, axis=1),
                                    tf.argmax(logits_out, axis=1))
            accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32),
                                      name='accuracy')

            # print(type(tf.Variable(1)))
            saver = tf.train.Saver()
            if not os.path.exists('./savedModel'):
                os.mkdir('./savedModel')
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                saver.save(sess, './savedModel/cnn-model')
Example No. 26
    def __load_graph(self, sess, save_model_path):
        """
        Reload the graph from saved meta graph on disk, return a dictionary of tf tensors and ops for usage.
        :param sess: tf.Session()
        :param save_model_path: path storing trained session
        :return: dictionary storing all the necessary tensors and ops
        """
        graph = {}

        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)

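        # fetch the tensors and ops by the names assigned when the graph was built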
        graph['x'] = sess.graph.get_tensor_by_name("x:0")
        graph['y'] = sess.graph.get_tensor_by_name("y:0")
        graph['keep_prob'] = sess.graph.get_tensor_by_name("keep_prob:0")
        graph['cost'] = sess.graph.get_tensor_by_name('cost:0')
        graph['accuracy'] = sess.graph.get_tensor_by_name('accuracy:0')
        graph['logits'] = sess.graph.get_tensor_by_name("logits:0")
        graph['optimizer'] = sess.graph.get_operation_by_name("optimizer")

        logger.info("model is ready, good to go!")
        return graph
Example No. 27
    def check_basic_constraint(self):
        # Requirement 2.2.5
        try:
            basic_constraint_extension = self.cert.extensions.get_extension_for_class(
                x509.BasicConstraints)
            # check whether the constraint is marked as critical
            if basic_constraint_extension.critical:
                logger.info(
                    "The certificate has a BasicConstraints extension marked as critical. This is OK."
                )
                logger.info("The content of the BasicConstraints extension is: " +
                            str(basic_constraint_extension))
            else:
                logger.error(
                    "The certificate has a BasicConstraints extension that is not marked as critical. This is not OK."
                )
                logger.warning(
                    "The content of the BasicConstraints extension is: " +
                    str(basic_constraint_extension))

        except Exception as err:
            logger.error("The certificate has no BasicConstraints extension")
Example No. 28
    def build_graph(self, save_model_path, lstm_size=128, num_layers=2, learning_rate=0.001, grad_clip=5,
                    sampling=False):
        """
        Build the graph for the RNN model and save it as a meta graph for future reloading. Note that the initial
        state from the lstm layer method cannot be saved directly: it is a Python tuple of LSTM state tuples. We have
        to flatten it first and save the tensors as a collection via tensorflow's tf.add_to_collection
        :param save_model_path: path of stored model
        :param num_classes: number of unique words
        :param batch_size: number of sequences in one batch
        :param num_steps: number of steps in one sequence
        :param lstm_size: number of output units from lstm layer
        :param num_layers: number of stacked lstm layers
        :param learning_rate: learning-rate coefficient used during training.
        :param grad_clip: threshold of clipping large gradient during training.
        :param sampling: choose different size of input
        """
        print("creating into " + save_model_path)
        if os.path.exists("{}.meta".format(save_model_path)):
            logger.info("Graph existed, ready to be reloaded...")
        else:
            logger.info("Creating a new meta graph...")
            # When we're using this network for sampling later, we'll be passing in
            # one character at a time, so providing an option for that
            if sampling:
                batch_size, num_steps = 1, 1
            else:
                batch_size, num_steps = self.num_seq, self.num_step

            tf.reset_default_graph()
            inputs, targets, keep_prob = self.build_inputs(batch_size, num_steps)

            cell, initial_state = self.build_lstm(lstm_size, num_layers, batch_size, keep_prob)
            # Add all tensors in initial_state into the saved graph.
            for tensor in flatten(initial_state):
                tf.add_to_collection('rnn_state_input', tensor)

            inputs_one_hot = tf.one_hot(inputs, self.len_vocabulary)

            # TODO: some ops cannot run on the GPU; this still needs investigation
            with tf.device('/cpu:0'):
                outputs, final_state = tf.nn.dynamic_rnn(cell, inputs_one_hot, initial_state=initial_state)

                # Add all tensors in final_state into saved graph.
                for tensor in flatten(final_state):
                    tf.add_to_collection('rnn_final_input', tensor)
                out_prob, logits = self.build_output(lstm_output=outputs, in_size=lstm_size, out_size=self.len_vocabulary)
                logits = tf.identity(logits, "logits")
                loss = self.build_loss(logits, targets)
                optimizer = self.build_optimizer(loss, learning_rate, grad_clip)

            saver = tf.train.Saver()
            # if not os.path.exists('./savedModel'):
            #     os.mkdir('./savedModel')
            logger.info("Save to {}".format(save_model_path))
            with tf.Session(graph=tf.get_default_graph()) as sess:
                sess.run(tf.global_variables_initializer())
                saver.save(sess, save_model_path)

            tf.reset_default_graph()
Example No. 29
 def receive_handler(self, client, address, chat):
     """监听收到的消息"""
     while not client._closed:
         try:
             message = self.get_message(client)
             if not message:
                 logger.info('WeChat session ended <%s:%s>' % address)
                 client.close()
                 self.close_chat(chat, address)  # clear the login info after leaving WeChat
                 break
             if message['event_type'] == '6':  # pushed message (personal, official account, subscription, etc.)
                 chat.receiver(message)
             elif message['event_type'] == '5':  # contact list info
                 self.save_contacts_to_chat(message, chat)
             elif message['event_type'] == '2':  # logged out of WeChat
                 logger.info('WeChat session ended <%s:%s>' % address)
                 client.close()
                 self.close_chat(chat, address)  # clear the login info after leaving WeChat
                 break
         except Exception as e:
             logger.exception(e)
             self.statistic_error(e)
Example No. 30
def insert_new_image(image_id, labels):

    statement = 'INSERT INTO tags (image_id, label) values (:image_id, :label)'
    params_sets = []

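    # build one parameter set per label for a single batched INSERT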
    for l in labels:
        params_sets.append([{
            'name': 'image_id',
            'value': {
                'stringValue': image_id
            }
        }, {
            'name': 'label',
            'value': {
                'stringValue': l
            }
        }])

    response = batch_execute_statement(statement, params_sets)

    logger.info(f'Number of records updated: {len(response["updateResults"])}')

    return response
Example No. 31
 def run(self):
     global _copytext
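     # _copytext holds the text copied earlier in the flow and is used to verify the paste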
     status = True
     try:
         self.start()
         logger.info('Copying text to workspace clipboard...')
         wa = TAoneApp().prep('xTA - clipboard - workspace')
         test_copy_text(wa)
         wa.stop()
         logger.info('Trying to paste text and pause...')
         wa.startup(param=dict(extras=dict(do='pastetext2')))
         wa.paste_text_from_menu(clipboard='stop', verify=None, block=False)
         android_device.send_key(android_device.KEY_HOME)
         time.sleep(5)
         wa.startup(param=dict(restart=False))
         wa.select_paste_from(clipboard='workspace', verify=_copytext, block=False)
         self.complete()
     except Exception as e:
         self.abend(e, take_log=True, take_screenshot=True)
         self.post_error()
         status = False
     TAoneApp().stop()
     return status
Example No. 32
    def test_truncated_hmac_extension(self):
        # Requirement 2.5.4
        if self.ca_file:
            openssl_ca_opt = "-CAfile " + self.ca_file
        else:
            openssl_ca_opt = ""
        openssl_cmd_getcert = " echo Q | openssl s_client " + openssl_ca_opt + " -connect " + self.hostname + ":" + str(
            self.port) + " -tlsextdebug" + self.openssl_client_proxy_part
        proc = subprocess.Popen([openssl_cmd_getcert],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)
        (out, err) = proc.communicate()

        # TODO: we still need a server with a truncated_hmac extension to see whether this works.
        if "truncated_hmac" in out:
            logger.error(
                "Server supports the truncated_hmac extension. This should not be the case."
            )
        else:
            logger.info(
                "Server does not support the truncated_hmac extension. This is OK."
            )
Example No. 33
def send_shutdown_email_to_all():
    vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED)).select()
    #data structures to store user data; used in sending email
    user_vms = {}
    user_name = {}
    user_email_ids = set()
    
    for vm_data in vms:
        owner_info = get_user_details(vm_data.owner_id)
        
        # add unique usernames to a dict keyed by email_id
        user_name[owner_info[1]] = owner_info[0]
        # store VM names in a dict keyed by user email-id
        if owner_info[1] not in user_vms:
            user_vms[owner_info[1]] = vm_data.vm_name
        else:
            user_vms[owner_info[1]] += ", " + vm_data.vm_name
        # extract unique email ids from owner_info
        user_email_ids.add(owner_info[1])

        cc_user_list = []
        for user in db(db.user_vm_map.vm_id == vm_data.id).select(db.user_vm_map.user_id):
            if user.user_id != vm_data.owner_id:
                user_info = get_user_details(user.user_id)
                cc_user_list.append(user_info[1])
                if user_info[1] not in user_vms:
                    user_vms[user_info[1]] = vm_data.vm_name
                else:
                    user_vms[user_info[1]] += ", " + vm_data.vm_name
                user_email_ids.add(user_info[1])
                user_name[user_info[1]] = user_info[0]
                #logger.info("\nUser of VM: " + str(user_info))
	
        logger.info("VM name: " + vm_data.vm_name + "\tOwner: " + str(owner_info[0]) + "\tand other users: " + str(cc_user_list))
    
    logger.info("Sending mail to user_email_ids " + str(user_email_ids))
    
    # iterate over all unique email_ids to send email
    import time
    for email_id in user_email_ids:
        context = dict(userName = user_name[email_id], userVMs = user_vms[email_id])

        logger.info("Sending mail to: " + email_id)
        logger.info("User VMs: " + user_vms[email_id])
        send_email(email_id, BAADAL_SHUTDOWN_SUBJECT, BAADAL_SHUTDOWN_BODY, context)

        time.sleep(30)
Example No. 34
    def check_cert_for_extended_keyusage(self):
        try:
            # read the usage list from the ExtendedKeyUsage extension
            keyusage_extension = self.cert.extensions.get_extension_for_class(
                x509.ExtendedKeyUsage)
            usg_list = []
            for usg in keyusage_extension.value._usages:
                usg_list.append(usg._name)

            # check for serverAuth
            if "serverAuth" in usg_list:
                contains_serverauth = True
                logger.info(
                    "The certificate has an ExtendedKeyUsage extension with the serverAuth entry"
                )
            else:
                contains_serverauth = False
                logger.warning(
                    "The certificate has an ExtendedKeyUsage extension with the following properties: %s",
                    usg_list)

        except Exception as err:
            logger.error("The certificate has no ExtendedKeyUsage extension")
            print(err)
Example No. 35
    def run(self):

        #lock a tmp file to avoid starting multiple daemons
        self.lockf = open('/tmp/clipon-lock', 'w')
        try:
            fcntl.flock(self.lockf, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except BlockingIOError:
            logger.error("Unable to lock file")
            return

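        # note: self.lockf must stay open for the daemon's lifetime; closing it releases the lock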
        self.setup()

        self.cfg = CliponConfig(self.cfg_file)
        self.cfg.load()
        self.cfg.save()

        self.history = ClipHistory(self.cfg)

        self.monitor = ClipboardMonitor(self.history)
        self.monitor.start()

        dbus_loop = DBusGMainLoop()
        bus_name = dbus.service.BusName(CLIPON_BUS_NAME,
                        bus=dbus.SessionBus(mainloop=dbus_loop))
        dbus.service.Object.__init__(self, bus_name,
                                     CLIPON_OBJ_PATH)

        GObject.threads_init()  # should be called before the main loop
        self.main_loop = GObject.MainLoop()

        self.status = 'active'
        try:
            logger.info("DBus service started")
            self.main_loop.run()
        except (KeyboardInterrupt, SystemExit):
            self.stop()
Example No. 36
    def test_tls_compression(self):
        # Requirement 2.5.2

        if self.ca_file:
            openssl_ca_opt = "-CAfile " + self.ca_file
        else:
            openssl_ca_opt = ""
        openssl_cmd_getcert = " echo " R" | openssl s_client " + openssl_ca_opt + " -connect " + self.hostname + ":" + str(
            self.port) + self.openssl_client_proxy_part

        proc = subprocess.Popen([openssl_cmd_getcert],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)
        (out, err) = proc.communicate()

        if "Compression: NONE" in out:
            logger.info(
                "Server unterstützt keine TLS compression. Das ist das erwartete Verhalten."
            )
        else:
            logger.error(
                "Server unterstützt TLS compression. Das sollte nicht der Fall sein."
            )
Example No. 37
 def __train_and_report(self, sess, graph, batch_ids, save_model_path):
     """
     Train cycle
     :param sess: tf.Session(graph)
     :param graph: dictionary storing all the necessary tensors and ops
     :param batch_ids: list of batch ids used as training data
     :param save_model_path: path storing trained session
     """
     dir_processed_data = "./ProcessedData"
     valid_features, valid_labels = pickle.load(
         open('{}/preprocess_validation.p'.format(dir_processed_data),
              mode='rb'))
     # Initializing the variables
     sess.run(tf.global_variables_initializer())
     # Training cycle
     for epoch in range(NeuralNetwork.epochs):
         # Loop over all batches
         for batch_i in batch_ids:
             for batch_features, batch_labels in \
                     helper.load_preprocess_training_batch(batch_i, NeuralNetwork.batch_size):
                 self.train_neural_network(sess, graph['x'], graph['y'],
                                           graph['keep_prob'],
                                           graph['optimizer'],
                                           NeuralNetwork.keep_probability,
                                           batch_features, batch_labels)
             logger.info('\nEpoch {:>2}, CIFAR-10 Batch {}:'.format(
                 epoch + 1, batch_i))
             if len(batch_features) != 0 and len(batch_labels) != 0:
                 self.print_stats(sess, graph['x'], graph['y'],
                                  graph['keep_prob'], batch_features,
                                  batch_labels, valid_features,
                                  valid_labels, graph['cost'],
                                  graph['accuracy'])
     # Save Model
     saver = tf.train.Saver()
     saver.save(sess, save_model_path)
Example No. 38
def send_shutdown_email_to_all():
    vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED)).select()
    #data structures to store user data; used in sending email
    user_vms = {}
    user_name = {}
    user_email_ids = set()
    
    for vm_data in vms:
        owner_info = get_user_details(vm_data.owner_id)
        
        # add unique usernames to a dict keyed by email_id
        user_name[owner_info[1]] = owner_info[0]
        # store VM names in a dict keyed by user email-id
        if owner_info[1] not in user_vms:
            user_vms[owner_info[1]] = vm_data.vm_name
        else:
            user_vms[owner_info[1]] += ", " + vm_data.vm_name
        # extract unique email ids from owner_info
        user_email_ids.add(owner_info[1])

        cc_user_list = []
        for user in db(db.user_vm_map.vm_id == vm_data.id).select(db.user_vm_map.user_id):
            if user.user_id != vm_data.owner_id:
                user_info = get_user_details(user.user_id)
                cc_user_list.append(user_info[1])
                if user_info[1] not in user_vms:
                    user_vms[user_info[1]] = vm_data.vm_name
                else:
                    user_vms[user_info[1]] += ", " + vm_data.vm_name
                user_email_ids.add(user_info[1])
                user_name[user_info[1]] = user_info[0]
                #logger.info("\nUser of VM: " + str(user_info))

        logger.info("VM name: " + vm_data.vm_name + "\tOwner: " + str(owner_info[0]) + "\tand other users: " + str(cc_user_list))
    
    logger.info("Sending mail to user_email_ids " + str(user_email_ids))
    
    # iterate over all unique email_ids to send email
    import time
    for email_id in user_email_ids:
        context = dict(userName = user_name[email_id], userVMs = user_vms[email_id])
        logger.info("Sending mail to: " + email_id)
        logger.info("User VMs: " + user_vms[email_id])
        send_email(email_id, BAADAL_SHUTDOWN_SUBJECT, BAADAL_SHUTDOWN_BODY, context)

        time.sleep(30)
Example No. 39
 def seq_to_seq(self, texts, target_sequence_length):
     save_model_path = './savedModel/seq2seq-model'
     self.build_graph(save_model_path)
     tf.reset_default_graph()
     config = tf.ConfigProto()
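     # grow GPU memory on demand and allow CPU fallback for ops without GPU kernels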
     config.gpu_options.allow_growth = True
     config.allow_soft_placement = True
     texts, sequence_length = self.source_to_seq(texts)
     target_sequence_length = np.array([target_sequence_length] *
                                       len(texts))
     print(target_sequence_length, sequence_length)
     with tf.Session(graph=tf.get_default_graph(), config=config) as sess:
         graph = self.load_graph(sess, save_model_path)
         logger.info("Start seq to seq...")
         answer_logits = sess.run(graph['prediction'],
                                  feed_dict={
                                      graph['inputs']: texts,
                                      graph['target_sequence_length']:
                                      target_sequence_length,
                                      graph['source_sequence_length']:
                                      sequence_length,
                                      graph['keep_prob']: self.keep_prob
                                  })
         self.display_prediction(texts, answer_logits)
Example No. 40
 def check_signature_algorithm(self):
     logger.info("The signature algorithm in use is: " +
                 str(self.cert.signature_algorithm_oid._name))
     logger.info("The corresponding OID is: " +
                 str(self.cert.signature_algorithm_oid.dotted_string))
     # OK if the signature hash is in the list of approved hashes
     if self.cert.signature_algorithm_oid._name in self.sig_hashes_ok:
         logger.info("This is OK")
     else:
         logger.warning("Please verify using the checklist")
Example No. 41
    def list_alternative_names(self):
        try:
            name_extension = self.cert.extensions.get_extension_for_class(
                x509.SubjectAlternativeName)
            logger.info("The certificate has an AlternativeName extension")
            logger.info("The AlternativeNames entries are: ")

            for entry in name_extension._value._general_names:
                logger.info(entry._value)

        except Exception as err:
            print(err)
Example No. 42
    def check_for_wildcards(self):
        # TODO: This differs for the check of CA certificates. Unfortunately this function
        # bails out when the AlternativeNames extension is missing, so it cannot really be
        # used for a CA cert. More fields (subject) also still need to be checked.
        for entry in self.cert.subject._attributes:
            for attr in entry:
                if attr.oid._name == "commonName":
                    logger.info(
                        "commonName in the certificate subject has the value: " +
                        attr.value)
                    # check for an asterisk in the CN
                    if re.search(r"\*+", attr.value) is not None:
                        logger.error(
                            "The CN contains at least one *. This is not OK"
                        )

        try:
            name_extension = self.cert.extensions.get_extension_for_class(
                x509.SubjectAlternativeName)
            logger.info("The certificate has an AlternativeName extension")

            # list of SANs
            altname_list = name_extension.value.get_values_for_type(
                x509.DNSName)
            # no SANs contained
            if len(altname_list) < 1:
                logger.warning("The list of Alternative Names is empty.")
                return

            # check for an asterisk in the SANs
            for altname in altname_list:
                if re.search(r"\*+", altname) is not None:
                    logger.error(
                        "The AlternativeName extension contains at least one *. This is not OK"
                    )
                    return
            logger.info(
                "The AlternativeName extension contains no wildcards. This is OK"
            )

        except Exception as err:
            # a missing SAN extension should not be an error by itself
            # please verify
            logger.info("There is no AlternativeName extension")
Example No. 43
    def check_cert_for_aia(self):
        try:
            aia_extension = self.cert.extensions.get_extension_for_class(
                x509.AuthorityInformationAccess)
            logger.info(
                "The certificate has an AuthorityInformationAccess extension")
            logger.info(
                "The content of the AuthorityInformationAccess extension is:")
            logger.info(str(aia_extension))
            # TODO: the output of the extension could be formatted a bit more nicely

        except Exception as err:
            print(err)
Example No. 44
    def check_cert_for_crl(self):
        try:
            crl_extension = self.cert.extensions.get_extension_for_class(
                x509.CRLDistributionPoints)
            logger.info(
                "The certificate has a CRLDistributionPoints extension")
            logger.info(
                "The content of the CRLDistributionPoints extension is:")
            logger.info(str(crl_extension))
            # TODO: the output of the extension could be formatted a bit more nicely

        except Exception as err:
            print(err)
Example No. 45
 def run(self):
     status = True
     try:
         self.start()
         logger.info('Disassociating device...')
         if not connection.disassociate_device_by_serial(android_device.get_serial()):
             raise Exception('Device cannot get disassociated')
         logger.info('Re-installing Work Hub...')
         WorkHub().reset(startup=True)
         logger.info('Copying text on a Secure Web App...')
         test_copy_text(TWEdit().prep('xTA - clipboard - workspace'))
         logger.info('Pasting text on another Secure Web App...')
         test_paste_text(TWEdit().prep('xTA - clipboard - workspace'), 'workspace')
         self.complete()
     except Exception as e:
         self.abend(e, take_log=True, take_screenshot=True)
         self.post_error()
         status = False
     TWEdit().stop()
     return status
Example No. 46
    def check_certificate_key(self):
        if (type(self.cert.public_key()) is _RSAPublicKey):
            logger.info("This certificate has an RSA key")
            if self.cert.public_key().key_size >= 2048:
                logger.info(
                    "The key size is equal to or greater than 2048. This is OK."
                )
            else:
                logger.error(
                    "The key size is smaller than 2048 bit. This should not be the case."
                )

            # logger.info("The key size is: " + str(cert.public_key().key_size))
        if (type(self.cert.public_key()) == DSAPublicKey):
            logger.error(
                "The certificate has a DSA key. This should not be the case. The script stops here since the further tests are not meaningful."
            )
            exit(1)
            # TODO: this case still needs to be tested. The exact name of the public_key type could be slightly different.

        if (type(self.cert.public_key()) == EllipticCurvePublicKey):
            # TODO: this case is still fairly untested

            logger.info("The certificate has an EllipticCurvePublicKey")

            allowed_curves = [
                "brainpoolP256r1", "brainpoolP384r1", "brainpoolP512r1",
                "secp224r1", "secp256r1", "secp384r1", "secp521r1"
            ]
            correct_curve = False

            for crv in allowed_curves:
                if str(self.cert.public_key().curve.name) == crv:
                    logger.info("The following curve is used: " +
                                str(self.cert.public_key().curve.name) +
                                " This is OK")
                    correct_curve = True
            if not correct_curve:
                logger.error(
                    "A curve that is not allowed is used, namely: "
                    + str(self.cert.public_key().curve.name))
Example No. 47
    def test_server_for_protocol(self):
        print_h1("Test the requirements from chapter 2.3")
        print_h2("Requirement 2.3.1 Check the supported protocols:")
        self.test_supported_protocols()

        print_h2(
            "Requirement 2.3.2/2.3.3/2.3.4 Check the supported cipher suites:"
        )
        logger.info(
            "In the following, the cipher suites supported by the server are listed."
        )
        logger.info(
            "Disallowed cipher suites are flagged directly. However, whether the mandatory "
        )
        logger.info(
            "cipher suites are implemented currently has to be checked manually. The priority of the "
        )
        logger.info("cipher suites also has to be checked manually for now.")

        self.test_supported_cipher_suites()

        print_h1("Test the requirements from chapter 2.4")
        print_h2("Requirement 2.4.1 Check the ephemeral parameters")
        self.test_key_exchange()

        print_h1("Test the requirements from chapter 2.5")
        print_h2("Requirement 2.5.1 Check session renegotiation")
        self.test_session_renegotiation()

        print_h2("Requirement 2.5.2 Check TLS compression")
        self.test_tls_compression()

        print_h2("Requirement 2.5.3 Check for the heartbeat extension")
        self.test_heartbeat_extension()

        print_h2("Requirement 2.5.4 Check for the truncated_hmac extension")
        self.test_truncated_hmac_extension()
Example No. 48
    def read_certificates(self, server_certificates):
        logger.info(
            "------------------------------------------------------------------------------------"
        )
        logger.info("Fetching the certificates for the further tests")
        logger.info(
            "------------------------------------------------------------------------------------"
        )
        try:
            if server_certificates is None:

                openssl_cmd_getcert = "echo 'Q' | openssl s_client -connect " + self.hostname + ":" + str(
                    self.port
                ) + self.openssl_client_proxy_part + " -showcerts  | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p'"
                proc = subprocess.Popen([openssl_cmd_getcert],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        shell=True)
                (out, err) = proc.communicate()
                tmp_certs = pem.parse(out)
            else:
                tmp_certs = pem.parse_file(server_certificates)

            logger.info(
                str(len(tmp_certs)) +
                " certificates were received or read in.")

            for crt in tmp_certs:
                self.x509_certs.append(
                    load_pem_x509_certificate(
                        str(crt).encode('ascii', 'ignore'), default_backend()))

            # renamed loop variable so it does not shadow the x509 module
            for x509_cert in self.x509_certs:
                self.certs.append(Certificate(x509_cert, self.ca_file))

        except Exception as err:
            print(err)
Example No. 49
    def handle(self):
        # check the finished task on request in
        _free_task_list()

        data = self.request.recv(1024*4).strip()
        logger.debug( "recv a req, save it %s"%data)
        logger.info("recived: %s"%data)


        try:
            st_data = jsontool.decode(data)
        except:
            logger.debug("req is invalid json:%s"%data)
            logger.info("req is invalid json:%s"%data)
            self.request.send(jsontool.encode(REQ_INVALID))
            return

        # req format is not correct
        if not checkreq(st_data):
            logger.debug("check req failed: %s"%str(st_data))
            logger.info("check req failed: %s"%str(st_data))
            self.request.send(jsontool.encode(REQ_INVALID))
            return

        try:
            # logger.debug('in handle st_data is %s, type:%s'%(str(st_data), type(st_data)))
            f = self._get_f(st_data.get('type'))
            f(self, st_data)


            logger.debug("request list: %s"%str(request_list))
            logger.debug("task list: %s"%str(task_list))
            logger.debug("history list: %s"%str(history_list))
        except Exception as e:
            logger.info("Error: %s"%str(e))
            logger.debug("Error: %s"%str(e))
Example No. 50
 def run(self):
     status = True
     try:
         self.start()
         logger.info('Re-installing Work Hub...')
         WorkHub().reset(startup=True)
         logger.info('Copying text on unwrapped App...')
         test_copy_text(TAoneUtil().prep())
         logger.info('Pasting text on Wrapped Native App...')
         test_paste_text(TAoneApp().prep('xTA - clipboard - workspace'), 'none')
         self.complete()
     except Exception as e:
         self.abend(e, take_log=True, take_screenshot=True)
         self.post_error()
         status = False
     TAoneUtil().stop()
     TAoneApp().stop()
     return status
Example No. 51
    def check_cert_for_crl(self):
        try:
            crl_extension = self.cert.extensions.get_extension_for_class(
                x509.CRLDistributionPoints)
            logger.info(
                "The certificate has a CRLDistributionPoints extension")
            logger.info(
                "The content of the CRLDistributionPoints extension is:")
            logger.info(str(crl_extension))
            # TODO: the output of the extension could be formatted a bit more nicely

        except x509.extensions.ExtensionNotFound as err:
            logger.warning(
                "The certificate has no CRLDistributionPoints extension")

        except Exception as err:
            logger.warning(
                "Unknown error while reading the CRLDistributionPoints extension"
            )
            print(err)
Example No. 52
    def check_cert_for_aia(self):
        try:
            aia_extension = self.cert.extensions.get_extension_for_class(
                x509.AuthorityInformationAccess)
            logger.info(
                "The certificate has an AuthorityInformationAccess extension")
            logger.info(
                "The content of the AuthorityInformationAccess extension is:")
            logger.info(str(aia_extension))
            # TODO: the output of the extension could be formatted a bit more nicely

        except x509.extensions.ExtensionNotFound as err:
            logger.warning(
                "The certificate has no AuthorityInformationAccess extension"
            )

        except Exception as err:
            logger.warning(
                "Unknown error while reading the AuthorityInformationAccess extension"
            )
            print(err)
Example No. 53
    def load_graph(self, sess, save_model_path):

        graph = {}
        print("Saved_model_path:{}".format(save_model_path))
        loader = tf.train.import_meta_graph(save_model_path + '.meta')

        graph['inputs'] = sess.graph.get_tensor_by_name("inputs:0")
        graph['targets'] = sess.graph.get_tensor_by_name("targets:0")
        graph['keep_prob'] = sess.graph.get_tensor_by_name("keep_prob:0")
        graph['cost'] = sess.graph.get_tensor_by_name('cost:0')
        graph['prediction'] = sess.graph.get_tensor_by_name("prediction:0")
        graph['optimizer'] = sess.graph.get_operation_by_name("optimizer")
        graph['accuracy'] = sess.graph.get_tensor_by_name("accuracy:0")

        # rebuild the LSTM state tuples from the flat tensor collections
        initial_state_new = tf.get_collection('initial_state')
        initial_state_tuple = ()
        for idx in range(0, len(initial_state_new), 2):
            initial_state_tuple += tf.contrib.rnn.LSTMStateTuple(
                initial_state_new[idx], initial_state_new[idx + 1]),
        graph['initial_state'] = initial_state_tuple

        final_state_new = tf.get_collection('final_state')
        final_state_tuple = ()
        for idx in range(0, len(final_state_new), 2):
            final_state_tuple += tf.contrib.rnn.LSTMStateTuple(
                final_state_new[idx], final_state_new[idx + 1]),
        graph['final_state'] = final_state_tuple

        logger.info("model is ready, good to go!")

        check_point = tf.train.latest_checkpoint('checkpoints')
        # if no check_point found, means we need to start training from scratch, just initialize the variables.
        if not check_point:
            # Initializing the variables
            logger.info("Initializing the variables")
            sess.run(tf.global_variables_initializer())
        else:
            logger.info("check point path:{}".format(check_point))
            loader.restore(sess, check_point)
        return graph
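A minimal TF1-style usage sketch, assuming an instance `model` of this class, a saved model at the hypothetical path 'checkpoints/model', and placeholder batches batch_x/batch_y (feeding of the recovered initial state is omitted for brevity):

import tensorflow as tf

with tf.Session() as sess:
    graph = model.load_graph(sess, 'checkpoints/model')
    # Tensors can now be fed and fetched via the keys collected above.
    acc = sess.run(graph['accuracy'],
                   feed_dict={graph['inputs']: batch_x,
                              graph['targets']: batch_y,
                              graph['keep_prob']: 1.0})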
Exemplo n.º 54
    def download_and_extract_data(self):
        """
        Download and extract the raw data.
        :param data_path: local disk path which stores the raw data, if not exist, then decide if necessary to download
        the data from server and extract.
        """
        if os.path.exists(self.data_path):
            logger.info("Data is ready, good to go")
        else:
            os.mkdir(self.data_path)
            dataset_filename_compressed = 'text8.zip'
            if not os.path.isfile(dataset_filename_compressed):
                data_remote_url = 'http://mattmahoney.net/dc/text8.zip'
                logger.info("Downloading the data from remote server...")
                with DLProgress(unit="B",
                                unit_scale=True,
                                miniters=1,
                                desc="Text8 data") as pbar:
                    urlretrieve(data_remote_url, dataset_filename_compressed,
                                pbar.hook)
                logger.info("Downloading finished")

            with zipfile.ZipFile(dataset_filename_compressed) as zip_ref:
                zip_ref.extractall(self.data_path)
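DLProgress is referenced but not defined here. A sketch modeled on a common tqdm pattern, an assumption rather than the original class, whose hook matches the urlretrieve reporthook signature used above:

from tqdm import tqdm

class DLProgress(tqdm):
    """tqdm progress bar exposing a urlretrieve-compatible report hook."""
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        # Advance by the number of blocks fetched since the last call.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num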
Exemplo n.º 55
    def load_pretrained(self, embedding_dim, vocab_map, vocab_name, pretrained_dir):
        """
        Load a pretrained embedding file.
        :param embedding_dim: Int, configure.embedding.field.dimension
        :param vocab_map: vocab.v2i[field] -> Dict{v:id}
        :param vocab_name: field
        :param pretrained_dir: Str, file path
        """
        logger.info('Loading {}-dimension {} embedding from pretrained file: {}'.format(
            embedding_dim, vocab_name, pretrained_dir))
        with open(pretrained_dir, 'r', encoding='utf8') as f_in:
            num_pretrained_vocab = 0
            for line in f_in:
                row = line.rstrip('\n').split(' ')
                # A two-field line is the header encoding (vocab size, dimension).
                if len(row) == 2:
                    assert int(row[1]) == embedding_dim, \
                        'Pretrained dimension %d does not match the configured dimension %d' \
                        % (int(row[1]), embedding_dim)
                    continue
                if row[0] in vocab_map:
                    current_embedding = torch.FloatTensor([float(i) for i in row[1:]])
                    self.lookup_table[vocab_map[row[0]]] = current_embedding
                    num_pretrained_vocab += 1
        logger.info('Total vocab size of %s is %d.' % (vocab_name, len(vocab_map)))
        logger.info('Pretrained vocab embedding covers %d / %d tokens.' % (num_pretrained_vocab, len(vocab_map)))
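The loop above expects the word2vec text format: an optional header line '<vocab_size> <dim>', then one token per line followed by its vector components. A hypothetical 3-dimensional excerpt:

400000 3
the 0.418 0.24968 -0.41242
of 0.70853 0.57088 -0.4716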
Exemplo n.º 56
    def check_for_wildcards(self):
        # TODO: this works differently when checking CA certificates. The
        # function bails out if the SubjectAlternativeName extension is
        # missing, so it cannot really be used for a CA certificate, and
        # more fields (subject) still need to be checked as well.
        for entry in self.cert.subject._attributes:
            for attr in entry:
                if attr.oid._name == "commonName":
                    logger.info(
                        "commonName in the certificate subject has the value: " +
                        attr.value)
        try:
            name_extension = self.cert.extensions.get_extension_for_class(
                x509.SubjectAlternativeName)
            logger.info("The certificate has a SubjectAlternativeName extension")

            if re.search(r"\*+", str(name_extension)) is not None:
                logger.error(
                    "The SubjectAlternativeName extension contains at least one *. That is not OK"
                )
            else:
                logger.info(
                    "The SubjectAlternativeName extension contains no wildcards. That is OK"
                )

        except x509.extensions.ExtensionNotFound:
            logger.error("There is no SubjectAlternativeName extension")
Exemplo n.º 57
def classify_images(location, detector, model, le, confidence=0.5):
    """
	From a image folder location:
	1. Create a real and fake image folder in the current image folder itself. (Only if there aren't such a folder)
	2. Classify the images into real and fake and store them within the created folders. 
	"""

    args = {'detector': detector, 'model': model, 'le': le}

    # Create output folders (only if they do not already exist)
    real_location = os.path.join(location, 'real')
    fake_location = os.path.join(location, 'fake')
    noface_location = os.path.join(location, 'noface')
    for folder in (real_location, fake_location, noface_location):
        os.makedirs(folder, exist_ok=True)

    # Load Models
    # Load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join(
        ["detectors", args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join([
        "detectors", args["detector"],
        "res10_300x300_ssd_iter_140000.caffemodel"
    ])
    net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    # Load the liveness detector model and label encoder from disk
    print("[INFO] loading liveness detector...")
    classifiermodelpath = 'models/' + args['model']
    model = load_model(classifiermodelpath)
    with open(args["le"], "rb") as f_le:
        le = pickle.loads(f_le.read())

    # Grab all images from given folder
    images = glob(os.path.join(location, '*.png'))
    jpg_images = glob(os.path.join(location, '*.jpg'))
    images.extend(jpg_images)

    # Maintain counters for all types of images
    real_counter = 0
    fake_counter = 0
    noface_counter = 0

    bar = tqdm(images, dynamic_ncols=True, desc='Classifying images', leave=True)
    for image in bar:
        frame = cv2.imread(image)
        frame, contains_fake, detected_faces = label(frame, net, model, le,
                                                     confidence)

        # Relocate the image based on whether it is fake, real or noface
        image_name = os.path.basename(image)
        if detected_faces == 0:
            image_location = os.path.join(noface_location, image_name)
            noface_counter += 1
        elif contains_fake:
            image_location = os.path.join(fake_location, image_name)
            fake_counter += 1
        else:
            image_location = os.path.join(real_location, image_name)
            real_counter += 1

        # Shift image to classified location
        cv2.imwrite(image_location, frame)

        # Delete image from unsorted location
        # os.remove(image)

        bar.set_description(image_location)
        bar.refresh()

    logger.info('Real Images Classified: %s' % real_counter)
    logger.info('Fake Images Classified: %s' % fake_counter)
    logger.info('No Face Images Classified: %s' % noface_counter)

    # Count present images in each folder location
    total_real = len(glob(os.path.join(real_location, '*')))
    total_fake = len(glob(os.path.join(fake_location, '*')))
    total_noface = len(glob(os.path.join(noface_location, '*')))

    logger.info('Real Images Present: %s' % total_real)
    logger.info('Fake Images Present: %s' % total_fake)
    logger.info('No Face Images Present: %s' % total_noface)
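A hypothetical invocation, with the folder and artifact names as placeholders:

classify_images('captures/', detector='face_detector',
                model='liveness.model', le='le.pickle', confidence=0.5)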
Exemplo n.º 58
import logging
from configparser import RawConfigParser

import discord
from discord.ext import commands

from helper import logger
from commands.commands import ToxicBotGeneralCommands
from commands.listener import ToxicBotListener
from commands.error import ToxicBotError
from commands.admin import ToxicBotAdminCommands
from database.create import CreateTables

logger = logging.getLogger("")  # noqa: F811

# Fetch the token from secret.ini
config = RawConfigParser()
config.read("secret.ini")

DISCORD_BOT_TOKEN = config.get("DISCORD", "BOT_TOKEN")
COMMAND_PREFIX = "/"

# Create the tables if they do not exist
create_table = CreateTables()
create_table.createSchema()

logger.info("Database created successfully")

# Intents are required by discord.py since 1.5; default intents are assumed here
intents = discord.Intents.default()

bot = commands.Bot(command_prefix=COMMAND_PREFIX, intents=intents)
bot.remove_command("help")  # Remove the default help command
# Add cogs to the bot
bot.add_cog(ToxicBotGeneralCommands(bot))
bot.add_cog(ToxicBotListener(bot))
bot.add_cog(ToxicBotError(bot))
bot.add_cog(ToxicBotAdminCommands(bot))
bot.run(DISCORD_BOT_TOKEN)
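The secret.ini file read above would be expected to look like this (the token value is a placeholder):

[DISCORD]
BOT_TOKEN = your-bot-token-here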
Exemplo n.º 59
    def test_supported_cipher_suites(self):
        # Requirement 2.3.2/2.3.3/2.3.4
        # TODO: currently only works with RSA
        crypto_type = "RSA"
        openssl_cmd_getcert = "openssl ciphers"
        proc = subprocess.Popen([openssl_cmd_getcert],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)
        (out, err) = proc.communicate()

        out = out.decode().replace('\n', '').replace('\r', '')
        all_ciphers = [c for c in out.split(":") if c]

        for cipher in all_ciphers:
            try:
                cipher_list = [
                    x for x in self.cipher_suites
                    if x[1] == cipher and x[2] == crypto_type
                ]
                allowed = should = must = optional = False

                if len(cipher_list) == 0:
                    allowed = False
                elif cipher_list[0][3] == "MUST":
                    must = True
                    allowed = True
                elif cipher_list[0][3] == "SHOULD":
                    should = True
                    allowed = True
                elif cipher_list[0][3] == "OPTIONAL":
                    optional = True
                    allowed = True

                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
                context.set_ciphers(cipher)
                context.verify_mode = ssl.CERT_REQUIRED
                context.check_hostname = True
                context.load_default_certs()

                ssl_sock = self.__connect_ssl_socket(context)
                priority = ssl_sock.cipher()[2]

                if not allowed:
                    logger.error(
                        "Server supports forbidden cipher suite: " +
                        cipher + " with priority " + str(priority) +
                        ". That should not be the case")

                elif must or should or optional:
                    logger.warning(cipher + " is supported with priority " +
                                   str(priority) +
                                   ". Please verify against the checklist.")

            except ssl.SSLError as err:
                # A one-argument error, or an explicit handshake/cipher
                # failure, both mean the server rejected this cipher suite.
                handshake_failed = (
                    len(err.args) == 1
                    or (len(err.args) > 1
                        and ("SSLV3_ALERT_HANDSHAKE_FAILURE" in err.args[1]
                             or "NO_CIPHERS_AVAILABLE" in err.args[1])))
                if handshake_failed:
                    if must:
                        logger.error(
                            cipher +
                            " is not supported but is required by the checklist"
                        )
                    else:
                        logger.info(
                            cipher +
                            " is not supported. That appears to be OK.")
Exemplo n.º 60
    def __init__(self,
                 config,
                 min_freq=1,
                 special_token=['<PADDING>', '<OOV>'],
                 max_size=None):
        """
        vocabulary class for text classification, initialized from pretrained embedding file
        and update based on minimum frequency and maximum size
        :param config: helper.configure, Configure Object
        :param min_freq: int, the minimum frequency of tokens
        :param special_token: List[Str], e.g. padding and out-of-vocabulary
        :param max_size: int, maximum size of the overall vocabulary
        """
        logger.info('Building Vocabulary....')
        self.corpus_files = {
            "TRAIN": os.path.join(config.data.data_dir,
                                  config.data.train_file),
            "VAL": os.path.join(config.data.data_dir, config.data.val_file),
            "TEST": os.path.join(config.data.data_dir, config.data.test_file)
        }
        counter = Counter()
        self.config = config
        # counter for tokens
        self.freqs = {'token': counter.copy(), 'label': counter.copy()}
        # vocab to index
        self.v2i = {'token': dict(), 'label': dict()}
        # index to vocab
        self.i2v = {'token': dict(), 'label': dict()}

        self.min_freq = max(min_freq, 1)
        if not os.path.isdir(self.config.vocabulary.dir):
            os.makedirs(self.config.vocabulary.dir)
        token_dir = os.path.join(self.config.vocabulary.dir,
                                 self.config.vocabulary.vocab_dict)
        label_dir = os.path.join(self.config.vocabulary.dir,
                                 self.config.vocabulary.label_dict)
        vocab_dir = {'token': token_dir, 'label': label_dir}
        if os.path.isfile(label_dir) and os.path.isfile(token_dir):
            logger.info('Loading Vocabulary from Cached Dictionary...')
            with open(token_dir, 'r') as f_in:
                for i, line in enumerate(f_in):
                    data = line.rstrip().split('\t')
                    assert len(data) == 2
                    self.v2i['token'][data[0]] = i
                    self.i2v['token'][i] = data[0]
            with open(label_dir, 'r') as f_in:
                for i, line in enumerate(f_in):
                    data = line.rstrip().split('\t')
                    assert len(data) == 2
                    self.v2i['label'][data[0]] = i
                    self.i2v['label'][i] = data[0]
            for vocab in self.v2i.keys():
                logger.info('Vocabulary of ' + vocab + ' ' +
                            str(len(self.v2i[vocab])))
        else:
            logger.info('Generating Vocabulary from Corpus...')
            self._load_pretrained_embedding_vocab()
            self._count_vocab_from_corpus()
            for vocab in self.freqs.keys():
                logger.info('Vocabulary of ' + vocab + ' ' +
                            str(len(self.freqs[vocab])))

            self._shrink_vocab('token', max_size)
            for s_token in special_token:
                self.freqs['token'][s_token] = self.min_freq

            for field in self.freqs.keys():
                temp_vocab_list = list(self.freqs[field].keys())
                for i, k in enumerate(temp_vocab_list):
                    self.v2i[field][k] = i
                    self.i2v[field][i] = k
                logger.info('Vocabulary of ' + field + ' with the size of ' +
                            str(len(self.v2i[field].keys())))
                with open(vocab_dir[field], 'w') as f_out:
                    for k in list(self.v2i[field].keys()):
                        f_out.write(k + '\t' + str(self.freqs[field][k]) +
                                    '\n')
                logger.info('Save Vocabulary in ' + vocab_dir[field])
        self.padding_index = self.v2i['token']['<PADDING>']
        self.oov_index = self.v2i['token']['<OOV>']
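The cached dictionaries written and re-read above are two-column TSV files, one entry per line (token, then its corpus frequency; the vocabulary index is implied by line order). A hypothetical excerpt of the token file:

<PADDING>	1
<OOV>	1
the	120384
of	67219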