Example #1
def send_email(send_to, templaterich, templateplain, subject, **kwargs):
    """
        Sends an email to the target email with two types
            1) HTML
            2) Plain

        We will try the template with .htm for rich and .txt for plain.

        Both will rendered with Jinja2
    """

    mailer = Mailer(dict(
        transport=dict(use='smtp', host=config.EMAIL_SMTP_SERVER, debug=config.EMAIL_DEBUG),
        manager=dict()))

    mailer.start()

    message = mailer.new()
    message.author = config.EMAIL_SENDER
    message.to = send_to
    message.subject = subject

    template_rich = env.get_template(templaterich)
    template_plain = env.get_template(templateplain)

    message.rich = template_rich.render(**kwargs)
    message.plain = template_plain.render(**kwargs)

    logger.info('Sent an email to ' + send_to)

    message.send()
    mailer.stop()
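A minimal usage sketch (not from the original project): the recipient, template names, and context variable below are illustrative and assume the corresponding templates exist in the Jinja2 environment `env`.

send_email(
    send_to='user@example.com',          # hypothetical recipient
    templaterich='welcome.htm',          # assumed to exist in the Jinja2 env
    templateplain='welcome.txt',
    subject='Welcome!',
    username='Alice',                    # forwarded to template.render(**kwargs)
)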
Example #2
 def dataHandleCoroutine(self):
     length = self.factory.dataprotocl.getHeadlength()
     while True:
         data = yield
         self.buff += data
         while len(self.buff) >= length:
             try:
                 self.factory.dataprotocl.unpack(self.buff[:length])
             except Exception:
                 logger.info('receive illegal header package')
                 self.transport.loseConnection()
                 break
             rlength = self.factory.dataprotocl.datalen
             command = self.factory.dataprotocl.command
             cmdid = self.factory.dataprotocl.cmdid
             request = self.buff[length:length + rlength]
             if len(request) < rlength:
                 logger.info('some data lost')
                 break
             self.buff = self.buff[length + rlength:]
             d = self.factory.doDataReceived(self, command, request)
             if not d:
                 continue
             d.addCallback(self.safeToWriteData, cmdid)
             d.addErrback(DefferedErrorHandle)
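A self-contained sketch (none of it from the project above) of how a generator-based handler like dataHandleCoroutine is typically driven: it must be primed with next() once, after which each chunk received from the transport is pushed in with send().

def make_buffering_handler(on_chunk):
    def handler():
        buff = b''
        while True:
            data = yield              # each call to send() resumes here
            buff += data
            on_chunk(buff)
    gen = handler()
    next(gen)                         # prime the coroutine up to the first yield
    return gen

h = make_buffering_handler(lambda b: print(len(b), 'bytes buffered'))
h.send(b'\x01\x02')                   # e.g. called from dataReceived()
h.send(b'\x03')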
Example #3
File: root.py Project: chenrui/push
 def remote_takeProxy(self, name, transport):
     '''Add a node.
     '''
     logger.info('node [%s] connect' % name)
     node = Node(name, name)
     self.nodesmanager.addNode(node)
     node.setTransport(transport)
     self.doNodeConnect(name, transport)
Example #4
 def storage(self, data):
     if not self.parse_audience(data['audience']):
         logger.info('audience %s not found' % data['audience'])
         return ErrorPage(ErrNo.NO_MATCHED_OBJ)
     msg = self.parse_nofification(data['app_key'], data['notification'], data.get('options', None))
     # send msg async
     threads.deferToThread(self.send_to_router, data['audience'], msg)
     return SuccessPage(msg.to_dict(exclude=('app_key', 'generator', 'title', 'body', 'expires')))
Example #5
File: root.py Project: chenrui/push
 def dropNodeSessionId(self, session_id):
     '''Remove the record of a child node.'''
     node = self.nodesmanager.getNodeBySessionId(session_id)
     if not node:
         return
     logger.info('node [%s] lost' % node._name)
     node_id = node._id
     self.doNodeLostConnect(node_id)
     self.nodesmanager.dropNodeByID(node_id)
Example #6
def _push(connID, did, data):
    msg = PushMessage()
    msg.id = data['id']
    msg.sendno = data['sendno']
    msg.generator = data['generator']
    msg.title = data['title']
    msg.body = data['body']
    msg.timestamp = data['timestamp']
    logger.info('push message(id:%d, did:%s)' % (msg.id, did))
    factory.connmanager.pushObject(DataPackProtoc.CMD_PUAH, msg.SerializeToString(), [connID])
Example #7
 def getAvgTXPwrByDistance(self):
     avgtxs = []
     uniqueDistances = list(set(self.distances))
     uniqueDistances.sort()
     logger.info("Found %d unique distances" % len(uniqueDistances))
     logger.info("Calculating Average TXPwr for each distance") 
     for distance in uniqueDistances:
         es = self.findEventByDistance(distance)
         avgtxs.append(es.avgtxpwr)
     
     return uniqueDistances, np.array(avgtxs)
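The same group-by-unique-distance averaging can be sketched with plain numpy and made-up sample data (illustrative only; the real class pulls the values out of its event objects):

import numpy as np

distances = np.array([100, 100, 200, 200, 200, 300])
txpwr = np.array([10.0, 12.0, 9.0, 11.0, 10.0, 8.0])

unique_distances = np.unique(distances)                      # sorted unique values
avg_tx = np.array([txpwr[distances == d].mean() for d in unique_distances])
print(unique_distances, avg_tx)                              # [100 200 300] [11. 10.  8.]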
Example #8
    def train(self, counter=1, gen_dirs=()):
        if self.conf.need_to_load:
            self.load(self.conf.checkpoint_dir, step=counter)

        data = self.data
        logger.info('Total amount of images: %s' % len(data))
        # np.random.shuffle(data)

        tf.initialize_all_variables().run()

        # counter = 1
        start_time = time.time()
        batch_idxs = min(len(data), self.conf.train_size) / self.conf.batch_size

        stego_accuracy = 0

        accuracies = []
        accuracies_steps = []

        logger.debug('Starting updating')
        for epoch in range(self.conf.epoch):
            losses = []

            np.random.shuffle(data)

            logger.info('Starting epoch %s' % epoch)

            for idx in range(0, int(batch_idxs)):
                batch_files = data[idx * self.conf.batch_size:(idx + 1) * self.conf.batch_size]
                batch = [get_image(batch_file, self.conf.image_size)
                         for batch_file in batch_files]
                batch_images = np.array(batch).astype(np.float32)

                batch_targets = self.get_targets(batch_files)

                self.sess.run(self.optimize, feed_dict={self.images: batch_images, self.target: batch_targets})
                loss = self.loss.eval({self.images: batch_images, self.target: batch_targets})

                losses.append(loss)

                # logger.debug("[ITERATION] Epoch [%2d], iteration [%4d/%4d] time: %4.4f, Loss: %8f, accuracy: %8f" %
                #              (epoch, idx, batch_idxs, time.time() - start_time, loss, stego_accuracy))

                counter += 1

                if counter % 300 == 0:
                    logger.info('------')

                    stego_accuracy = self.accuracy(n_files=-1, test_dir=self.test_dir)
                    logger.info('[TEST] Epoch {:2d} accuracy: {:3.1f}%'.format(epoch + 1, 100 * stego_accuracy))

                    for gen_dir in gen_dirs:
                        gen_accuracy = self.accuracy(n_files=-1, test_dir=gen_dir)
                        logger.info('[GEN_TEST] Folder {}, accuracy: {:3.1f}%'.format(gen_dir, 100 * gen_accuracy))
Example #9
 def drawTXRegion(self,pos,size):
     draw = ImageDraw.Draw(self.cimg)
     x,y = pos
     x = np.int16(np.floor(x))
     y = np.int16(np.floor(y))
     logger.info("x:%f,y:%f" % (x,y))
     draw.rectangle((x-size,y-size,x+size,y+size),
     outline=(255,255,0), fill=(255,0,255))
     
     draw.text((x-size/2,y-size/2), "Rep")
     return np.asarray(self._cimg)
Example #10
 def getAvgPathlossByDistance(self):
     avgloss = []
     uniqueDistances = list(set(self.distances))
     uniqueDistances.sort()
     logger.info("Found %d unique distances" % len(uniqueDistances))
     logger.info("Calculating Average Pathloss for each distance") 
     for distance in uniqueDistances:
         es = self.findEventByDistance(distance)
         avgloss.append(es.avgpathloss)
     
     return uniqueDistances, np.array(avgloss)
Example #11
 def plot(self):
     logger.info("Plotting Data")
     self.fig1 = pyplot.figure(1)
     self.fig1.show()
     rxpwrs = self.events.rxpwrs
     pyplot.scatter(self.events.distances, self.events.rxpwrs)
     pyplot.plot(self.dps, self.rxp, c='r')  
     pyplot.scatter(self.dps,self.rxp,c='r')
     ds = np.arange(100,1300,5)
     estrx = ChannelModel.estPwr(self.kappa,self.eta,ds)
     pyplot.plot(ds,estrx,c='g', linewidth=3)
Example #12
def getAppData(key):
    logger.info("Hitting URL %s", request.url)

    error, response = None, None
    response = AppDataCore.getAppDataForKey(key)

    if response is None:
        error = "Invalid data requested"

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #13
    def save(self, checkpoint_dir, step):

        model_dir = "%s_%s" % (self.conf.model_name, self.conf.batch_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)

        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        ckpt_name = "%s_%s.ckpt" % (self.conf.model_name, step)

        logger.info("[SAVING] step: %s, name: %s" % (step, ckpt_name))
        self.saver.save(self.sess, os.path.join(checkpoint_dir, ckpt_name), global_step=step)
Example #14
def readfile():
    logger.info("Hitting URL %s", request.url)

    rawContent = None
    filePath = request.args.get("file_path")
    error, response = FileIO.readFile(filePath)

    if error:
        response = None

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #15
def component_proxy(message):
    """
    Proxy between email component and sms interpreter.
    Email component event returns an email.Message object and the interpreter takes 2 arguments:
        text and a mobile number.
    """
    response_dictionary = sms.send_sms(str(message["To"].split('@')[0]), message.get_payload())

    if response_dictionary["status"] == 200:
        logger.info("SMS Sent out successfully to: " + str(message["To"].split('@')[0]))
    else:
        logger.info("SMS Sent out to " + str(message["To"].split('@')[0]) + " with the status code: " + str(
            response_dictionary["status"]))
Example #16
def getProfileInfo(profileName):
    logger.info("Hitting URL %s", request.url)

    error, response = None, None
    appData = AppDataCore.getAppData()
    response = appData["profile"].get(profileName, None)

    if response is None:
        error = "Profile %s does not exist." % (profileName)

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #17
    def load(self, checkpoint_dir, step):
        model_dir = "%s_%s" % (self.conf.model_name, self.conf.batch_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        try:
            ckpt_name = "%s_%s.ckpt-%s" % (self.conf.model_name, step, step)

            logger.info("[LOADING] step: %s, name: %s" % (step, ckpt_name))
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
        except Exception as e:
            logger.debug(e)
            ckpt_name = "StegoDCGAN-%s" % (step)

            logger.info("[LOADING] step: %s, name: %s" % (step, ckpt_name))
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
Example #18
    def __init__(self, events, estkappa=-35.0, esteta=2.0, maxloop=1000):
        self.estkappa = estkappa
        self.esteta = esteta
        self.eta = self.esteta
        self.kappa = self.estkappa
        self.events = events
        self.maxloop = maxloop

        self.fp = lambda v,x,y: ChannelModel.estDistance(v[0],v[1],x,y)
        self.err = lambda v,x,y,z: (self.fp(v,x,y)-z)
        self.v0 = [self.estkappa, self.esteta]
        logger.info("Sorting By Distance")
        self._sortByDistance()
        logger.info("Calculating Optimized Parameters")
        self._optimizeParams()
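The residual function err and starting point v0 above are shaped for a least-squares fit. A generic, self-contained sketch of that pattern with scipy follows; the real estDistance/estPwr math lives in ChannelModel and is not reproduced, and the log-distance model below is only illustrative.

import numpy as np
from scipy.optimize import leastsq

x = np.linspace(100, 1000, 50)                      # distances
z = -35.0 - 10 * 2.0 * np.log10(x)                  # synthetic measurements (kappa=-35, eta=2)

fp = lambda v, x: v[0] - 10 * v[1] * np.log10(x)    # model with parameters v = [kappa, eta]
err = lambda v, x, z: fp(v, x) - z                  # residuals, same shape as the class's err
v0 = [-30.0, 1.5]                                   # initial guess

v_opt, _ = leastsq(err, v0, args=(x, z))
print(v_opt)                                        # recovers approximately [-35.0, 2.0]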
Example #19
def check_acking_queue():
    current_time = int(time.time())
    rets = msgQueue.get_from_acking()
    if len(rets) == 0:
        return
    for item in rets:
        did = item[0]
        msg = item[1]
        expires = int(msg['expires'])
        if current_time > expires:
            logger.info('message(id:%d, did:%s) expired, drop it' % (int(msg['id']), did))
            msgQueue.rem_from_acking(did, msg['id'])
            msgQueue.add_to_dead(did, msg['id'], MessageStatus.EXPIRED)
        else:
            # TODO: resend???
            pass
Example #20
def addProfile(profile):
    logger.info("Hitting URL %s", request.url)

    error, response = None, None
    action = request.form.get("action")

    if action == "add":
        error, response = TagCore.addProfile(profile)
    elif action == "delete":
        error, response = TagCore.removeProfile(profile)

    if error:
        response = None

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #21
def send_to_router():
    current_time = int(time.time())
    rets = msgQueue.get_from_sending()
    if len(rets) == 0:
        return
    for item in rets:
        did = item[0]
        msg = item[1]
        expires = int(msg.pop('expires'))
        msg['sendno'] = int(msg['sendno'])
        msg['timestamp'] = int(msg['timestamp'])
        msg['id'] = int(msg['id'])
        if current_time > expires:
            logger.info('message(id:%d, did:%s) not send and expired, drop it' % (msg['id'], did))
            msgQueue.rem_from_sending(did, msg['id'])
            msgQueue.add_to_dead(did, msg['id'], MessageStatus.NOT_SEND)
        else:
            defer = remote.callRemote('is_device_online', did)
            defer.addCallback(_send_callback, did, msg)
Example #22
    def accuracy(self, test_dir='test', abs=False, n_files=2 ** 12):
        logger.info('[TEST], test data folder: %s, n_files: %s' % (test_dir, 2 * n_files))
        X_test = self.get_images_names('%s/*.%s' % (test_dir, self.conf.img_format), abs=abs)[:n_files]

        accuracies = []

        batch_idxs = min(len(X_test), self.conf.train_size) / self.conf.batch_size

        # logger.debug('Starting iteration')
        for idx in range(0, int(batch_idxs)):
            batch_files_stego = X_test[idx * self.conf.batch_size:(idx + 1) * self.conf.batch_size]
            batch = [get_image(batch_file, self.conf.image_size) for batch_file in batch_files_stego]
            batch_images = np.array(batch).astype(np.float32)

            batch_targets = self.get_targets(batch_files_stego)

            accuracies.append(self.get_accuracy(batch_images, batch_targets))

        return np.mean(accuracies)
Example #23
def main():
    logger.info('Started.')

    gmail = build('gmail', 'v1', http=make_google())
    gmail_send = gmail.users().messages().send
    email_messages = {}

    with open(CONFIG_FILE, 'r') as file:
        users = json.load(file)
    logger.debug('User loaded')

    for endpoint in ENDPOINTS:
        logger.info('Processing endpoint: %s', endpoint.name)
        endpoint.request()
        if not make_cache(endpoint.name, endpoint._text):
            logger.debug('Content is same with cached. Skipping.')
            continue
        for user in users:
            logger.debug('Processing user: %s', user['name'])
            if user['name'] in endpoint:
                logger.debug('User in endpoint.')
                message = make_message('match', regexp=user['name'],
                                       name=endpoint.name, url=endpoint.url)
                email_messages.setdefault(user['email'], []).append(message)

    logger.info('Done fetching endpoints. Now drafting email.')

    queue = []
    for recipient, messages in email_messages.items():
        message_text = make_message('frame', matches='\n'.join(messages))
        message = MIMEText(message_text)
        message['to'] = recipient
        # message['from'] = sender
        message['subject'] = MAIL_SUBJECT

        # The byte/str drama, you know.
        raw = base64.b64encode(message.as_string().encode())
        queue.append({'raw': raw.decode()})

    logger.info('%d email(s) have been drafted. Sending.', len(queue))
    consume_message_queue(gmail_send, queue)
    logger.info('Done.')
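consume_message_queue is not shown in the snippet; a minimal sketch of what it plausibly does with the gmail_send callable built above (userId='me' is the standard Gmail API form; retries and error handling are omitted):

def consume_message_queue(send, queue):
    # send is gmail.users().messages().send; each queue item is {'raw': <base64 string>}
    for body in queue:
        send(userId='me', body=body).execute()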
Example #24
    def run(self, stop_on_failure=False):
        fail_cnt = 0
        metadata = None
        getter = self.iter_get()
        try:
            while True:
                try:
                    # Some getters require metadata from the last response to figure out the
                    # pagination of the next one
                    data_str = getter.send(metadata)
                    logger.debug('Got: %s' % data_str)
                    logger.info('Got records')

                    data = self.parse(data_str)
                    logger.debug('Parsed into: %s' % str(data))

                    metadata, records = self.normalize(data)
                    logger.debug('Normalized: \nMetadata: %s \nRecords: %s' % \
                        (str(metadata), str(records)))
                    logger.info('Normalized %d records' % len(records))

                    logger.info('Storing')
                    self.store(metadata, records)
                except Exception as e:
                    if isinstance(e, StopIteration):
                        raise

                    if stop_on_failure or fail_cnt > 100:
                        raise
                    else:
                        fail_cnt += 1
                        sys.stderr.write("Failed: %s" % str(e))
                        continue
        except StopIteration:
            pass
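A hedged sketch (not the project's code) of the generator protocol the run() loop above relies on: iter_get() yields raw response strings and receives pagination metadata back through send(); the first send(None) matches metadata's initial value.

def iter_get_pages(fetch_page, n_pages=3):
    # fetch_page(metadata) -> raw response string; metadata carries e.g. an 'after' cursor
    metadata = None
    for _ in range(n_pages):
        data_str = fetch_page(metadata)
        metadata = yield data_str        # run() sends back the metadata it parsed
    # falling off the end raises StopIteration, which run() treats as completion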
Example #25
def main(_):
    logger.info('====================================================')
    logger.info('===================NEW EXPERIMENT===================')
    logger.info('====================================================')

    logger.info(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        steganalisys = Steganalyzer(config=FLAGS, sess=sess, stego_algorithm=LSBMatching, stego_name='lsb_matching')

        if FLAGS.is_train:
            steganalisys.train(counter=1, gen_dirs=['gen_test_seed_666', 'gen_test_more_train'])
        else:
            # steganalisys.load(FLAGS.checkpoint_dir, step=28481) # LSB matching clf
            steganalisys.load(FLAGS.checkpoint_dir, step=122114)

        print('ACCURACY:::::::::%s' % steganalisys.accuracy(test_dir='gen_test_more_train', n_files=-1))
Example #26
    def normalize_metadata(self, data):
        logger.info('Original Structure:\n%s' % get_structure(data))

        records = [d['data'] for d in data['data']['children']]
        return {'after': data['data']['after']}, records
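For reference, a toy input in the Reddit-listing shape this normalizer expects, and the metadata/records split it yields (values made up):

data = {'data': {'after': 't3_xyz',
                 'children': [{'data': {'id': 'a1'}}, {'data': {'id': 'a2'}}]}}
# normalize_metadata(data) would then return:
#   ({'after': 't3_xyz'}, [{'id': 'a1'}, {'id': 'a2'}])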
Example #27
 def normalize_metadata(self, data):
     data = fix_single_elems(data)
     logger.info('Original Structure:\n%s' % get_structure(data))
     return {}, [data['result']['project']]
Example #28
# +++++++++++++++++++++++++++++++++++++++++++++++++++
import time

import numpy as np

from data.load_data import load_data
from utils.args import args
from utils.logger import logger
from utils.calc_hamming_ranking import calc_hamming_ranking
from dlfh_algo import dlfh_algo
from hash_func import linear_hash

if __name__ == "__main__":
    np.random.seed(args.seed)
    '''load dataset'''
    logger.info('load dataset: {}'.format(args.dataname))
    test_text, test_image, test_label, database_text, database_image, database_label = load_data(
    )
    '''learning procedure'''
    logger.info('start training procedure')
    start_t = time.time()
    db_image_codes, db_text_codes = dlfh_algo(train_labels=database_label)
    '''out-of-sample extension'''
    wx = linear_hash(database_image, db_image_codes)
    wy = linear_hash(database_text, db_text_codes)

    end_t = time.time() - start_t
    '''start encoding'''
    test_image_codes = np.sign(test_image.dot(wx))
    test_text_codes = np.sign(test_text.dot(wy))
Example #29
def fastai_prediction(train_df, test_df, test_folder, column_list, path,
                      topic):
    result_dict = {}
    for column in column_list:
        df_trn = train_df[['image_path', column]].copy()
        df_trn.columns = ['name', 'label']
        df_trn = df_trn[~pd.isnull(df_trn['label'])]
        df_trn['label'] = df_trn['label'].astype(str)

        tfms = get_transforms()

        data = ImageDataBunch.from_df(path,
                                      df=df_trn,
                                      size=224,
                                      num_workers=6,
                                      ds_tfms=tfms,
                                      valid_pct=0.0001)
        data.normalize()

        learn = create_cnn(data, models.resnet34, metrics=accuracy)
        learn.fit(1)
        learn.unfreeze()
        learn.fit(5)

        train_metric = learn.validate(learn.data.train_dl)
        val_metric = learn.validate(learn.data.valid_dl)
        train_loss = train_metric[0]
        val_loss = val_metric[0]
        train_acc = train_metric[1].item()
        val_acc = val_metric[1].item()

        logger.info('Train Loss on Topic {} : {}'.format(column, train_loss))
        logger.info('Validation Loss on Topic {} : {}'.format(
            column, val_loss))
        logger.info('Train Accuracy on Topic {} : {}'.format(
            column, train_acc))
        logger.info('Validation Accuracy on Topic {} : {}'.format(
            column, val_acc))

        learn.export(fname='{}_{}_export.pkl'.format(topic, column))

        test = ImageList.from_folder(path / test_folder)
        final_learn = load_learner(path,
                                   test=test,
                                   fname='{}_{}_export.pkl'.format(
                                       topic, column))
        test_items = final_learn.data.label_list.test.items
        test_preds = final_learn.get_preds(ds_type=DatasetType.Test)
        pred_df = pd.DataFrame(to_np(test_preds[0]),
                               columns=learn.data.classes)

        pred_df['image_path'] = test_items
        pred_df['image_path'] = pred_df['image_path'].apply(
            lambda y: str(y).split('/')[-1])

        test_df['image_path'] = test_df['image_path'].apply(
            lambda y: str(y).split('/')[-1])

        merge_df = test_df[['image_path']].merge(pred_df,
                                                 on='image_path',
                                                 how='left',
                                                 validate='1:1')
        merge_df = merge_df.drop(columns=['image_path'])

        result_dict[column] = merge_df
    return result_dict
Example #30
 def stop(self):
     for idx, proc in enumerate(self.procs):
         proc.join()
         if self.verbose:
             logger.info("process: {} done".format(idx))
Example #31
        return "Password Mismatch," + "NOT MINING" if consts.NO_MINING else "MINING"


@app.route("/<url:re:.+>")
@error(403)
@error(404)
@error(505)
def error_handle(url="url", error="404"):
    log_ip(request, inspect.stack()[0][3])
    return template("error.html")


if __name__ == "__main__":
    try:
        if consts.NEW_BLOCKCHAIN:
            logger.info("FullNode: Starting New Chain from Genesis")
            BLOCKCHAIN.add_block(genesis_block)
        else:
            # Restore Blockchain
            logger.info("FullNode: Restoring Existing Chain")
            header_list = read_header_list_from_db()
            BLOCKCHAIN.build_from_header_list(header_list)

        # Sync with all my peers
        sync_with_peers()

        # Start mining Thread
        Thread(target=start_mining_thread, daemon=True).start()
        if consts.NO_MINING:
            logger.info("FullNode: Not Mining")
Example #32
def save_pt_key(pin, ptkey):
    Jd.update(
        pin=pin,
        ptkey=ptkey,
    ).where(Jd.pin == pin).execute()
    logger.info("更新京东用户ptkey【{}】信息".format(pin))
Example #33
    def fit_generator(self):
        # Save hyperparameters
        self.parameters['model_env_parameters'][
            'is_training'] = False  # these are set to False for prediction
        self.parameters['model_env_parameters']['trainable'] = False
        save_json(jsons=self.i2l, json_path=self.index2label_path)
        save_json(jsons=self.parameters, json_path=self.path_parameters)
        model_code = self.model_code

        # DataGenerator is just a way of feeding data that saves memory
        class DataGenerator:
            def __init__(self,
                         data,
                         l2i,
                         tokenizer,
                         categories,
                         maxlen=128,
                         batch_size=32,
                         shuffle=True):
                self.data = data
                self.l2i = l2i
                self.batch_size = batch_size
                self.categories = categories
                self.maxlen = maxlen
                self.tokenizer = tokenizer
                self.shuffle = shuffle
                self.steps = len(self.data) // self.batch_size
                if len(self.data) % self.batch_size != 0:
                    self.steps += 1

            def __len__(self):
                return self.steps

            def __iter__(self):
                while True:
                    idxs = list(range(len(self.data)))
                    if self.shuffle:
                        np.random.shuffle(idxs)
                    X, Y = [], []
                    for i in idxs:
                        d = self.data[i]
                        text = d[1][:self.maxlen].replace(' ', '')
                        x = self.tokenizer.encode(
                            text, algo_code=model_code)  # token_ids
                        # print(text)
                        # print(x)
                        y = self.l2i.get(str(d[0]))
                        X.append(x)
                        Y.append(y)
                        if len(X) == self.batch_size or i == idxs[-1]:
                            X = seq_padding(X, 0, self.maxlen)
                            Y = np.array(to_categorical(Y, self.categories))
                            # print("*"*10,X.shape)
                            yield (X, Y)
                            X, Y = [], []

        train_D = DataGenerator(self.train_data,
                                self.l2i,
                                self.tokenizer,
                                self.categories,
                                self.max_len,
                                self.batch_size,
                                shuffle=True)
        valid_D = DataGenerator(self.valid_data,
                                self.l2i,
                                self.tokenizer,
                                self.categories,
                                self.max_len,
                                self.batch_size,
                                shuffle=True)
        # test_D = DataGenerator(self.test_data, self.l2i,self.tokenizer, self.categories, self.max_len, self.batch_size,
        #                        shuffle=True)

        # Model training
        history = self.model.fit_generator(
            train_D.__iter__(),
            steps_per_epoch=len(train_D),
            epochs=self.epoch,
            validation_data=valid_D.__iter__(),
            validation_steps=len(valid_D),
            callbacks=self.callback(),
        )
        epoch = history.epoch[-1] + 1
        acc = history.history['acc'][-1]
        val_acc = history.history['val_acc'][-1]
        logger.info("model:{}  last_epoch:{}  train_acc{}  val_acc{}".format(
            self.model_code, epoch, acc, val_acc))
Example #34
 def operation(self,
               flag,
               index,
               section,
               buttonName,
               capabilityNamelist=[]):
     """
     :param flag,index,section,buttonName,capabilityNamelist=[]
     :return:
     """
     if self.is_usm_page():
         if buttonName == "New" or (section == "Roles"
                                    and buttonName == "Edit"):
             self.ctrl_all(UsmEntity().get_input_textbox("name"))
             self.type(UsmEntity().get_input_textbox("name"),
                       "UI automation" + nowTime)
             self.ctrl_all(UsmEntity().get_input_textbox("description"))
             self.type(UsmEntity().get_input_textbox("description"),
                       BasePage(self.driver).randomData("string", 6))
             self.sleep(2)
             permission_list = [
                 "Read", "Update", "Create", "Delete", "MassUpdate"
             ]
             # capability_list = self.find_elements(UsmEntity.capabilitity_list)
             capability_item = None
             capability_index = []
             for i in capabilityNamelist:
                 self.ctrl_all(UsmEntity.filter)
                 self.type(UsmEntity.filter, i)
                 capability_list = self.find_elements(
                     UsmEntity.capabilitity_list)
                 for j, item in enumerate(capability_list):
                     if i == item.text:
                         capability_item = j
                         capability_index.append(capability_item)
                         for b in permission_list:
                             self.click(UsmEntity().get_capability(
                                 b, capability_index[0]))
                 if capability_item is None:
                     logger.info(msg="capabilityName %s not found!" % i)
             # for a in capability_index:
             #     for b in permission_list:
             #         self.click(UsmEntity().get_capability(b, a))
         elif section == "Users":
             self.ctrl_all(UsmEntity().get_input_textbox("userName"))
             self.type(UsmEntity().get_input_textbox("userName"),
                       BasePage(self.driver).randomData("string", 6))
             self.ctrl_all(UsmEntity().get_input_textbox("firstName"))
             self.type(UsmEntity().get_input_textbox("firstName"),
                       BasePage(self.driver).randomData("string", 6))
             self.ctrl_all(UsmEntity().get_input_textbox("lastName"))
             self.type(UsmEntity().get_input_textbox("lastName"),
                       BasePage(self.driver).randomData("string", 6))
         elif buttonName == "Inactivate":
             self.click(UsmEntity.confirm_delete)
         if buttonName == "New" or buttonName == "Edit":
             if index == 1:
                 if section == "Roles":
                     self.click(UsmEntity().get_button("Next"))
                     list = "Users"
                 else:
                     list = "Roles"
                 if flag == "add":
                     self.type(
                         UsmEntity.filter,
                         self.find_element(UsmEntity().get_list_value(
                             "available" + list + "Selected", "1")).text)
                     Select(
                         self.find_element(UsmEntity().get_two_list(
                             "available" + list +
                             "Selected"))).select_by_index(0)
                     self.sleep(1)
                     self.click(UsmEntity().get_button("Add"))
                 elif flag == "remove":
                     Select(
                         self.find_element(UsmEntity().get_two_list(
                             "assigned" + list +
                             "Selected"))).select_by_index(0)
                     self.sleep(1)
                     self.click(UsmEntity().get_button("Remove"))
             else:
                 if section == "Roles":
                     self.click(UsmEntity().get_switch_tab("Users"))
                     list = "Users"
                 else:
                     list = "Roles"
                 if flag == "add":
                     self.ctrl_multiSelect(
                         UsmEntity().get_list_value(
                             "available" + list + "Selected", "1"),
                         UsmEntity().get_list_value(
                             "available" + list + "Selected", "2"))
                     self.sleep(1)
                     self.click(UsmEntity().get_button("Add"))
                 elif flag == "remove":
                     self.ctrl_multiSelect(
                         UsmEntity().get_list_value(
                             "assigned" + list + "Selected", "1"),
                         UsmEntity().get_list_value(
                             "assigned" + list + "Selected", "2"))
                     self.sleep(1)
                     self.click(UsmEntity().get_button("Remove"))
             self.click(UsmEntity().get_button("Finish"))
         self.sleep(2)
         if "successfully" in CustomerRecordPage(
                 self.driver).get_tips_msg():
             return True
         else:
             return False
Example #35
 def doFinish(self):
     logger.info('Face count: %s' % self.face_count)
     logger.info('Frame with face: %s' % self.detected_frame_count)
Example #36
    def search(self, key_word, only_need_first=True, needed_pages=50):
        """
        搜索
        :param key_word: 关键字
        :param only_need_first: 只需要第一条
        :param needed_pages: 需要多少页
        :return:
        """
        # Todo 不需要详情页和评论,只需要首页搜索 不需要cookie
        assert isinstance(key_word, str)
        assert key_word != None or key_word.strip() != ''
        if self.custom_search_url != '':
            key_word = self.custom_search_url
        logger.info('开始搜索:' + key_word)
        # header = self.get_header()
        for i in tqdm(range(1, needed_pages + 1), desc='pages'):
            # If only the first result is needed, break out of the page loop
            if only_need_first is True and i != 1:
                break

            url = 'http://www.dianping.com/search/keyword/' + str(
                self.location_id) + '/' + str(
                    self.channel_id) + '_' + str(key_word) + '/p' + str(i)
            # The first page is not addressed by a page number; adding one triggers a captcha
            if i == 1:
                url = 'http://www.dianping.com/search/keyword/' + str(
                    self.location_id) + '/' + str(
                        self.channel_id) + '_' + str(key_word)

                # Replace the url and strip p1 (p1 triggers a captcha)
            if self.custom_search_url != '':
                url = self.custom_search_url + str(i)
                if i == 1:
                    url = self.custom_search_url[:-1]
            r = requests_util.get_requests(url, request_type='search')
            # r = requests.get(url, headers=header)
            text = r.text
            # Fetch the obfuscation (glyph-map) file
            file_map = get_search_map_file(text)
            # Substitute the obfuscated glyphs in the html
            text = requests_util.replace_search_html(text, file_map)

            # Parse the page
            html = BeautifulSoup(text, 'lxml')
            shop_all_list = html.select('.shop-list')[0].select('li')

            search_res = []
            for shop in shop_all_list:
                try:
                    image_path = shop.select('.pic')[0].select('a')[0].select(
                        'img')[0]['src']
                except:
                    image_path = '-'
                try:
                    shop_id = shop.select('.txt')[0].select('.tit')[0].select(
                        'a')[0]['data-shopid']
                except:
                    shop_id = '-'
                try:
                    detail_url = shop.select('.txt')[0].select(
                        '.tit')[0].select('a')[0]['href']
                except:
                    detail_url = '-'
                try:
                    name = shop.select('.txt')[0].select('.tit')[0].select(
                        'a')[0].text.strip()
                except:
                    name = '-'
                # Two star formats: some pages show a detailed star score, others an icon
                # Parse the icon form
                try:
                    star_point = \
                        shop.select('.txt')[0].select('.comment')[0].select('.star_icon')[0].select('span')[0]['class'][
                            1].split('_')[1]
                    star_point = float(star_point) / 10
                    star_point = str(star_point)
                except:
                    star_point = '-'
                # Parse the detailed star score
                try:
                    star_point = \
                        shop.select('.txt')[0].select('.comment')[0].select('.star_score')[0].text
                    star_point = float(star_point)
                    star_point = str(star_point)
                except:
                    pass
                try:
                    review_number = shop.select('.txt')[0].select(
                        '.comment')[0].select('.review-num')[0].text.replace(
                            '\n', '')
                except:
                    review_number = '-'
                try:
                    mean_price = shop.select('.txt')[0].select('.comment')[
                        0].select('.mean-price')[0].select('b')[0].text
                except:
                    mean_price = '¥0'
                try:
                    tags = shop.select('.txt')[0].select(
                        '.tag-addr')[0].select('.tag')
                    tag1 = tags[0].text.replace('\n', ' ').strip()
                    tag2 = tags[1].text.replace('\n', ' ').strip()
                except:
                    tag1 = '-'
                    tag2 = '-'
                try:
                    addr = shop.select('.txt')[0].select(
                        '.tag-addr')[0].select('.addr')[0].text.replace(
                            '\n', ' ').strip()
                except:
                    addr = '-'
                try:
                    recommend = shop.select('.recommend')[0].text.replace(
                        '\n', ' ').strip()
                except:
                    recommend = '-'
                try:
                    commend_list = shop.select(
                        '.comment-list')[0].text.replace('\n', ' ').strip()
                except:
                    commend_list = '-'
                one_step_search_res = [
                    shop_id, name, star_point, review_number, mean_price, tag1,
                    tag2, addr, recommend, commend_list, image_path,
                    detail_url, '1', '1'
                ]  # the last two entries are search flags
                # this data structure is unused for now
                search_res.append(one_step_search_res)
                # Only the first result is wanted; break
                if only_need_first is True:
                    break
                # Parse the detail page
                if self.need_detail == '1':
                    try:
                        detail = Detail().get_detail(shop_id)
                        print('\n' + ','.join(detail) + '\n')
                        self.saver.save_data([detail], 'detail')
                    except:
                        # set the failure flag
                        one_step_search_res[-2] = 0
                        logger.warning('Failed to fetch detail info, shop id: ' + shop_id)
                        print('\n' + ','.join(one_step_search_res) + '\n')
                        if self.jump_wait is False:
                            print(
                                'Check the browser and solve the captcha; enter y to continue, n to skip this check',
                                'http://www.dianping.com/shop/' + str(shop_id))
                            if input() == 'y':
                                continue
                            elif input() == 'n':
                                self.jump_wait = True
                else:
                    print('\n' + ','.join(one_step_search_res) + '\n')
                # Parse the review page
                if self.need_comment == '1':
                    try:
                        review = Review().get_review(shop_id)
                        print('Fetched', len(review), 'reviews for', name)
                        self.saver.save_data(review, 'review')
                    except:
                        # set the failure flag
                        one_step_search_res[-1] = 0
                        logger.warning('Failed to fetch reviews, shop id: ' + shop_id)

                # Save the data
                self.saver.save_data([one_step_search_res], 'search')
        logger.info('Parsing finished: ' + key_word)
Example #37
    # testing contexts
    ctx = [mx.gpu(int(i)) for i in cfg.gpus]
    ctx = ctx if ctx else [mx.cpu()]
    num_anchors = len(cfg.anchor_scales) * len(cfg.anchor_ratios)

    test_dataset = get_dataset(cfg.dataset, cfg.dataset_path)
    test_data = get_dataloader(test_dataset, cfg)
    cfg.dataset_size = len(test_dataset)

    cfg.num_classes = len(test_dataset.classes)
    cfg.classes = test_dataset.classes

    net = FasterRCNN(network=cfg.network, pretrained_base=False, batch_size=cfg.batch_size, num_classes=cfg.num_classes,
                     scales=cfg.anchor_scales, ratios=cfg.anchor_ratios, feature_stride=cfg.feature_stride,
                     allowed_border=cfg.allowed_border, rpn_batch_size=cfg.rpn_batch_size,
                     rpn_fg_fraction=cfg.rpn_fg_fraction, rpn_positive_threshold=cfg.rpn_positive_threshold,
                     rpn_negative_threshold=cfg.rpn_negative_threshold,
                     rpn_pre_nms_top_n=cfg.rpn_test_pre_nms_top_n, rpn_post_nms_top_n=cfg.rpn_test_post_nms_top_n,
                     rpn_nms_threshold=cfg.rpn_nms_threshold,
                     rpn_min_size=cfg.rpn_min_size, roi_batch_size=cfg.roi_batch_size,
                     roi_fg_fraction=cfg.roi_fg_fraction, roi_fg_threshold=cfg.roi_fg_threshold,
                     roi_bg_threshold_hi=cfg.roi_bg_threshold_hi, roi_bg_threshold_lo=cfg.roi_bg_threshold_lo,
                     bbox_nms_threshold=cfg.bbox_nms_threshold, bbox_nms_top_n=cfg.bbox_nms_top_n,
                     bbox_mean=cfg.bbox_mean, bbox_std=cfg.bbox_std)

    net.load_parameters(cfg.model_params.strip(), ctx=ctx)

    map_name, mean_ap = test_faster_rcnn(net, test_data, cfg)
    result_msg = '\n'.join(['%s=%f' % (k, v) for k, v in zip(map_name, mean_ap)])
    logger.info('[Done] Test Results: \n%s' % result_msg)
Example #38
 def _print(self):
     logger.info('Dataset name: {}'.format(self.dataset_name))
     logger.info('Number of training samples: {:,}'.format(
         self.num_train_samples))
     logger.info('Number of validation samples: {:,}'.format(
         self.num_val_samples))
Example #39
    def training(self):
        self.model.train()

        num_img_tr = len(self.train_loader)
        logger.info('\nTraining')

        max_iter = self.args.training.max_iter
        it = self.start_it

        # support multiple optimizers, but only one 
        # optimizer is used here, i.e., names = ['match']
        names = self.args.training.optimizer.keys()

        while it < max_iter:
            for samples in self.train_loader:
                samples = to_cuda(samples)

                # validation
                val_iter = self.args.training.get('val_iter', -1)
                if val_iter > 0 and it % val_iter == 0 and it >= self.args.training.get('start_eval_it', 15000):
                    self.validation(it, 'val')
                    self.model.train()

                if it % 100 == 0:
                    logger.info('\n===> Iteration  %d/%d' % (it, max_iter))
    
                # update class weights
                if it >= 500 and self.args.training.get('weight_update_iter', -1) > 0 and it % self.args.training.get('weight_update_iter', -1) == 0:
                    self.model.update_hard()
                    logger.info('\nUpdate hard ID: %.3f'%self.model.center.ratio)
                    self.writer.add_scalar('train/data_ratio', self.model.center.ratio, it)

                for name in names:
                    losses = dict()

                    self.optimizer[name].zero_grad()
                    outputs = self.model(samples, type=name)
                    losses = self.criterion(outputs, name)
                    loss = losses['loss']
                    loss.backward()
                    self.optimizer[name].step()

                    losses.update(losses)

                    # log training loss
                    if it % 100 == 0:
                        loss_log_str = '=>%s   loss: %.4f'%(name, loss.item())
                        for loss_name in losses.keys():
                            if loss_name != 'loss':
                                loss_log_str += '    %s: %.4f'%(loss_name, losses[loss_name])
                                self.writer.add_scalar('train/%s_iter'%loss_name, losses[loss_name], it)
                        logger.info(loss_log_str)
                        self.writer.add_scalar('train/total_loss_iter_%s'%name, loss.item(), it)

                    # adjust learning rate
                    lr_decay_iter = self.args.training.optimizer[name].get('lr_decay_iter', None)
                    if lr_decay_iter is not None:
                        for i in range(len(lr_decay_iter)):
                            if it == lr_decay_iter[i]:
                                lr = self.args.training.optimizer[name].lr * (self.args.training.optimizer[name].lr_decay ** (i+1))
                                logger.info('\nReduce lr to %.6f\n'%(lr))
                                for param_group in self.optimizer[name].param_groups:
                                    param_group["lr"] = lr 
                                break

                it += 1

                # save model and optimizer
                if it % self.args.training.save_iter == 0 or it == max_iter or it == 1:
                    logger.info('\nSaving checkpoint ......')
                    optimizer_to_save = dict()
                    for i in self.optimizer.keys():
                        optimizer_to_save[i] = self.optimizer[i].state_dict()
                    self.saver.save_checkpoint({
                        'start_it': it,
                        'stage': self.stage,
                        'state_dict': self.model.state_dict(),
                        'optimizer': optimizer_to_save,
                    }, filename='ckp_%06d.pth.tar'%it)
                    logger.info('Done.')
Example #40
def train_classification(data_loader, model, criterion, optimizer, scheduler,
                         num_epochs, device, data_type):
    """
    - training for image classification
    - One epoch's training.

    :param data_loader:
    :param model:
    :param criterion:
    :param optimizer:
    :param scheduler:
    :param num_epochs:
    :param device:
    :return:
    """
    print_freq = 200  # print training or validation status every __ batches
    model.train()  # training mode enables dropout

    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    losses = AverageMeter()  # loss

    start = time.time()

    # Batches
    for i, (images, labels) in enumerate(data_loader):
        data_time.update(time.time() - start)

        # Move to default device
        images = images.to(dtype=data_type)
        images = images.to(device)  # (batch_size (N), 3, 300, 300)
        labels = labels.to(device)

        # Forward prop.
        outputs = model(images)

        # Loss
        loss = criterion(outputs, labels)

        # Backward prop.
        optimizer.zero_grad()
        loss.backward()

        # # Clip gradients, if necessary
        # if grad_clip is not None:
        #     clip_gradient(optimizer, grad_clip)

        # Update models
        optimizer.step()

        losses.update(loss.item(), images.size(0))
        batch_time.update(time.time() - start)

        start = time.time()

        # Print status
        if i % print_freq == 0:
            logger.info(
                'Epoch: [{0}][{1}/{2}]\t'
                'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    num_epochs,
                    i,
                    len(data_loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses))
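AverageMeter is not defined in the snippet; a minimal sketch of the usual helper with the val/avg/update interface used above:

class AverageMeter:
    """Tracks the latest value and a running average."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count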
Example #41
            defer.addCallback(lambda ret: dev.SerializeToString() if ret == RetNo.SUCCESS else None)
            return defer
        d.addCallback(callback)
        return d


@gatewayServiceHandle
def push_ack(conn, data):
    try:
        ack = PushMessageAck()
        ack.ParseFromString(data)
        ids = json.loads(ack.ids)
    except Exception as e:
        logger.error(e)
        return None
    logger.info('ack message ids:%s, did:%s' % (ids, conn.device_id))
    remote.callRemote('messages_ack', conn.device_id, ids)


@routerServiceHanlde
def push(did, data):
    connID = ConnectionMapping.get(did, None)
    if connID is not None:
        _push(connID, did, data)


def _push(connID, did, data):
    msg = PushMessage()
    msg.id = data['id']
    msg.sendno = data['sendno']
    msg.generator = data['generator']
Example #42
        break

except RobotError as e:
    logger.error("{0} robot Event:{1}".format(robot.get_local_time(), e))

finally:
    # Disconnect from the server
    if robot.connected:
        # Power down the robot arm
        robot.robot_shutdown()
        # Disconnect from the robot arm
        robot.disconnect()
        print("Disconnected")
    # Release library resources
    WuziRobot.uninitialize()
    logger.info("{0} test completed.".format(WuziRobot.get_local_time()))

# Stop streaming
pipeline.stop()

import time
time.sleep(1)

robot_first = True
controller = Controller(True)

if robot_first:
    controller.action(None)
new_chess = controller.wait_human()

while 1:
Example #43
if __name__ == '__main__':
    # trainLabbeledPath = os.path.join(lmdb_root_path, 'train-labelled')
    # trainUnLabelledPath = os.path.join(lmdb_root_path, 'train-unlabelled')
    # testCleanPath = os.path.join(lmdb_root_path, 'test-clean')
    # testOtherPath = os.path.join(lmdb_root_path, 'test-other')
    trainAirtelPaymentsPath = os.path.join(lmdb_airtel_payments_root_path,
                                           'train-labelled-en')
    testAirtelPaymentsPath = os.path.join(lmdb_airtel_payments_root_path,
                                          'test-labelled-en')
    # data_train = lmdbMultiDataset(roots=[trainLabbeledPath, trainUnLabelledPath, trainAirtelPaymentsPath], transform=image_train_transform)
    # data_test = lmdbMultiDataset(roots=[testAirtelPaymentsPath, testCleanPath, testOtherPath], transform=image_val_transform)
    data_train = lmdbMultiDataset(roots=[trainAirtelPaymentsPath],
                                  transform=image_train_transform)
    data_test = lmdbMultiDataset(roots=[testAirtelPaymentsPath],
                                 transform=image_val_transform)
    logger.info(f'Number of examples in train is {len(data_train)}')
    logger.info(f'Number of examples in test is {len(data_test)}')
    for epoch in range(1):
        data_loader_train = torch.utils.data.DataLoader(
            data_train,
            batch_size=train_batch_size,
            shuffle=True,
            num_workers=workers,
            pin_memory=True,
            collate_fn=allign_collate)
        data_loader_test = torch.utils.data.DataLoader(
            data_test,
            batch_size=train_batch_size,
            shuffle=True,
            num_workers=workers,
            pin_memory=True,
            collate_fn=allign_collate)
Example #44
def update_opts_to_devmode(opts):
    logger.info('dev mode activated... overriding parameters:')
    print('                                --debug_mode = True,')
    print('                                --use_samples = True,')
    print('                                --max_nsents = 25,')
    print('                                --intrasentsim_samplesize = 10,')
    print('                                --isotropycorrection_samplesize = 25')
    print('                                --selfsim_samplesize = 1')
    opts.debug_mode = True
    opts.max_nsents = 25
    #opts.use_samples=False
    opts.intrasentsim_samplesize = 10
    opts.isotropycorrection_samplesize = 25
    opts.selfsim_samplesize = 3
    #opts.save_results = True
    #opts.huggingface_models = 'bert-base-uncased'


if __name__ == '__main__':
    parser = opts.get_parser()
    opt = parser.parse_args()
    if opt.dev_params:
        update_opts_to_devmode(opt)
    
    if opt.debug_mode:
        logger.info('HUOM! debug mode activated... ipdb will be launched on exeption ... ')

        with ipdb.launch_ipdb_on_exception():
            main(opt)
    else:
        main(opt)
Example #45
        id_bar = driver.find_element(By.CSS_SELECTOR, 'input[name="txtMRNo"]')
        id_bar.send_keys(patient['id'])
        time.sleep(300)
        return
    except Exception as e:
        print(f'execute {patient} failed, {e}')
        logger.exception(f'execute {patient} failed, {e}')
        return


if __name__ == "__main__":
    patient_list = []
    # read file
    print('start register')
    logger.info('start register')
    try:
        dir_path = os.path.dirname(os.path.realpath(__file__))
        print('show dir path %s' % dir_path)
        # TODO check OS
        # if os = mac, encoding = utf-8
        # else if windows, endcoding = utf-8-sig
        with open('patients.json', 'r+') as patient_file:
            patient_list = json.load(patient_file)
        print(f'patient list {patient_list}')
        logger.info(f'patient list {patient_list}')
    except Exception as e:
        logger.exception(f'failed to read file, {e}')
        print(f'failed to read file, {e}')
        patient_list = []
Example #46
def compute_similarity_metrics(w2s, modname, embeddings):
    N_LAYERS, _ , HDIM = embeddings[0].shape
    nsents= len(embeddings) // 2 if w2s['target'] else len(embeddings)
    # BASELINES [FOR ANISOTROPY CORRECTION]
    logger.info('   computing baselines for anisotropy correction ')
    b1, b3 = Sim.get_baselines(embeddings, w2s, N_LAYERS)

    # SELF-SIMILARITY & MAXIMUM EXPLAINABLE VARIANCE
    logger.info('   computing self-similarity and max explainable variance ')
    selfsim = torch.zeros((len(w2s['source'].w2sdict), N_LAYERS))
    mev = torch.zeros((len(w2s['source'].w2sdict), N_LAYERS ))
    for wid, occurrences in tqdm(enumerate(w2s['source'].w2sdict.values())):
        for layer in range(N_LAYERS):
            embs4thisword = torch.zeros((len(occurrences), HDIM))
            for i, idx_tuple in enumerate(occurrences):
                sentence_id, word_id = idx_tuple
                embs4thisword[i,:] = embeddings[sentence_id][layer, word_id,:]

            selfsim[wid, layer] = Sim.self_similarity(embs4thisword).item() #- b1[layer]
            mev[wid, layer] = Sim.max_expl_var(embs4thisword) #- b3[layer]
    
    if w2s['target']:
        N_LAYERS_dec, _ , HDIM = embeddings[-1].shape
        selfsim_dec = torch.zeros((len(w2s['target'].w2sdict), N_LAYERS_dec))
        mev_dec = torch.zeros((len(w2s['target'].w2sdict), N_LAYERS_dec ))
        for wid, occurrences in tqdm(enumerate(w2s['target'].w2sdict.values())):
            for layer in range(N_LAYERS_dec):
                embs4thisword = torch.zeros((len(occurrences), HDIM))
                for i, idx_tuple in enumerate(occurrences):
                    sentence_id, word_id = idx_tuple
                    embs4thisword[i,:] = embeddings[sentence_id][layer, word_id,:]

                selfsim_dec[wid, layer] = Sim.self_similarity(embs4thisword).item() #- b1[layer]
                mev_dec[wid, layer] = Sim.max_expl_var(embs4thisword) #- b3[layer]

    # INTRA-SENTENCE SIMILARITY
    logger.info('   computing intra-sentence similarity ')
    insentsim = torch.zeros((nsents, N_LAYERS)) 
    for layer in range(N_LAYERS):
        logger.info(f'        layer: {layer}')
        for sid in range(nsents): 
            insentsim[sid,layer] = Sim.intra_similarity(embeddings[sid][layer,:,:]) #- b2[layer]

    if w2s['target']:
        insentsim_dec = torch.zeros((nsents, N_LAYERS_dec)) 
        for layer in range(N_LAYERS_dec):
            logger.info(f'        layer: {layer+N_LAYERS}')
            for sid in range(nsents): 
                insentsim_dec[sid,layer] = Sim.intra_similarity(embeddings[nsents+sid][layer,:,:]) #- b2[layer]

    logger.info('    intra sentence similarity finished')

    #b2 = insentsim.mean(dim=0)
    #b3bis = mev.mean(dim=0)# INTERPRETATION 2: baseline_metric3 <- this is the WRONG one
    metricsdict = {'selfsim':selfsim,
                   'selfsim_isotropic': selfsim - b1[:N_LAYERS],
                   'intrasentsim': insentsim,
                   'intrasentsim_isotropic': insentsim - b1[:N_LAYERS],
                   'mev': mev,
                   'mev_isotropic': mev - b3[:N_LAYERS],
                   'baseline1': b1,
                   'baseline3': b3  }
    
    if w2s['target']:
        metricsdict.update(
            selfsim_decoder=selfsim_dec,
            selfsim_decoder_isotropic=selfsim_dec - b1[-N_LAYERS_dec:],
            mev_decoder=mev_dec,
            mev_decoder_isotropic=mev_dec - b3[-N_LAYERS_dec:],
            insentsim_decoder=insentsim_dec,
            insentsim_decoder_isotropic=insentsim_dec - b1[-N_LAYERS_dec:]
        )

    return {k:v.numpy() for k,v in metricsdict.items()}
Example #47
def show_parameters(model):
    trainable_param_counter = defaultdict(float)
    logger.info('Trainable Parameters:')
    for name, param in model.named_parameters():
        if param.requires_grad:
            prefix = name.split('.')[0]
            trainable_param_counter[prefix] += param.nelement()
            logger.info('{}-{}-{}-{}'.format(name, param.shape, param.dtype, param.device))
    logger.info('-------------')
    trainable_sum = 0
    for key in trainable_param_counter.keys():
        logger.info('[PARAMS-COUNTING] #%s:%.2fM' % (key, trainable_param_counter[key] / 1e6))
        trainable_sum += trainable_param_counter[key]
    logger.info('[PARAMS-SUM] #%s:%.2fM' % ('Trainable', trainable_sum / 1e6))

    non_trainable_param_counter = defaultdict(float)
    logger.info('###########')
    logger.info('Non-Trainable Parameters:')
    for name, param in model.named_parameters():
        if param.requires_grad is False:
            prefix = name.split('.')[0]
            non_trainable_param_counter[prefix] += param.nelement()
            logger.info('{}-{}-{}-{}'.format(name, param.shape, param.dtype, param.device))
    logger.info('-------------')
    non_trainable_sum = 0
    for key in non_trainable_param_counter.keys():
        logger.info('[PARAMS-COUNTING] #%s:%.2fM' % (key, non_trainable_param_counter[key] / 1e6))
        non_trainable_sum += non_trainable_param_counter[key]
    logger.info('[PARAMS-SUM] #%s:%.2fM' % ('Non-Trainable', non_trainable_sum / 1e6))
    logger.info('-------------')
    logger.info('[PARAMS-SUM] #%s:%.2fM' % ('Total', (trainable_sum + non_trainable_sum) / 1e6))
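
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original snippet): a tiny, hypothetical
# PyTorch model passed through show_parameters(); the layer sizes and the
# frozen first layer are assumptions chosen only for illustration.
import torch.nn as nn

tiny_model = nn.Sequential(nn.Linear(768, 256), nn.ReLU(), nn.Linear(256, 2))
for p in tiny_model[0].parameters():
    p.requires_grad = False  # frozen weights are reported in the non-trainable section
show_parameters(tiny_model)  # logs per-parameter shapes and per-prefix totals (in millions)
# ---------------------------------------------------------------------------
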
def main(opt):
    
    if not opt.load_embeddings_path:
        samples = load_samples(opt)
        toks_and_embeddings_paths = Emb.compute_embeddings(opt, samples)
    else:
        toks_and_embeddings_paths = Emb.emul_compute_embeddings(opt)

    
    if opt.only_save_embs:
        print('Saved embeddings ... finishing job')
        raise SystemExit 

    logger.info('Loading embeddings & tokenized sentences...') 
    metrics={}
    for modelname, paths in toks_and_embeddings_paths.items():
        tokdsents, embs = Emb.load_embeddings(paths, opt.max_nsents)
        logger.info(f'[*] MODEL: {modelname} ')
        w2s=make_indexer(opt=opt,sents=tokdsents,modname=modelname)
        logger.info('Computing metrics')

        metrics[modelname] = compute_similarity_metrics(w2s, modelname, embs)
        logger.info('Saving metrics')
        dump_similarity_metrics(opt, metrics[modelname],modelname)

    if opt.plot_results:
        Plt.makeplot(metrics,opt.outdir)
    else:
        logger.info('finishing ... to make the plots call `utils/ploting.py path/to/savedMetrics.pkl` ')
        logger.info(' or use option --plot_results')
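
# ---------------------------------------------------------------------------
# Hedged sketch (not from the original project): a minimal argparse front-end
# exposing only the options that main() visibly reads above; the real script
# almost certainly defines more flags than these, so treat every name here as
# an assumption.
import argparse

def _parse_args_sketch():
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_embeddings_path', default=None)
    parser.add_argument('--only_save_embs', action='store_true')
    parser.add_argument('--max_nsents', type=int, default=None)
    parser.add_argument('--plot_results', action='store_true')
    parser.add_argument('--outdir', default='.')
    return parser.parse_args()

# if __name__ == '__main__':
#     main(_parse_args_sketch())
# ---------------------------------------------------------------------------
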
    def train(self):
        if self.conf.need_to_load:
            self.load(self.conf.checkpoint_dir)

        data = glob(os.path.join(self.conf.data, "*.%s" % self.conf.img_format))
        logger.info('Total amount of images: %s' % len(data))
        # np.random.shuffle(data)

        d_fr_optim = tf.train.AdamOptimizer(self.conf.learning_rate, beta1=self.conf.beta1)
        d_fr_optim = d_fr_optim.minimize(self.d_fr_loss, var_list=self.d_fr_vars)

        d_s_n_optim = tf.train.AdamOptimizer(self.conf.learning_rate, beta1=self.conf.beta1)
        d_s_n_optim = d_s_n_optim.minimize(self.d_stego_loss_total, var_list=self.d_s_n_vars)

        g_optim_fake = tf.train.AdamOptimizer(self.conf.learning_rate, beta1=self.conf.beta1)
        g_optim_fake = g_optim_fake.minimize(self.g_loss, var_list=self.g_vars)

        # g_optim_stego = tf.train.AdamOptimizer(0.000005, beta1=0.9)
        # g_optim_stego = g_optim_stego.minimize(self.g_loss_stego, var_list=self.g_vars)

        merged = tf.merge_all_summaries()
        train_writer = tf.train.SummaryWriter('./logs_sgan', self.sess.graph)

        tf.initialize_all_variables().run()

        sample_z = np.random.uniform(-1, 1, size=(self.sample_size, self.z_dim))
        sample_files = data[0:self.sample_size]
        sample = [get_image(sample_file, self.image_size, need_transform=True) for sample_file in sample_files]
        sample_images = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        batch_idxs = min(len(data), self.conf.train_size) / self.conf.batch_size

        logger.debug('Starting updating')
        for epoch in range(self.conf.epoch):
            stego_losses, fake_real_losses, generator_losses = [], [], []

            logger.info('Starting epoch %s' % epoch)

            for idx in range(0, int(batch_idxs)):
                batch_files = data[idx * self.conf.batch_size:(idx + 1) * self.conf.batch_size]
                batch = [get_image(batch_file, self.image_size, need_transform=True) for batch_file in batch_files]
                batch_images = np.array(batch).astype(np.float32)

                batch_z = np.random.uniform(-1, 1, [self.conf.batch_size, self.z_dim]).astype(np.float32)

                self.sess.run(d_fr_optim, feed_dict={self.images: batch_images, self.z: batch_z})
                self.sess.run(d_s_n_optim, feed_dict={self.images: batch_images, self.z: batch_z})

                self.sess.run(g_optim_fake, feed_dict={self.z: batch_z})
                self.sess.run(g_optim_fake, feed_dict={self.z: batch_z})

                # # if epoch > 5:
                # self.sess.run(g_optim_stego, feed_dict={self.z: batch_z})

                # errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                # errD_real = self.d_loss_real.eval({self.images: batch_images})
                #
                # errD_stego = self.d_loss_stego.eval({self.z: batch_z})
                # errD_n_stego = self.d_loss_nonstego.eval({self.z: batch_z})
                #
                # errG = self.g_loss.eval({self.z: batch_z})
                #
                # fake_real_losses.append(errD_fake + errD_stego)
                # stego_losses.append(errD_stego + errD_n_stego)
                # generator_losses.append(errG)
                #
                logger.debug("[ITERATION] Epoch [%2d], iteration [%4d/%4d] time: %4.4f" %
                             (epoch, idx, batch_idxs, time.time() - start_time))
                # logger.debug('[LOSS] Real/Fake: %.8f' % (errD_fake + errD_real))
                # logger.debug('[LOSS] Stego/Non-Stego: %.8f' % (errD_stego + errD_n_stego))
                # logger.debug('[LOSS] Generator: %.8f' % errG)

                counter += 1

                if np.mod(counter, 1000) == 0:
                    self.save(self.conf.checkpoint_dir, counter)

                if np.mod(counter, 300) == 0:
                    logger.info('Save samples')
                    samples, d_loss, g_loss = self.sess.run(
                        [self.sampler, self.d_fr_loss, self.g_loss_fake,
                         ],
                        feed_dict={self.z: sample_z, self.images: sample_images}
                    )
                    save_images_to_one(samples, [8, 8], './samples/train_%s_%s.png' % (epoch, idx))
Example #50
0
    def run(self, _start_time, _end_time):
        """
         只计算聚类
         """
        # Log the start of the job
        cluster_job_log = ClusterJobLog()
        job_id = cluster_job_log.save_auto_job(self.is_manual, self.get_group_id, self.language_type, self.save_group_id, self.week_fag)

        # Start execution
        # exe_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Read data from MySQL
        corpus = []
        base_data_view = BaseDataView()
        if self.is_manual == 2:  # China-related
            corpus = base_data_view.get_involved_china_corpus(_start_time, _end_time, self.language_type,
                                                              self.get_group_id)
        if self.is_manual == 0:
            corpus = base_data_view.get_corpus(_start_time, _end_time, self.language_type, self.get_group_id)

        corpus_length = len(corpus)

        if corpus_length == 0:
            logger.info('returning run, because len(corpus) == 0, {start_time: %s, end_time: %s}.' % (
                _start_time, _end_time))

            # Update the job log
            cluster_job_log.update_job_log(job_id, 0)
            return

        # Clustering
        if self.week_fag == 1:  # Weekly data is too large; hierarchical clustering is O(n^3) and too slow, so use DBSCAN instead
            cluster_results = dbscan_twice_cluster.perform_cluster(self.is_manual, self.cluster_type,
                                                                   self.manual_id, self.subtopic_id,
                                                                   self.language_type, corpus,
                                                                   self.save_group_id)
            cluster_dao = ClusterResultWeek()
        else:
            if self.language_type == 1:  # English clustering
                if self.is_manual == 2:  # China-related
                    min_sample = 1
                else:
                    min_sample = 2
                cluster_results = hierarchical_cluster.perform_cluster(self.is_manual, self.cluster_type,
                                                                       self.manual_id, self.subtopic_id,
                                                                       self.language_type, corpus,
                                                                       min_sample, self.save_group_id)
                cluster_dao = MergerClusterResult()

            else:  # Chinese uses the DBSCAN clustering algorithm
                cluster_results = dbscan_twice_cluster.perform_cluster(self.is_manual, self.cluster_type,
                                                                       self.manual_id, self.subtopic_id,
                                                                       self.language_type, corpus,
                                                                       self.save_group_id)
                cluster_dao = ClusterResult()

        # Save the results to MySQL
        exe_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        logger.info('time to mysql: %s; cluster_results. cluster num: %s' % (exe_time,
                                                                             len(cluster_results)))
        cluster_dao.save_cluster_results(cluster_results)

        # Clear the previous batch of data
        cluster_dao.delete_cluster_results(exe_time, self.is_manual, self.cluster_type,
                                           self.manual_id, self.subtopic_id,
                                           self.save_group_id)

        # Update the job log
        cluster_job_log.update_job_log(job_id, len(cluster_results))

        logger.info('end run.')
Example #51
0
from datetime import datetime
from utils.logger import logger
from resource.token import Amil
from flask import Flask
from utils.cache import cache
from flask_restful import Api

app = Flask(__name__)

api = Api(app)

api.add_resource(Amil, '/amil')

if __name__ == '__main__':
    logger.info("Starting app %s", datetime.now())
    cache.init_app(app)
    app.run(debug=True)
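
# ---------------------------------------------------------------------------
# Hedged sketch (the real resource.token.Amil is not shown in this snippet):
# a flask_restful resource is a class with HTTP-verb methods, and any dict it
# returns is serialized to JSON automatically.  AmilExample is a hypothetical
# stand-in, not the project's actual class.
from flask_restful import Resource

class AmilExample(Resource):
    def get(self):
        # (response body, HTTP status code)
        return {'status': 'ok'}, 200
# ---------------------------------------------------------------------------
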
Example #52
0
 def process_request(self, request):
     try:
         logger.info(
             '************************************************* Below is a new log entry ***************************************************'
         )
         logger.info('Intercepted request path: %s; request method: %s' % (request.path, request.method))
         logger.info(
             '==================================== headers info ===================================================='
         )
         for key in request.META:
             if key[:5] == 'HTTP_':
                 logger.debug('%s %s' % (str(key), str(request.META[key])))
         logger.info('Proxy IP: %s' % request.META.get('REMOTE_ADDR'))
         logger.info(
             'Real IP: %s' %
             request.META.get('HTTP_X_FORWARDED_FOR'))  # HTTP_X_REAL_IP
         logger.info(
             '==================================== request body info =================================================='
         )
         logger.info('Query params: %s' % request.GET)
         if request.path == '/uploadfile/':
             logger.info('Body params: file upload')
         else:
             logger.info('Body params: %s' % request.body.decode())
             # if 'application/x-www-form-urlencoded' in request.META['CONTENT_TYPE']:
             #     print('Body params:', urllib.parse.unquote(request.body.decode()))
         logger.info(
             '================================== view function internals ================================================'
         )
     except Exception as e:
         logger.error('An error occurred: the known cause is file uploads; unexpected errors are logged below:')
         logger.error('Unexpected error: %s' % str(e))
         return JsonResponse({
             "message": "An unexpected error occurred: %s" % e,
             "errorCode": 1,
             "data": {}
         })
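
# ---------------------------------------------------------------------------
# Hedged sketch: one common way to wire a process_request hook like the one
# above into Django.  The class name, module path and settings entry are
# illustrative assumptions, not taken from the original project.
from django.utils.deprecation import MiddlewareMixin

class RequestLoggingMiddleware(MiddlewareMixin):  # hypothetical wrapper class
    def process_request(self, request):
        # ... logging body as in the snippet above ...
        return None  # returning None lets the request continue to the view

# settings.py (assumed location):
# MIDDLEWARE = [
#     ...,
#     'myapp.middleware.RequestLoggingMiddleware',
# ]
# ---------------------------------------------------------------------------
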
Example #53
0
def train_objectdetection(data_loader, model, criterion, optimizer, num_epochs,
                          device):
    """
    One epoch's training.
    :param train_loader: DataLoader for training data
    :param model: model
    :param criterion: MultiBox loss
    :param optimizer: optimizer
    :param epoch: epoch number
    """
    print_freq = 200  # print training or validation status every __ batches
    model.train()  # training mode enables dropout

    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    losses = AverageMeter()  # loss

    start = time.time()
    # Batches
    for i, (images, boxes, labels, _) in enumerate(data_loader):
        data_time.update(time.time() - start)

        # Move to default device
        images = images.to(device)  # (batch_size (N), 3, 300, 300)
        boxes = [b.to(device) for b in boxes]
        labels = [l.to(device) for l in labels]

        # Forward prop.
        predicted_locs, predicted_scores = model(
            images)  # (N, 8732, 4), (N, 8732, n_classes)

        # Loss

        # for i in range(len(boxes)):
        #  boxes[i] = boxes[i].to('cpu')
        #  labels[i] = labels[i].to('cpu')
        # print (predicted_locs, predicted_scores)
        # print (predicted_locs.shape, predicted_scores.shape)
        # print (len(boxes), len(labels))
        # print (boxes[1], labels[1])
        predicted_locs = predicted_locs.to(device)
        predicted_scores = predicted_scores.to(device)

        loss = criterion(predicted_locs, predicted_scores, boxes,
                         labels)  # scalar

        # Backward prop.
        optimizer.zero_grad()
        loss.backward()

        # # Clip gradients, if necessary
        # if grad_clip is not None:
        #     clip_gradient(optimizer, grad_clip)

        # Update model
        optimizer.step()

        losses.update(loss.item(), images.size(0))
        batch_time.update(time.time() - start)

        start = time.time()

        # Print status
        if i % print_freq == 0:
            logger.info(
                'Epoch: [{0}][{1}/{2}]\t'
                'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    num_epochs,
                    i,
                    len(data_loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses))

    return losses.avg
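
# ---------------------------------------------------------------------------
# Hedged call-pattern sketch (kept as comments because the SSD-style model,
# MultiBox criterion and DataLoader it relies on are assumptions not shown
# above):
#
# for epoch in range(total_epochs):
#     avg_loss = train_objectdetection(train_loader, model, criterion,
#                                      optimizer, epoch, device)
#     logger.info('Epoch %d finished, average loss %.4f', epoch, avg_loss)
# ---------------------------------------------------------------------------
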
Example #54
0
 def clean_cache(self, lib):
     lib_path = os.path.join(self.cache_libs_path, f'{lib}.zip')
     logger.info(f'Clean cache of {lib} at {lib_path}')
     FileUtils.remove_file(lib_path)
Example #55
0
def messages_ack(did, msg_ids):
    for msg_id in msg_ids:
        logger.info('ack message(id:%d, did:%s)' % (msg_id, did))
        msgQueue.rem_from_acking(did, msg_id)
Example #56
0
def auto_backup_job():
    for backup in BACKUP_DIR.glob("Auto*.zip"):
        backup.unlink()

    export_db(tag="Auto", template=None)
    logger.info("Auto Backup Called")
Example #57
0
 def go_to_av(self):
     self.find(self._audio_study).click()
     logger.info('Entering the audio/video study page')
     return AVPage()
Example #58
0
def create_enriched_data_views(db_url):
    engine = create_engine(db_url)
    logger.info("[ENRICHED DATA] Start enriched data creation")

    create_table_regions_departments(engine)
    logger.info("[ENRICHED DATA] Imported regions departments table")
    create_enriched_offerer_data(engine)
    logger.info("[ENRICHED DATA] Created enriched offerer data")
    create_enriched_user_data(engine)
    logger.info("[ENRICHED DATA] Created enriched beneficiary user data")
    create_enriched_stock_data(engine)
    logger.info("[ENRICHED DATA] Created enriched stock data")
    create_enriched_offer_data(engine)
    logger.info("[ENRICHED DATA] Created enriched offer data")
    create_enriched_venue_data(engine)
    logger.info("[ENRICHED DATA] Created enriched venue data")
    create_enriched_booking_data(engine)
    logger.info("[ENRICHED DATA] Created enriched booking data")

    logger.info("[ENRICHED DATA] End enriched data creation")
    def book(user: User, hotel: Hotel, br: BookingRequirement) -> Booking:
        '''
        Books the set of rooms according to the BookingRequirement
        If the requirement is not met, raises
        '''
        logger.info('Finding available rooms')
        if not BookingService.meets_requirements(br):
            logger.error('Rooms not available matching the requirement')
            raise BookingRequirementNotMetException()

        logger.info('Finding available rooms...')
        available_rooms = BookingService.getAvailableRoomsInDateRange(
            hotel, br.from_date, br.to_date)
        logger.info('Found available rooms: ')
        logger.info(available_rooms)

        logger.info('Assigning rooms...')
        assigned_rooms = set()
        for room_type in [x.name for x in RoomType]:
            number = 0
            if room_type == RoomType.STANDARD.name:
                number = br.no_standard
            if room_type == RoomType.DELUXE.name:
                number = br.no_deluxe
            if room_type == RoomType.LUXURY.name:
                number = br.no_luxury
            if room_type == RoomType.SUITE.name:
                number = br.no_suite
            picked_rooms = BookingService.pickNRoomOfType(
                available_rooms, room_type, number)
            # set.union() returns a new set; update() mutates assigned_rooms in place
            assigned_rooms.update(picked_rooms)

        logger.debug('Writing rooms to RoomDate')
        # Adding Room to RoomDate table
        for _assigned_room in assigned_rooms:
            for _date in getDatesInRange(br.from_date, br.to_date):
                room_date = RoomDate(_assigned_room, _date)
                room_dates.append(room_date)
        logger.debug('Finished writing rooms to RoomDate')

        booking = Booking(user, hotel, assigned_rooms, br.from_date,
                          br.to_date)
        bookings.append(booking)
        logger.info("Booking completed for user %s. Booking id: %s", user,
                    booking.booking_id)
        logger.info(booking)
        return booking
Example #60
0
def test_data(loader, name, epoch):
    max_length = 0
    start = time.time()
    min_ratios = []
    audio_size = []
    for i, x in enumerate(tqdm(loader)):
        img, label, label_lengths, image_lengths = x
        max_length = max(max_length, label_lengths.max().item())
        ratios = img.shape[2] / label_lengths.max().item()
        audio_size.append(img.shape[2])
        random_index = np.random.choice(range(len(label_lengths)))
        sample_label_index_start = label_lengths[:random_index].sum().item()
        sample_label_index_end = sample_label_index_start + label_lengths[
            random_index].item()
        sample_label = label[
            sample_label_index_start:sample_label_index_end].cpu().tolist()
        sample_text = label_re_transform(sample_label)
        logger.info(f"Sample text is: {sample_text}")
        min_ratios.append(ratios)
    end = time.time()
    logger.info(
        f"Average size, min, max, median of audio is {np.mean(audio_size)}, {np.min(audio_size)}, {np.max(audio_size)}, {np.median(audio_size)}"
    )
    logger.info(f"Longest length of text in {name} dataset is {max_length}")
    logger.info(
        f"min_ratio text in {np.min(min_ratios)} and max_ratio is {np.max(min_ratios)}, avg is {np.mean(min_ratios)}, median is {np.median(min_ratios)}"
    )
    logger.info(
        f"Ratio historgram is {np.histogram(min_ratios, density=True)}")
    logger.info(f"Time taken for epoch {epoch} was {end-start:.3f}s")