Example #1
    def validation(self, val_sents, batch_size, log_file=None):
        """
        :param val_sents: validation sentences.
        :type val_sents: a list, each element a ndarray
        :return: tuple
        """
        code_len = 0.
        nb_words = 0.
        loss = 0.0
        unrm = 0.0
        sq_unrm = 0.0

        for sents in val_sents:
            x = [self.negative_sample(sents)]
            loss_, code_len_, nb_words_, unrm_, sq_unrm_ = self._test_loop(self._test, x, batch_size)
            nb_words += nb_words_
            code_len += code_len_
            loss += loss_ * nb_words_
            unrm += unrm_
            sq_unrm += sq_unrm_

        loss /= nb_words
        ppl = math.exp(code_len/nb_words)
        mean_unrm = unrm / nb_words
        mean_sq_unrm = sq_unrm / nb_words
        std_unrm = mean_sq_unrm - mean_unrm * mean_unrm
        logger.info('%s:Val val_loss: %.2f - val_ppl: %.2f - partition: mean: %.2f std: %.2f' %
                    (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm))
        log_file.info('%s:Val val_loss: %.6f - val_ppl: %.6f - partition: mean: %.6f std: %.6f' %
                      (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm))

        return loss, ppl, mean_unrm, std_unrm
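Note that std_unrm above is really the variance (the second moment minus the squared mean); no square root is taken before it is logged as "std". A minimal sketch of how a true standard deviation could be derived from the same running sums, assuming the accumulators from validation (the helper name is hypothetical):

import math

def partition_stats(unrm, sq_unrm, nb_words):
    """Mean and standard deviation of the unnormalized partition.

    unrm and sq_unrm are the running sums accumulated in validation;
    nb_words is the total word count.
    """
    mean = unrm / nb_words
    variance = sq_unrm / nb_words - mean * mean
    # guard against tiny negative values caused by floating-point error
    return mean, math.sqrt(max(variance, 0.0))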
Example #2
 def change_review_visibility(self, review_id: str, visible: bool) -> dict:
     logger.info(
         f'Changing review {review_id} to {"visible" if visible else "invisible"}'
     )
     url = self._build_url('profile', self.username, 'review', review_id)
     payload = {'hidden': not visible}
     return self._json(self._put(url, payload), 200)
Example #3
 def get_orders(self, force=False):
     if self.orders_json and not force:
         return self.orders_json
     logger.info(f'Getting orders for profile: {self.username}')
     url = self._build_url('profile', self.username, 'orders')
     self.orders_json = self._json(self._get(url), 200)
     return self.orders_json
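Because the result is cached on the instance, only the first call (or a call with force=True) actually hits the API. A hedged usage sketch, assuming profile is an instance of the surrounding class:

orders = profile.get_orders()            # fetches from the API and caches
orders = profile.get_orders()            # served from the cache
orders = profile.get_orders(force=True)  # bypasses the cache and re-fetches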
Example #4
    def validation(self, val_sents, batch_size, log_file=None):
        """
        :param val_sents: validation sentences.
        :type val_sents: a list, each element a ndarray
        :return: tuple
        """
        code_len = 0.
        nb_words = 0.
        loss = 0.0
        unrm = 0.0
        sq_unrm = 0.0

        for sents in val_sents:
            x = [self.negative_sample(sents)]
            loss_, code_len_, nb_words_, unrm_, sq_unrm_ = self._test_loop(
                self._test, x, batch_size)
            nb_words += nb_words_
            code_len += code_len_
            loss += loss_ * nb_words_
            unrm += unrm_
            sq_unrm += sq_unrm_

        loss /= nb_words
        ppl = math.exp(code_len / nb_words)
        mean_unrm = unrm / nb_words
        mean_sq_unrm = sq_unrm / nb_words
        std_unrm = mean_sq_unrm - mean_unrm * mean_unrm
        logger.info(
            '%s:Val val_loss: %.2f - val_ppl: %.2f - partition: mean: %.2f std: %.2f'
            % (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm))
        log_file.info(
            '%s:Val val_loss: %.6f - val_ppl: %.6f - partition: mean: %.6f std: %.6f'
            % (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm))

        return loss, ppl, mean_unrm, std_unrm
Example #5
def detect_objects(image_path):
    image = cv2.imread(image_path)

    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843,
                                 (300, 300), 127.5)

    net.setInput(blob)
    detections = net.forward()

    classes = []

    # loop over the detections
    for i in np.arange(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the
        # prediction
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the `confidence` is
        # greater than the minimum confidence
        if confidence > .5:
            # extract the index of the class label from the `detections`,
            # then compute the (x, y)-coordinates of the bounding box for
            # the object
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # display the prediction
            label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
            logger.info("{}".format(label))

            classes.append(CLASSES[idx])
    return classes
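detect_objects relies on module-level net and CLASSES objects that are not shown in this example. A minimal sketch of that setup for a Caffe MobileNet-SSD (the scale factor 0.007843 = 1/127.5 and mean 127.5 used above match that model); the model file names are hypothetical:

import logging

import cv2
import numpy as np

logger = logging.getLogger(__name__)

# The 20 PASCAL VOC classes plus background, in MobileNet-SSD's label order.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

# Hypothetical file names; any MobileNet-SSD deploy prototxt/caffemodel pair works.
net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                               "MobileNetSSD_deploy.caffemodel")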
Example #6
 def change_settings(self, platform: str, region: str) -> dict:
     logger.info(
         f'Changing settings for user {self.username} ({self.platform} -> {platform}, {self.region} -> {region})'
     )
     url = self._build_url('settings', 'verification')
     payload = {'platform': platform, 'region': region}
     return self._json(self._patch(url, payload), 200)
Example #7
 def _set_status(self, status: str) -> None:
     logger.info(f'Setting status of user {self.username} to {status}')
     websocket = self._open_ws(platform=self.platform)
     websocket.send(
         f'{{"type":"@WS/USER/SET_STATUS","payload":"{status}"}}')
     websocket.close()
     logger.info(
         f'Status of user {self.username} successfully changed to {status}')
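The status frame above is built by hand-escaping braces inside an f-string. A quote-safe alternative using the standard json module, assuming the same message shape, would replace the send call with:

import json

message = json.dumps({"type": "@WS/USER/SET_STATUS", "payload": status})
websocket.send(message)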
Example #8
 def delete_review_on_user(self, username: str, review_id: str) -> dict:
     """
     Delete the review with the given id that you wrote for a user.
     There's no endpoint to find the ids of reviews that you wrote.
     """
     logger.info(f'Deleting review {review_id} of user {username}')
     url = self._build_url('profile', username, 'review', review_id)
     return self._json(self._delete(url), 200)
Example #9
 def get_settings(self) -> dict:
     """
     Get the user's settings.
     Returns an error for now.
     """
     logger.info(f'Getting settings for user {self.username}')
     url = self._build_url('settings')
     return self._json(self._get(url), 200)
Example #10
 def change_review_on_user(self, username: str, review_id: str,
                           text: str) -> dict:
     """
     Change the text of a review that you wrote for a user.
     There's no endpoint to find the ids of reviews that you wrote, so this method is UNTESTED and may not work.
     """
     logger.info(
         f'Changing review {review_id} of user {username} to "{text}"')
     url = self._build_url('profile', username, 'review', review_id)
     payload = {'text': text}
     return self._json(self._put(url, payload), 200)
Example #11
def save_image_db(image_path: str):
    logger.info(f"Processing file {image_path}")
    filename = image_path.split(os.sep)[-1]
    mime = get_mime(filename.split(".")[-1].lower())
    with open(image_path, "rb") as im:
        bin_data = im.read()
    classes = detect_objects(image_path)
    logger.info(f"Detected classes for {image_path}: {classes}")
    for cl in classes:
        Object(cl, bin_data, mime, path=image_path).save()
    os.remove(image_path)
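get_mime is an external helper that is not shown here. A minimal stand-in using the standard mimetypes module, assuming it maps a bare extension such as "jpg" to a MIME type such as "image/jpeg":

import mimetypes

def get_mime(extension: str) -> str:
    # guess_type expects a file name, so wrap the bare extension in one
    mime, _ = mimetypes.guess_type(f"f.{extension}")
    return mime or "application/octet-stream"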
Example #12
    def validation(self, val_sents, batch_size, log_file=None):
        """
        :param val_sents: validation sentences.
        :type val_sents: a list, each element a ndarray
        :return: tuple
        """
        code_len = 0.
        nb_words = 0.
        loss = 0.0
        unrm = 0.0
        sq_unrm = 0.0
        z_pred = 0.0
        z_true = 0.0
        z_err = 0.0
        z_sq = 0.0

        for sents in val_sents:
            x = [self.negative_sample(sents)]
            loss_, code_len_, nb_words_, unrm_, sq_unrm_, z_pred_, z_true_, z_err_, z_sq_ = \
                self._test_loop(self._test, x, batch_size)
            nb_words += nb_words_
            code_len += code_len_
            loss += loss_ * nb_words_
            unrm += unrm_
            sq_unrm += sq_unrm_
            z_pred += z_pred_
            z_true += z_true_
            z_err += z_err_
            z_sq += z_sq_

        loss /= nb_words
        ppl = math.exp(code_len / nb_words)
        mean_unrm = unrm / nb_words
        mean_sq_unrm = sq_unrm / nb_words
        std_unrm = mean_sq_unrm - mean_unrm * mean_unrm
        z_pred /= nb_words
        z_true /= nb_words
        z_err /= nb_words
        mean_sq_z = z_sq / nb_words
        std_z = mean_sq_z - z_true * z_true
        logger.info(
            '%s:Val val_loss: %.2f - val_ppl: %.2f - partition: mean: %.2f std: %.2f - '
            'z: pred: %.2f true: %.2f err: %.2f std: %.2f' %
            (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm,
             z_pred, z_true, z_err, std_z))
        log_file.info(
            '%s:Val val_loss: %.6f - val_ppl: %.6f - partition: mean: %.6f std: %.6f - '
            'z: pred: %.6f true: %.6f err: %.6f std: %.6f' %
            (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm,
             z_pred, z_true, z_err, std_z))

        return loss, ppl, mean_unrm, std_unrm, z_pred, z_true, z_err
Example #13
    def validation(self, val_sents, batch_size, log_file=None):
        """
        :param val_sents: validation sentences.
        :type val_sents: a list, each element a ndarray
        :return: tuple
        """
        code_len = 0.
        nb_words = 0.
        loss = 0.0
        unrm = 0.0
        sq_unrm = 0.0
        z_pred = 0.0
        z_true = 0.0
        z_err = 0.0
        z_sq = 0.0

        for sents in val_sents:
            x = [self.negative_sample(sents)]
            loss_, code_len_, nb_words_, unrm_, sq_unrm_, z_pred_, z_true_, z_err_, z_sq_ = \
                self._test_loop(self._test, x, batch_size)
            nb_words += nb_words_
            code_len += code_len_
            loss += loss_ * nb_words_
            unrm += unrm_
            sq_unrm += sq_unrm_
            z_pred += z_pred_
            z_true += z_true_
            z_err += z_err_
            z_sq += z_sq_

        loss /= nb_words
        ppl = math.exp(code_len/nb_words)
        mean_unrm = unrm / nb_words
        mean_sq_unrm = sq_unrm / nb_words
        std_unrm = mean_sq_unrm - mean_unrm * mean_unrm
        z_pred /= nb_words
        z_true /= nb_words
        z_err /= nb_words
        mean_sq_z = z_sq / nb_words
        std_z = mean_sq_z - z_true * z_true
        logger.info('%s:Val val_loss: %.2f - val_ppl: %.2f - partition: mean: %.2f std: %.2f - '
                    'z: pred: %.2f true: %.2f err: %.2f std: %.2f' %
                    (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm, z_pred, z_true, z_err, std_z))
        log_file.info('%s:Val val_loss: %.6f - val_ppl: %.6f - partition: mean: %.6f std: %.6f - '
                      'z: pred: %.6f true: %.6f err: %.6f std: %.6f' %
                      (self.__class__.__name__, loss, ppl, mean_unrm, std_unrm, z_pred, z_true, z_err, std_z))

        return loss, ppl, mean_unrm, std_unrm, z_pred, z_true, z_err
Example #14
    def change(self, item_id: str = None, platinum: int = None, quantity: int = None, visible: bool = None):
        if item_id is None:
            item_id = self.order_json['item']['id']
        if platinum is None:
            platinum = self.order_json['platinum']
        if quantity is None:
            quantity = self.order_json['quantity']
        if visible is None:
            visible = self.order_json['visible']

        logger.info(f'Changing {self.order_type} order {self.order_id} to {self.item_name} '
                    f'x{quantity} for {platinum}p each and visible {visible} '
                    f'for a total of {quantity * platinum}p')
        url = self._build_url('profile', 'orders', self.order_id)
        payload = {
            'item_id': item_id,
            'platinum': platinum,
            'quantity': quantity,
            'visible': visible
        }
        self._json(self._put(url, payload), 200)
        return self
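Because unspecified fields fall back to the values in order_json and the method returns self, a single call can update just the attributes you name. A hedged usage sketch, assuming order is an instance of the surrounding class:

# drop the price to 15p and hide the order; item_id and quantity are kept
order.change(platinum=15, visible=False)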
Example #15
 def migrate_all_containers(self, node):
     """ Should be called when a compute node is down.
     :param node: The compute node whose containers are to be shifted.
     :return:
     """
     for con in node.container:
         logger.info("Migrating container {0}".format(con))
         if self.database.add_deleted_container(con) is False:
             continue
         status = self.database.delete_ssh_port_forward_rule(con)
         if status is False:
             logger.exception("Failed to delete IP table rule")
             return -1
         node = self.get_compute_node()
         if node is None:
             logger.exception("No available compute node available. Cannot migrate containers")
             return -1
         image_name, ram, cpu, con_name = con.image.image_name, con.ram, con.cpu, con.container_name
         try:
             status, docker_port, con_name, con_id = self.run_container(image_name, node.ip_addr, ram, cpu, con_name)
         except Exception as e:
             logger.exception("Failed to launch a new container of image {0} on Node: {1}", image_name, node.ip_addr)
             continue
         if status is True:
             nat_port = con.nat_port
             image = self.database.get_image(con.image.image_id)
             user = self.database.get_user(con.user.id)
             nat_port, management_ip = self.database.add_ssh_port_forward_rule(node.ip_addr, docker_port, nat_port=nat_port)
             status = self.database.remove_container(con)
             status = self.database.add_container(container_id=con_id, container_name=con_name,
                                                  nat_port=nat_port, management_ip=management_ip,
                                                  docker_port=docker_port, ram=ram,
                                                  cpu=cpu, username=DEFAULT_CONTAINER_USENAME,
                                                  password=DEFAULT_CONTAINER_PASSWORD,
                                                  node=node, image=image, user=user)
             if status is not True:
                 logger.error("Failed to add to Database")
             else:
                 logger.error("Successfully Migrated one container to {0}".format(node.ip_addr))
Example #16
    def new(self, item_id: str, order_type: str, platinum: int, quantity: int, visible: bool = True):
        if self.order_id != '':
            logger.warning('Tried to create a new order when it already exists')
            return self

        logger.info(f'Placing a new {order_type} order of {item_id} x{quantity} for {platinum}p each and visible {visible}')
        self.item_id = item_id
        self.order_type = order_type
        self.platinum = platinum
        self.quantity = quantity
        self.visible = visible
        self.item_name = self.get_item_name_by_id(item_id)

        url = self._build_url('profile', 'orders')
        payload = {
            'order_type': self.order_type,
            'item_id': self.item_id,
            'platinum': self.platinum,
            'quantity': self.quantity,
            'visible': self.visible
        }
        self._json(self._post(url, payload), 200)
        return self
Example #17
    def train(self, data_file='../data/corpus/wiki-sg-norm-lc-drop-bin.bz2', save_path=None,
              batch_size=256, train_nb_words=100000000, val_nb_words=100000, train_val_nb=100000,
              validation_interval=1800, log_file=None):
        opt_info = self.optimizer.get_config()
        opt_info = ', '.join(["{}: {}".format(n, v) for n, v in opt_info.items()])

        logger.info('training with file: %s' % data_file)
        logger.info('training with batch size %d' % batch_size)
        logger.info('training with %d words; validate with %d words during training; '
                    'evaluate with %d words after training' % (train_nb_words, train_val_nb, val_nb_words))
        logger.info('validate every %f seconds' % float(validation_interval))
        logger.info('optimizer: %s' % opt_info)

        log_file = LogInfo(log_file)
        log_file.info('training with file: %s' % data_file)
        log_file.info('training with batch size %d' % batch_size)
        log_file.info('training with %d words; validate with %d words during training; '
                      'evaluate with %d words after training' % (train_nb_words, train_val_nb, val_nb_words))
        log_file.info('validate every %f seconds' % float(validation_interval))
        log_file.info('optimizer: %s' % opt_info)

        sentences = [None for _ in range(MAX_SETN_LEN)]  # TODO: sentences longer than 64 are ignored.

        max_vocab = self.vocab_size - 1
        nb_trained = 0.
        nb_words_trained = 0.0
        sent_gen = grouped_sentences(data_file)
        val_sents = self.get_val_data(sent_gen, val_nb_words)
        train_val_sents = self.get_val_data(sent_gen, train_val_nb)

        self.validation(train_val_sents, batch_size, log_file)
        start_ = time()
        next_val_time = start_ + validation_interval
        for sents in sent_gen:
            mask = (sents > max_vocab)
            sents[mask] = max_vocab
            chunk = chunk_sentences(sentences, sents, batch_size)
            if chunk is None:
                continue

            # loss, ce, nb_wrd = self._train(chunk, chunk)
            x = self.negative_sample(chunk)
            loss = self._loop_train(x, batch_size)
            nb_trained += chunk.shape[0]
            nb_words_trained += chunk.size
            end_ = time()
            elapsed = float(end_ - start_)
            speed1 = nb_trained/elapsed
            speed2 = nb_words_trained/elapsed
            eta = (train_nb_words - nb_words_trained) / speed2
            eta_h = int(math.floor(eta/3600))
            eta_m = int(math.ceil((eta - eta_h * 3600)/60.))
            logger.info('%s:Train - ETA: %02d:%02d - loss: %5.1f - speed: %.1f sent/s %.1f words/s' %
                        (self.__class__.__name__, eta_h, eta_m, loss, speed1, speed2))
            log_file.info('%s:Train - time: %f - loss: %.6f' % (self.__class__.__name__, end_, loss))

            if end_ > next_val_time:
                # noinspection PyUnresolvedReferences
                self.validation(train_val_sents, batch_size, log_file)
                next_val_time = time() + validation_interval

            if nb_words_trained >= train_nb_words:
                logger.info('Training finished. Evaluating ...')
                log_file.info('Training finished. Evaluating ...')
                self.validation(val_sents, batch_size, log_file)
                if save_path is not None:
                    self.save_params(save_path)
                break
        log_file.close()
Example #18
with file(options.coding_file, 'rb') as f:
    sparse_coding = pickle.load(f)
    # print sparse_coding.dtype

nb_vocab = options.nb_vocab
sparse_coding = sparse_coding[nb_vocab // 1000]
nb_vocab, nb_base = sparse_coding.shape
nb_base -= 1
unigram_table = get_unigram_probtable(
    nb_words=nb_vocab,
    save_path='../data/wiki-unigram-prob-size%d.pkl' % nb_vocab)

if embedding_file != '':
    with file('../data/wiki-wordmap-trunc300k.wp', 'rb') as f:
        wp = pickle.load(f)
    freq = wp['idx2wc']
    logger.info('Using word2vec to initialize word embeddings %s ' %
                embedding_file)
    ini_embeds = [compose_dense_repr(nb_base, nb_vocab, freq, embedding_file)]
else:
    ini_embeds = None

if options.decay:
    opt = AdamAnneal(lr=options.lr, lr_min=options.lr_min, gamma=options.gamma)
else:
    opt = adam(lr=options.lr)

if options.log_file == '':
    log_file = None
else:
    log_file = options.log_file

if options.save == '':
Example #19
    def train(self,
              data_file='../data/corpus/wiki-sg-norm-lc-drop-bin.bz2',
              save_path=None,
              batch_size=256,
              train_nb_words=100000000,
              val_nb_words=100000,
              train_val_nb=100000,
              validation_interval=1800,
              log_file=None):
        opt_info = self.optimizer.get_config()
        opt_info = ', '.join(
            ["{}: {}".format(n, v) for n, v in opt_info.items()])

        logger.info('training with file: %s' % data_file)
        logger.info('training with batch size %d' % batch_size)
        logger.info(
            'training with %d words; validate with %d words during training; '
            'evaluate with %d words after training' %
            (train_nb_words, train_val_nb, val_nb_words))
        logger.info('validate every %f seconds' % float(validation_interval))
        logger.info('optimizer: %s' % opt_info)

        log_file = LogInfo(log_file)
        log_file.info('training with file: %s' % data_file)
        log_file.info('training with batch size %d' % batch_size)
        log_file.info(
            'training with %d words; validate with %d words during training; '
            'evaluate with %d words after training' %
            (train_nb_words, train_val_nb, val_nb_words))
        log_file.info('validate every %f seconds' % float(validation_interval))
        log_file.info('optimizer: %s' % opt_info)

        sentences = [None for _ in range(MAX_SETN_LEN)]  # TODO: sentences longer than 64 are ignored.

        max_vocab = self.vocab_size - 1
        nb_trained = 0.
        nb_words_trained = 0.0
        sent_gen = grouped_sentences(data_file)
        val_sents = self.get_val_data(sent_gen, val_nb_words)
        train_val_sents = self.get_val_data(sent_gen, train_val_nb)

        self.validation(train_val_sents, batch_size, log_file)
        start_ = time()
        next_val_time = start_ + validation_interval
        for sents in sent_gen:
            mask = (sents > max_vocab)
            sents[mask] = max_vocab
            chunk = chunk_sentences(sentences, sents, batch_size)
            if chunk is None:
                continue

            # loss, ce, nb_wrd = self._train(chunk, chunk)
            x = self.negative_sample(chunk)
            loss = self._loop_train(x, batch_size)
            nb_trained += chunk.shape[0]
            nb_words_trained += chunk.size
            end_ = time()
            elapsed = float(end_ - start_)
            speed1 = nb_trained / elapsed
            speed2 = nb_words_trained / elapsed
            eta = (train_nb_words - nb_words_trained) / speed2
            eta_h = int(math.floor(eta / 3600))
            eta_m = int(math.ceil((eta - eta_h * 3600) / 60.))
            logger.info(
                '%s:Train - ETA: %02d:%02d - loss: %5.1f - speed: %.1f sent/s %.1f words/s' %
                (self.__class__.__name__, eta_h, eta_m, loss, speed1, speed2))
            log_file.info('%s:Train - time: %f - loss: %.6f' %
                          (self.__class__.__name__, end_, loss))

            if end_ > next_val_time:
                # noinspection PyUnresolvedReferences
                self.validation(train_val_sents, batch_size, log_file)
                next_val_time = time() + validation_interval

            if nb_words_trained >= train_nb_words:
                logger.info('Training finished. Evaluating ...')
                log_file.info('Training finished. Evaluating ...')
                self.validation(val_sents, batch_size, log_file)
                if save_path is not None:
                    self.save_params(save_path)
                break
Example #20
def objects_from_image():
    logger.info(detect_objects("static/images/image2.jpg"))
Example #21
 def delete(self) -> dict:
     logger.info(f'Deleting {self.order_type} order {self.order_id}: {self.item_name} x{self.quantity} for {self.platinum}p each and visible {self.visible}')
     url = self._build_url('profile', 'orders', self.order_id)
     return self._json(self._delete(url), 200)
Example #22
File: main_nce8.py Project: chenych11/lm
with file(options.coding_file, 'rb') as f:
    sparse_coding = pickle.load(f)
    # print sparse_coding.dtype

nb_vocab = options.nb_vocab
sparse_coding = sparse_coding[nb_vocab//1000]
nb_vocab, nb_base = sparse_coding.shape
nb_base -= 1
unigram_table = get_unigram_probtable(nb_words=nb_vocab, save_path='../data/wiki-unigram-prob-size%d.pkl' % nb_vocab)

if embedding_file != '':
    with file('../data/wiki-wordmap-trunc300k.wp', 'rb') as f:
        wp = pickle.load(f)
    freq = wp['idx2wc']
    logger.info('Using word2vec to initialize word embeddings %s ' % embedding_file)
    ini_embeds = [compose_dense_repr(nb_base, nb_vocab, freq, embedding_file)]
else:
    ini_embeds = None

if options.decay:
    opt = AdamAnneal(lr=options.lr, lr_min=options.lr_min, gamma=options.gamma)
else:
    opt = adam(lr=options.lr)

if options.log_file == '':
    log_file = None
else:
    log_file = options.log_file

if options.save == '':
Example #23
 def get_profile(self) -> dict:
     logger.info(f'Getting info for profile: {self.username}')
     url = self._build_url('profile', self.username)
     return self._json(self._get(url), 200)
Example #24
 def get_statistics(self) -> dict:
     logger.info(f'Getting statistics for profile: {self.username}')
     url = self._build_url('profile', self.username, 'statistics')
     return self._json(self._get(url), 200)
Example #25
 def get_achievements(self) -> dict:
     logger.info(f'Getting achievements for profile: {self.username}')
     url = self._build_url('profile', self.username, 'achievements')
     return self._json(self._get(url), 200)
Example #26
 def get_reviews(self) -> dict:
     logger.info(f'Getting reviews for profile: {self.username}')
     url = self._build_url('profile', self.username, 'reviews')
     return self._json(self._get(url), 200)
Example #27
    def run_container(image_name, compute_node_ip, ram, cpu, container_name=None):
        """
            Start a container related to the image_name on compute_node_ip
            TODO: SSH into compute_node_ip and execute the
            proper docker run command
            :param image_name: (string) Eg. Nginx
            :param compute_node_ip: (String) Eg. 10.0.0.1
            :param ram: Max ram for this container (in MB)
            :return: Status(Boolean), Docker port(Integer), container_name(str), container_id(str)
        """
        # pdb.set_trace()
        image_name = "{0}/{1}".format(DOCKER_REGISTRY_URI, image_name)
        base_cmd = "ssh -o StrictHostKeyChecking=no root@{0} ".format(compute_node_ip)
        pull_cmd = "docker pull {0}".format(image_name)
        try:
            # We don't want the exception to be printed on master's screen, hence stdout=sp.PIPE
            output = sp.call("{0}{1}".format(base_cmd, pull_cmd), shell=True, stdout=sp.PIPE)
            if output != 0:
                logger.error("Failed: Pull image {0}".format(image_name))
        except Exception as e:
            logger.exception("Failed: command: {0}{1}\n{2}".format(base_cmd, pull_cmd,e))
            # It's ok to get an exception. Maybe the registry is down. Just try if the image
            # is in local storage.
            # return False, None, None, None

        # Once image has been pulled, run it.
        # We plan to restart the containers on a failed node to a live node.
        # Hence we don't want the --restart always option. Otherwise we will
        # have duplicate containers.
        TEMPORARY = "temporary"
        if container_name:  # If a container name has been given
            # Check if a shared dir already exists
            master_dir_name = os.path.join(MASTER_SHARED_DIR, container_name)
            host_mount_dir = os.path.join(SERVER_SHARED_DIR, container_name)
            container_name = "--name {0}".format(container_name)
        else:
            container_name = ""
            host_mount_dir = os.path.join(SERVER_SHARED_DIR, TEMPORARY)
            master_dir_name = os.path.join(MASTER_SHARED_DIR, TEMPORARY)
        if not os.path.exists(master_dir_name):
            os.mkdir(master_dir_name)
            os.chmod(master_dir_name, 0o777)
        shared_volume = "-v {0}:{1}".format(host_mount_dir, CON_SHARED_DIR)
        run_cmd = "docker run -P --detach --cpus {0} --memory {1}m {2} {3} {4}" \
                  .format(cpu, ram, container_name, shared_volume, image_name)
        try:
            print("{0}{1}".format(base_cmd, run_cmd))
            #exit()
            cont_id = sp.check_output("{0}{1}".format(base_cmd, run_cmd), shell=True).decode('utf-8')
            assert cont_id
        except Exception as e:
            logger.exception("Failed: {0}{1}\n{1}".format(base_cmd, run_cmd, e))
            return False, None, None, None

        inspect_cmd = "docker inspect {0}".format(cont_id)
        try:
            output = sp.check_output("{0}{1}".format(base_cmd, inspect_cmd), shell=True).decode('utf-8')
            data = json.loads(output)[0]
            cont_name = data['Name'][1:].strip()  # The output is "/some_name"
            docker_port = int(data['NetworkSettings']['Ports']['22/tcp'][0]['HostPort'])
            new_dir_name = os.path.join(MASTER_SHARED_DIR, cont_name)
            os.rename(master_dir_name, new_dir_name)
            logger.info("Successfully Launched Container. Name={0}, Mounted Dir {1}".format(cont_name, new_dir_name))
        except Exception as e:
            logger.exception("Failed: {0}{1}\n{2}".format(base_cmd, inspect_cmd, e))
            return False, None, None, None

        return True, docker_port, cont_name, cont_id.strip()
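A hedged usage sketch of run_container, with a hypothetical image name and node address, assuming the function is reachable as a static method or at module level; on success it returns the host port that Docker mapped to the container's SSH port (22/tcp):

status, docker_port, cont_name, cont_id = run_container(
    "nginx", "10.0.0.1", ram=512, cpu=1)
if status:
    print("SSH reachable via host port", docker_port)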
Example #28
 def change_bio(self, message: str) -> dict:
     """Change the current user's profile bio and return JSON of the new bio: "{'about': '<p>Hello World</p>', 'about_raw': 'Hello World'}"."""
     logger.info(f'Changing profile bio for user {self.username}')
     url = self._build_url('profile', 'customization', 'about')
     payload = {'about': message}
     return self._json(self._post(url, payload), 200)