Example 1
def send_speech(bot, chat_id, speech_id):
    try:
        speech = DialogManager.get_speech(speech_id)
    except StopIteration:
        # Unknown speech_id: fall back to the first speech.
        speech = DialogManager.get_first_speech()

    button_list = [
        InlineKeyboardButton(option['text'],
                             callback_data=str(option['reference']))
        for option in speech['options']
    ]
    reply_markup = InlineKeyboardMarkup(build_menu(button_list, n_cols=2))
    bot.send_message(chat_id=chat_id,
                     text=speech['text'],
                     reply_markup=reply_markup)
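Example 1 relies on a build_menu helper that is not shown in the snippet. A minimal sketch of such a helper, with the signature inferred from the call above (a common python-telegram-bot pattern, not code from the original project):

def build_menu(buttons, n_cols):
    # Chunk a flat list of InlineKeyboardButton objects into rows of n_cols buttons.
    return [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]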
Example 3
class GameManager(object):
    
    def __init__(self, dialog_xml, event_xml, level):
        self._dialog_manager = DialogManager(dialog_xml, event_xml)
        self._stats = {
            'level': level,
            'idle': 50,
            'combat': 50,
            'science': 50,
            'food': 50,
            'medicine': 50,
            'intel': 50,
            'range': 50,
            'sick': 50,
            'honor': 50,
            'money': 50,
        }
        
    def update(self):
        self._dialog_manager.update(self._stats)
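A minimal usage sketch for the class above; the XML file names and tick count are placeholders, not values from the original project:

game = GameManager('dialogs.xml', 'events.xml', level=1)
for _ in range(10):      # e.g. ten game ticks
    game.update()        # feeds the current stats to the DialogManager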
Example 5
def start_bot(c):
    DialogManager.set_speeches(c.config.speeches, c.config.start_id)

    request_kwargs = {
        'proxy_url': c.config.proxy.url,
    }

    updater = Updater(token=c.config.token, request_kwargs=request_kwargs)
    dispatcher = updater.dispatcher

    start_handler = CommandHandler('start', hello_message)
    dialog = MessageHandler(Filters.text, dialog_handler)
    callback_dialog = CallbackQueryHandler(dialog_callback_handler)

    dispatcher.add_handler(start_handler)
    dispatcher.add_handler(callback_dialog)
    dispatcher.add_handler(dialog)

    logging.info("Start bot..")
    updater.start_polling()
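The handlers registered above (hello_message, dialog_handler, dialog_callback_handler) are defined elsewhere in that project. A hedged sketch of how the callback handler could route button presses back into send_speech from Example 1; the logic is illustrative, not the original implementation:

def dialog_callback_handler(bot, update):
    # callback_data carries the id of the referenced speech (see Example 1).
    query = update.callback_query
    query.answer()
    send_speech(bot, query.message.chat_id, query.data)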
Example 6

if __name__ == "__main__":
    args = parse_args()

    # Create user and dialog characters
    user = SimUser().to(DEVICE)
    # You can use more DialogManager parameters to change language model, embeddings, etc if needed:
    if args.encoder_pretrain == True:
        vocab = np.load("data/vocab.npz")["dic"][()]
        vocab_size = len(vocab)
    else:
        vocab_size = user.vocabSize

    dialog_manager = DialogManager(num_emb=vocab_size + 1,
                                   pretrained=args.pretrained,
                                   given_embs=args.given_embs,
                                   rnn_over_captions=args.rnn_over_captions,
                                   encoder_pretrain=args.encoder_pretrain).to(DEVICE)

    if args.adversarial == True:
        discriminator = DNet().to(DEVICE)
    else:
        discriminator = None
    
    print("created user and dialog, starting training")

    do_train = not args.eval_only
    trainer = Trainer(dialog_manager, discriminator, user, args)

    # Loading pretrained model, e.g. from arithmetic pretraining
    if args.pretrained_model is not None:
        print("Loading", args.pretrained_model)
Example 7
                        help='pretrained model under model folder')

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()

    exp_name = "concat_arithm_2"
    args.pretrained_model = f"rl_{exp_name}-4.pt"

    # Create user and dialog characters
    user = SimUser().to(DEVICE)
    dialog_manager = DialogManager(num_emb=user.vocabSize + 1,
                                   pretrained=args.pretrained,
                                   given_embs=args.given_embs).to(DEVICE)
    trainer = ArithmeticTrainer(dialog_manager, user, args)
    if args.pretrained_model is not None:
        trainer.load_model(args.pretrained_model)

    # train for some number of epochs
    epoch = 0
    for epoch in range(args.n_epochs):
        trainer.train_arithmetic(epoch)
    # trainer.save_model('arithmetic.pt')

    print("\tFinished training, starting final evaluation:")
    pairs = trainer.eval_arithmetic(epoch)

    with open(f"./resources/dialogs/pairs_{exp_name}.pkl", "wb") as f:
Example 8
from flask import Flask
from flask import render_template
from flask import request, jsonify, send_file
from dialog_manager import DialogManager

app = Flask(__name__)
manager = DialogManager()


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/dialog', methods=['GET', 'POST'])
def dialog():
    domain = request.args.get('domain')
    sentence = request.args.get('message')
    sentence = sentence.strip()
    manager.user(domain, sentence)
    res = manager.response()
    return jsonify(res)


@app.route('/resetDialog', methods=['GET', 'POST'])
def resetDialog():
    manager.reset()
    return "reset done!"


@app.route('/intent_classifier/<filename>', methods=['GET'])
def intent_classifier(filename):
    # Assumed completion (the original route body is not shown): serve the requested
    # intent-classifier artifact via send_file.
    return send_file(filename)
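A hedged example of exercising the dialog endpoints above from a client; the host and port assume Flask's development defaults, and the domain value is made up:

import requests

BASE = "http://localhost:5000"
resp = requests.get(BASE + "/dialog",
                    params={"domain": "restaurant", "message": "book a table for two"})
print(resp.json())                   # DialogManager's response, serialized by jsonify
requests.get(BASE + "/resetDialog")  # clears the manager's dialog state
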
class Trainer(BaseTrainer):
    def __init__(self, dialog_manager, discriminator, user, args):
        # Our main characters:
        self.dialog_manager = dialog_manager
        self.discriminator = discriminator
        self.user = user

        # Setting up optimizers for both training phases
        self.optimizer_sl = optim.Adam(self.dialog_manager.parameters(),
                                       lr=args.lr_sl,
                                       weight_decay=1e-8)

        if args.adversarial == True:
            self.optimizer_d = optim.Adam(self.discriminator.parameters(),
                                          lr=args.lr_d,
                                          weight_decay=1e-8)

        # A different optimizer for RL phase, may have different params later on:
        self.optimizer_rl = optim.Adam(self.dialog_manager.parameters(),
                                       lr=args.lr_rl)

        # Keep the args for other purposes
        self.args = args

    def train_sl(self, epoch):
        """
        Train for one epoch with supervised learning:
        using triplet loss
        """
        self.dialog_manager.train()
        if self.args.adversarial == True:
            self.discriminator.train()

        all_input = self.user.train_feature
        dialog_turns = self.args.train_turns

        exp_monitor_candidate = ExpMonitor(user=self.user,
                                           train_mode=True,
                                           args=self.args,
                                           stage="SL")

        user_img_idx = torch.LongTensor(self.args.batch_size)
        act_img_idx = torch.LongTensor(self.args.batch_size)
        neg_img_idx = torch.LongTensor(self.args.batch_size)
        num_batches = math.ceil(all_input.size(0) / self.args.batch_size)

        if DEBUG: num_batches = 1

        for batch_idx in tqdm(range(1, num_batches + 1)):
            # sample target images and first turn feedback images
            self.user.sample_idx(user_img_idx, train_mode=True)
            self.user.sample_idx(act_img_idx, train_mode=True)

            # Update representations in Ranker
            self.dialog_manager.update_ranker_rep(all_input)

            # Reset hidden state of state tracker
            self.dialog_manager.reset_state_tracker(self.args.batch_size)

            outs = []
            if self.args.adversarial == True:
                outs_d = []

            act_input = all_input[act_img_idx]
            act_input = act_input.to(DEVICE)

            act_emb = self.dialog_manager.state_representer.forward_image(
                act_input)

            for k in range(dialog_turns):
                # get relative captions from user model given user target images and feedback images
                # txt_input is a tensor of shape (batch_size, caption_length),  decoded_text is a list of full captions of length batch_size:
                txt_input, decoded_text = self.user.get_feedback(
                    act_idx=act_img_idx,
                    user_idx=user_img_idx,
                    train_mode=True)
                txt_input = txt_input.to(DEVICE)

                if self.args.arithm_reg > 0:
                    target_input = all_input[user_img_idx].to(DEVICE)
                    cand_input = all_input[act_img_idx].to(DEVICE)
                    target_emb = self.dialog_manager.state_representer.forward_image(
                        target_input)
                    cand_emb = self.dialog_manager.state_representer.forward_image(
                        cand_input)
                    rel_caption_emb = self.dialog_manager.state_representer.forward_text(
                        txt_input)
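                    # The arithmetic regularizer below penalizes
                    # ((target_emb - cand_emb) - rel_caption_emb)**2, averaged over the batch
                    # and summed over embedding dimensions, pushing the relative-caption
                    # embedding to act as the arithmetic difference between target and
                    # candidate image embeddings (weighted by args.arithm_reg).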
                    arithm_loss = (((target_emb - cand_emb) - rel_caption_emb)**2
                                   ).mean(dim=0).sum() * self.args.arithm_reg

                # update the query action vector given feedback image and text feedback in this turn
                # get a representation of history, based on the new candidate+caption pair:
                img_emb, txt_emb, action, _ = self.dialog_manager.forward(
                    act_emb, txt_input, is_raw_img=False)

                if self.args.adversarial == True:
                    # Forward image and text embeddings to discriminator and get d_loss
                    d_real = self.discriminator(img_emb.detach())
                    d_fake = self.discriminator(txt_emb.detach())
                    d_real_loss = torch.mean(1 - d_real)
                    d_fake_loss = torch.mean(d_fake)
                    loss_d = d_real_loss + d_fake_loss
                    outs_d.append(loss_d)

                # obtain the next turn's feedback images
                act_img_idx = self.dialog_manager.nearest_neighbor(action)

                # sample negative images for triplet loss
                self.user.sample_idx(neg_img_idx, train_mode=True)

                user_input = all_input[user_img_idx]
                neg_input = all_input[neg_img_idx]
                new_act_input = all_input[act_img_idx]
                user_input = user_input.to(DEVICE)
                neg_input = neg_input.to(DEVICE)
                new_act_input = new_act_input.to(DEVICE)

                new_act_emb = self.dialog_manager.state_representer.forward_image(
                    new_act_input)
                # ranking and loss
                ranking_candidate = self.dialog_manager.compute_rank(
                    action, user_img_idx)
                user_emb = self.dialog_manager.state_representer.forward_image(
                    user_input)
                neg_emb = self.dialog_manager.state_representer.forward_image(
                    neg_input)
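                # Triplet objective: pull the query/action vector toward the target (user)
                # image embedding and away from the sampled negative, by at least triplet_margin.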
                loss = triplet_loss(self.args.triplet_margin, action, user_emb,
                                    neg_emb)

                if self.args.arithm_reg > 0:
                    loss += arithm_loss

                if self.args.adversarial == True:
                    # get g_loss
                    d_real_backward = self.discriminator(img_emb)
                    d_fake_backward = self.discriminator(txt_emb)
                    g_real_loss = torch.mean(d_real_backward)
                    g_fake_loss = torch.mean(1 - d_fake_backward)
                    loss_gan = g_real_loss + g_fake_loss
                    loss += loss_gan

                outs.append(loss)
                act_emb = new_act_emb

                exp_monitor_candidate.log_step(ranking_candidate, loss,
                                               user_img_idx, act_img_idx, k)

            #print("d_loss and g_loss:")
            # finish dialog and update model parameters
            if self.args.adversarial == True:
                self.optimizer_d.zero_grad()
                outs_d = torch.stack(outs_d, dim=0).mean()
                #print(outs_d)
                outs_d.backward()
                self.optimizer_d.step()

            self.optimizer_sl.zero_grad()
            outs = torch.stack(outs, dim=0).mean()
            #print(outs)
            outs.backward()
            self.optimizer_sl.step()

            if batch_idx % self.args.log_interval == 0:
                print('# candidate ranking #')
                exp_monitor_candidate.print_interval(epoch, batch_idx,
                                                     num_batches)

        print('# candidate ranking #')
        exp_monitor_candidate.print_all(epoch)

    def setup_for_train_rl(self):
        # Target network used as a helper during RL; self.dialog_manager remains the
        # behavior model that is used after training.
        if self.args.encoder_pretrain == True:
            vocab = np.load("data/vocab.npz")["dic"][()]
            vocab_size = len(vocab)
        else:
            vocab_size = self.user.vocabSize
        self.target_dialog_manager = DialogManager(
            num_emb=vocab_size + 1).to(DEVICE)
        self.target_dialog_manager.load_state_dict(
            self.dialog_manager.state_dict())
        # Tie rankers to be the same object:
        self.target_dialog_manager.ranker = self.dialog_manager.ranker
        self.dialog_manager.set_rl_mode()
        self.target_dialog_manager.eval()

    def train_rl(self, epoch):
        """
        Train for one epoch with RL:
        using model-based policy improvement
        """
        self.dialog_manager.train()
        # use the training-split features
        all_input = self.user.train_feature
        dialog_turns = self.args.train_turns

        user_img_idx = torch.LongTensor(self.args.batch_size)
        act_img_idx = torch.LongTensor(self.args.batch_size)

        exp_monitor_candidate = ExpMonitor(user=self.user,
                                           train_mode=True,
                                           args=self.args,
                                           stage="RL")

        # update ranker in the target model
        self.target_dialog_manager.update_ranker_rep(all_input)
        num_batches = math.ceil(all_input.size(0) / self.args.batch_size)

        if DEBUG: num_batches = 1

        for batch_idx in tqdm(range(1, num_batches + 1)):
            # sample data index
            self.user.sample_idx(user_img_idx, train_mode=True)
            self.user.sample_idx(act_img_idx, train_mode=True)

            self.target_dialog_manager.reset_state_tracker(
                self.args.batch_size)
            self.dialog_manager.reset_state_tracker(self.args.batch_size)

            loss_sum = 0
            for k in range(dialog_turns):
                # construct data
                txt_input, decoded_text = self.user.get_feedback(
                    act_idx=act_img_idx.to(DEVICE),
                    user_idx=user_img_idx.to(DEVICE),
                    train_mode=True)
                txt_input = txt_input.to(DEVICE)

                # update model part
                act_img_idx = act_img_idx.to(DEVICE)
                act_emb = self.dialog_manager.ranker.feat[act_img_idx]
                _, _, behavior_state, _ = self.dialog_manager.forward(
                    act_emb, txt_input, is_raw_img=False)

                # update base model part
                _, _, target_state, _ = self.target_dialog_manager.forward(
                    act_emb, txt_input, is_raw_img=False)

                ranking_candidate = self.dialog_manager.compute_rank(
                    behavior_state, user_img_idx)

                act_img_idx_mc, loss = rollout_search(
                    self.user, self.dialog_manager, self.target_dialog_manager,
                    behavior_state, target_state, k, dialog_turns,
                    user_img_idx, all_input, self.args)

                loss_sum = loss + loss_sum

                act_img_idx.copy_(act_img_idx_mc)

                exp_monitor_candidate.log_step(ranking_candidate, loss,
                                               user_img_idx, act_img_idx, k)

            self.optimizer_rl.zero_grad()
            loss_sum.backward()
            self.optimizer_rl.step()

            if batch_idx % self.args.log_interval == 0:
                print('# candidate ranking #')
                exp_monitor_candidate.print_interval(epoch, batch_idx,
                                                     num_batches)

        print('# candidate ranking #')
        exp_monitor_candidate.print_all(epoch)

    def evaluate(self, epoch, print_decodings=False):
        self.dialog_manager.eval()
        train_mode = False
        all_input = self.user.test_feature
        dialog_turns = self.args.test_turns

        exp_monitor_candidate = ExpMonitor(user=self.user,
                                           train_mode=train_mode,
                                           args=self.args)

        user_img_idx = torch.LongTensor(self.args.batch_size)
        act_img_idx = torch.LongTensor(self.args.batch_size)
        neg_img_idx = torch.LongTensor(self.args.batch_size)
        num_batches = math.ceil(all_input.size(0) / self.args.batch_size)

        if DEBUG: num_batches = 1

        self.dialog_manager.update_ranker_rep(all_input)
        unrolled_dialogs = []
        for batch_idx in range(1, num_batches + 1):
            # sample data index
            self.user.sample_idx(user_img_idx,
                                 train_mode=train_mode,
                                 fix_state=True,
                                 seed=2 * batch_idx)
            self.user.sample_idx(act_img_idx,
                                 train_mode=train_mode,
                                 fix_state=True,
                                 seed=2 * batch_idx + 1)

            self.dialog_manager.reset_state_tracker(self.args.batch_size)

            act_img_idx = act_img_idx.to(DEVICE)
            act_emb = self.dialog_manager.ranker.feat[act_img_idx]

            dialog_batch = []
            for k in range(dialog_turns):
                # txt_input is a tensor of shape (batch_size, caption_length),  decoded_text is a list of full captions of length batch_size:
                txt_input, decoded_text = self.user.get_feedback(
                    act_idx=act_img_idx.to(DEVICE),
                    user_idx=user_img_idx.to(DEVICE),
                    train_mode=train_mode)
                txt_input = txt_input.to(DEVICE)

                if print_decodings:
                    print(decoded_text)

                _, _, action, text_att = self.dialog_manager.forward(
                    act_emb, txt_input, is_raw_img=False)
                act_img_idx = self.dialog_manager.nearest_neighbor(action)
                # act_img_idx is the index of new candidate image
                image_names = self.user.get_image_names_from_idx(
                    act_img_idx.cpu().detach().numpy(), train_mode)
                if print_decodings:
                    print(image_names)

                self.user.sample_idx(neg_img_idx, train_mode=train_mode)
                user_img_idx = user_img_idx.to(DEVICE)
                neg_img_idx = neg_img_idx.to(DEVICE)
                act_img_idx = act_img_idx.to(DEVICE)

                user_emb = self.dialog_manager.ranker.feat[user_img_idx]
                neg_emb = self.dialog_manager.ranker.feat[neg_img_idx]
                new_act_emb = self.dialog_manager.ranker.feat[act_img_idx]

                ranking_candidate = self.dialog_manager.compute_rank(
                    action, user_img_idx)
                loss = triplet_loss(self.args.triplet_margin, action, user_emb,
                                    neg_emb)
                act_emb = new_act_emb

                # log
                exp_monitor_candidate.log_step(ranking_candidate, loss,
                                               user_img_idx, act_img_idx, k)

                # Add tuple (captions, next candidates)
                dialog_batch.append((decoded_text, text_att, image_names))

            # Decode the target user's image and add to batch, too
            target_image_names = self.user.get_image_names_from_idx(
                user_img_idx.cpu().detach().numpy(), train_mode)
            dialog_batch.append((None, target_image_names))

            # Append results for this batch
            unrolled_dialogs.append(dialog_batch)

        exp_monitor_candidate.print_all(epoch)
        return unrolled_dialogs


class AppLauncher:

    @qi.nobind
    def __init__(self, session=None):
        self._session = session
        self._module_name = self.__class__.__name__
        self.__name__ = self._module_name
        self._logger = qi.Logger(self._module_name)
        self._logger.info(":::: Starting {} ::::".format(self._module_name))
        self._pref_domain = "tool.applauncher"  # @TODO: "com.sbr.apps.app-launcher"

        # public variables
        self._logger.info("Initializing public variables...")
        self.current_state = qi.Property("s")
        self.current_state.setValue("")
        self.current_page = qi.Property("s")
        self.current_page.setValue("Home")
        self.apps_full_list = qi.Property()
        self.apps_full_list.setValue({})
        self.pages_definition = qi.Property()
        self.pages_definition.setValue({})
        self.autonomous_enabled = qi.Property("b")
        self.autonomous_enabled.setValue(True)
        self.display_app_name = qi.Property("b")
        self.display_app_name.setValue(True)
        self.ping_required = qi.Signal("(i)")

        # internal variables
        self._logger.info("Initializing internal variables...")
        self._app_uuid = helpers.find_app_name(self._logger)
        self._current_app = ""
        self._preferences_manager = PreferencesManager(self._logger, self._session, self._pref_domain)
        self._app_list_manager = AppListManager(self._logger, self._session, self._preferences_manager,
                                                self._app_uuid, self.apps_full_list, self.pages_definition)
        self._view_manager = ViewManager(self._logger, self._session, self._preferences_manager,
                                         self._app_uuid, self.current_state, self.ping_required)
        self._dialog_manager = DialogManager(self._logger, self._session, self._preferences_manager,
                                             self.pages_definition, self.autonomous_enabled, self.current_page)

        _pref_display_app_name = self._preferences_manager.get_value('behaviorNameDisplayed', True)
        if _pref_display_app_name:
            self.display_app_name.setValue(True)
        else:
            self.display_app_name.setValue(False)

        self._logger.info(":::: Ready! ::::")

    @qi.nobind
    def cleanup(self):
        try:
            self._logger.info(":::: Stopping app launcher... ::::")
        except NameError:
            print "╔══════════════════════════╦══════════════════════════╗"
            print "║ End of automatic logging ║ was the app uninstalled? ║"
            print "╚══════════════════════════╩══════════════════════════╝"

            class DummyLog:
                def __init__(self):
                    pass

                @staticmethod
                def verbose(*args):
                    for a in args:
                        print "verbose: {}".format(a)

                @staticmethod
                def info(*args):
                    for a in args:
                        print "info: {}".format(a)

                @staticmethod
                def warning(*args):
                    for a in args:
                        print "warning: {}".format(a)

                @staticmethod
                def error(*args):
                    for a in args:
                        print "error: {}".format(a)

            self._logger = DummyLog()
            self._dialog_manager._logger = self._logger
            self._view_manager._logger = self._logger
            self._app_list_manager._logger = self._logger
            self._app_list_manager._icons_storage._logger = self._logger
            self._preferences_manager._logger = self._logger
        try:
            # clean variables
            self._dialog_manager.cleanup()
            self._view_manager.cleanup()
            self._app_list_manager.cleanup()
            self._preferences_manager.cleanup()
        except Exception as e:
            self._logger.info("error while stopping app launcher: {}".format(e))

        # Reset states
        try:
            basic = self._session.service("ALBasicAwareness")
            basic.setEnabled(True)
        except Exception as e:
            self._logger.info("error while configuring ALBasicAwareness: {}".format(e))

        self._logger.info(":::: Stopped! ::::")

    """
        Public bound functions
    """
    @qi.bind(paramsType=[qi.Int32], returnType=qi.Void, methodName="ping")
    def ping(self, seconds_before_next_ping_request):
        """ This function should be called by the web page to signal that it is still alive.
        When the signal ping_required is raised, the web page should call this function.
        In case ping is not called in time, the tablet will be reset.

        Argument: delay in seconds before next ping will be asked."""
        self._view_manager.ping(seconds_before_next_ping_request)

    @qi.bind(paramsType=[], returnType=qi.Void, methodName="_updateAppList")
    def _update_app_list(self):
        """ Reload the list of applications and pages."""
        self._app_list_manager.update_app_lists(None, True)

    @qi.bind(paramsType=[qi.String], returnType=qi.Void, methodName="runBehavior")
    def run_behavior(self, behavior):
        """ Ask autonomous life to start a given behavior and check if the launch was ok after 15s.

        Argument: behavior to launch."""
        try:
            # If an error occurs during launch, the tablet will be displayed again
            app_launched_check = qi.async(self._view_manager.display_view, delay=15000000)  # delay is in microseconds (15 s)
            # Life switch focus to the chosen app
            life = self._session.service("ALAutonomousLife")
            life.switchFocus(behavior)
            self._logger.info("Switch focus")
            app_launched_check.cancel()
            self._logger.info("Application launch end")

        except Exception as e:
            self._logger.error("Run behavior error: " + str(e))

    @qi.bind(paramsType=[], returnType=qi.Void, methodName="stopBehavior")
    def stop_behavior(self):
        """ Stop the current running behavior. """
        self._session.service('ALBehaviorManager').stopBehavior(self._current_app)
        self._logger.info("Stop behavior: {}".format(self._current_app))

    @qi.bind(paramsType=[qi.Int32], returnType=qi.Void, methodName="adjustVolume")
    def adjust_volume(self, diff):
        """ Change the robot volume. The volume will not go higher than 100% or lower than 20%.

        Argument: delta (positive or negative) to add to the volume. """
        audio = self._session.service('ALAudioDevice')
        current_volume = audio.getOutputVolume()
        new_volume = current_volume + diff
        if new_volume > 100:
            new_volume = 100
        elif new_volume < 20:
            new_volume = 20
        audio.setOutputVolume(new_volume)
        self._logger.info("New volume: {}" .format(new_volume))