Example #1
def load_jobdir( joblist, delAbsent = True, verbose = True ):
    job_path = joblist.jobsdir
    megui_jobs = joblist.getJobList()
    files = util.scan_dir( job_path, recursive = False, pattern = 'job*', verbose = False )
    lst_missed = []
    to_del = []
    for f in sorted( files ):
        f = os.path.basename( f )
        m = re_job.match( f )
        if m:
            if m.group(1) not in megui_jobs:
                lst_missed.append(m.group(0))
                joblist.appendJob(m.group(1))
    joblist.findMaxJobNum()
    if delAbsent:
        lst = [os.path.basename(f) for f in files]
        for job in megui_jobs:
            if job+".xml" not in lst:
                to_del.append(job)
        if len(to_del):
            joblist.delJob(to_del)

    if verbose:
        if len(lst_missed):
            util.say( "Missed job found: %s", ', '.join(lst_missed) )
        if len(to_del):
            util.say( "Remove absent job: %s", ', '.join(to_del) )
Example #2
 def _print_cfg_error( self, e ):
     if e.lineno not in self.cfg_errors:
         say( "%s:%d - %s", [ self.fname, e.lineno, str(e) ] )
         self.cfg_errors.add( e.lineno )
     if ( self.strictError or
          util.makebool( self.config.setdefault('',{}).setdefault(None,{}).get('STRICT',0) ) ):
         raise StrictError()
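
Note: _print_cfg_error assumes a CfgError that carries the offending config line number (raised by the loader in Example #9) and a StrictError used to abort loading. A minimal sketch of those exception classes, assumed rather than taken from the source:

class CfgError(Exception):
    # Carries the config line number; str(e) yields the message text.
    def __init__(self, lineno, msg):
        Exception.__init__(self, msg)
        self.lineno = lineno


class StrictError(Exception):
    pass
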
Example #3
def delay_20(agi):
    util.metric(agi, 'delay-20')
    agi.appexec('wait', 5)
    util.say(agi, 'please-hold')
    agi.appexec('wait', 1)
    util.say(agi, 'for-the-next-available-outgoing-line')
    agi.appexec('wait', 3)
    agi.appexec('MusicOnHold', ',6')
    agi.appexec('wait', 1)
Example #4
def delay_20(agi):
    util.metric(agi, 'friction-delay-20')
    agi.appexec('wait', 5)
    util.say(agi, 'please-hold')
    agi.appexec('wait', 1)
    util.say(agi, 'for-the-next-available-outgoing-line')
    agi.appexec('wait', 3)
    agi.appexec('MusicOnHold', ',6')
    agi.appexec('wait', 1)
Example #5
def progged(agi_o):
  game_state = create_game_state(5)
  while True:
    files_to_play = communicate_game_state(game_state)
    if files_to_play == ["you-win"]:
        util.say(agi_o, "you-win", preferred_sub='challenge', escape=True)
        return 0
    move = play_list_and_get_input(agi_o, files_to_play)
    if move in set('0123456789'):
        game_state = update_game_state(game_state, MOVES[int(move)-1])
Example #6
def prompt_and_record(agi_o, statement, username):
    """
    Play prompts for statement, record gsm file,
    save at path for statement and username.
    """
    util.say(agi_o, 'please-repeat')
    util.say(agi_o, statement)
    path = statement_path(statement, username)
    path_in = path + ':gsm'
    agi_o.appexec('record', path_in)
    agi_o.stream_file(path)
Example #7
    def get_opt( self, optdict, optname, subname=None ):
        if optname not in self.opt:
            say("INTERNAL FAILURE: unknown option %s", optname )
            return None
        if subname is None:
            src_value = optdict.get( optname, None )
        else:
            src_value = optdict.get( optname, {} ).get(subname, None)

        rv = self.opt[optname]( '' if src_value is None else src_value )
        DBG_trace( "__getopt(%s,%s) = %s (%s)", (optname, subname, src_value, str(rv) ) )
        return rv
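
Note: get_opt treats self.opt as a table mapping option names to converter callables, each of which must tolerate '' for a missing value. A standalone sketch of such a table; the names and converters are hypothetical, for illustration only:

def makebool(v):
    # Assumed semantics: truthy strings -> True, anything else (incl. '') -> False.
    return str(v).strip().lower() in ('1', 'true', 'yes', 'on')

opt = {
    'STRICT': makebool,                # '' -> False
    'PATTERN': lambda v: v or 'job*',  # '' -> default pattern
}

print(opt['STRICT']('yes'))  # -> True
print(opt['PATTERN'](''))    # -> job*
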
Example #8
def play_list_and_get_input(agi_o, files):
  # print(files)
  for file in files:
    digit = util.say(
        agi_o, file, preferred_sub='challenge', escape=True)
    if digit != '':
      return digit
  digit = util.say(
      agi_o, "enter-move", preferred_sub='challenge', escape=True)
  if digit != '':
    return digit
  return agi_o.wait_for_digit(timeout=-1)
Example #9
    def load_config( self, fname, strictError = False, content = None ):
        self.fname = fname
        self.cfg_errors = set()
        self.strictError = strictError

        DBG_trace( "ConfigLoader.load_config(%s)", fname )

        if content is None:
            try:
                with open(fname,'rt') as f:
                    content = f.readlines()
            except Exception as e:
                say( "Error: can't load '%s' config", fname )
                return

        section_re = re.compile(r"^\[([A-Z_]+)\] *(= *([A-Za-z0-9_]+))?")

        lineno = 1
        prev = [ '', None, [], 1 ]        # [0section_type, 1section_name, 2content, 3startline]
        for l in content:
            try:
                if len( l.strip() )==0:                 # skip empty lines
                    continue
                ##print lineno, l.strip()				##DEBUG

                if l[0]!='[':                           # not a section header - keep accumulating lines
                    continue

                # this is a section header - process the finished section and reset the control array for the new one
                m = section_re.match(l)
                if m is None:
                    raise CfgError( lineno, "Malformed section header" )
                elif m.group(1) not in self.sections:
                    raise CfgError( lineno, "Unknown section type %s"%m.group(1) )
                self._process_section( *prev )
                DBG_trace( "detect section at %d: %s|%s", ( lineno, m.group(1),m.group(3) ) )
                prev = [ m.group(1), m.group(3), [], lineno + 2 ]		# +1 to skip the [section] header line, +1 because line numbers start from 1

            except CfgError as e:
                self._print_cfg_error( e )
                say( l.strip() )
            finally:                                    # on each line: remember content and increase lineno
                prev[2].append(l)
                lineno += 1

        ##print "WRITE ", lineno, len(prev[2]), l.strip()		##DEBUG
        try:
            # process final section
            self._process_section( *prev )
        except CfgError as e:
            self._print_cfg_error( e )
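
For reference, section_re accepts headers of the form [TYPE] or [TYPE] = name; a quick standalone check against two hypothetical header lines:

import re

section_re = re.compile(r"^\[([A-Z_]+)\] *(= *([A-Za-z0-9_]+))?")

for line in ('[JOBS]', '[PROFILE] = x264_fast'):
    m = section_re.match(line)
    print('%s -> type=%s name=%s' % (line, m.group(1), m.group(3)))
# [JOBS] -> type=JOBS name=None
# [PROFILE] = x264_fast -> type=PROFILE name=x264_fast
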
Example #10
def vmauthenticate(agi):
    """Authenticate a voice mailbox and continue, or busy."""
    util.metric(agi, 'friction-vmauthenticate')
    # Note vmauthenticate lets the user jump to the 'a' extension if it
    # exists, so don't call this in a context that defines one!
    try:
        util.say(agi, 'authenticate-with-your-voice-mail-box-to-continue')
        res = agi.appexec('VMAuthenticate')
    except Exception as exc:
        # we expect AGIAppError('Error executing application, or hangup',)
        util.metric(agi, 'friction-vmauthenticate-deny')
        agi.appexec('busy')
        # above command should not exit
    else:
        util.metric(agi, 'friction-vmauthenticate-allow')
Example #11
def say_and_get_digit(agi_o, statement):
    """Say statement and return digit pressed, or None."""
    choice = util.say(agi_o, statement, escape=True)
    try:
        return int(choice)
    except Exception:
        return None
Example #12
def digit_pound(agi_o):
    says = ['to-accept', 'press-pound', 'to-reject', 'press-any-key']
    for say in says:
        digit = util.say(agi_o, say, escape=True)
        if digit != '':
            return digit == '#'
    digit = agi_o.wait_for_digit(timeout=-1)
    return digit == '#'
Example #13
def interruptable_statements(agi_o, statements):
    """
    Say statements until a key is entered or statements run out.
    Return key entered or None.
    """
    for statement in statements:
        digit = util.say(agi_o, statement, escape=True)
        if digit != '':
            return digit
    return None
Example #14
def record_statements(agi_o):
    """Top function to prompt, select, and record statements."""
    username = get_username()
    util.say(agi_o, 'hello')

    statement_keys = [group['name'] for group in statements.statement_groups]
    statement_key = prompt_menu(agi_o, statement_keys)

    intro_statements = [
        'after-each-statement', 'please-repeat', 'then-press-pound',
        'to-begin', 'press-any-key'
    ]
    digit = interruptable_statements(agi_o, intro_statements)
    if digit is None:
        agi_o.wait_for_digit(timeout=-1)

    (statement_statements, ) = [
        group['statements'] for group in statements.statement_groups
        if group['name'] == statement_key
    ]

    for statement in statement_statements:
        prompt_and_record(agi_o, statement, username)
        while not digit_pound(agi_o):
            prompt_and_record(agi_o, statement, username)

    util.say(agi_o, 'thank-you')
    util.say(agi_o, 'goodbye')
    util.metric(agi_o, 'record-menu')
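
Note: the (statement_statements, ) = [...] unpacking in record_statements doubles as an assertion: it raises ValueError unless exactly one group matches the chosen key. A minimal standalone illustration of the idiom:

groups = [{'name': 'intro', 'statements': ['a']},
          {'name': 'story', 'statements': ['b', 'c']}]

# One-element unpacking: fails loudly unless exactly one name matches.
(stmts,) = [g['statements'] for g in groups if g['name'] == 'story']
print(stmts)  # -> ['b', 'c']
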
Example #15
    def __init__(self, n_d, vocab, fix_init_embs=True):
        lst_words = []
        vocab_map = {}

        self.init_end = None

        for w in xrange(vocab):
            if self.init_end is None:
                self.init_end = len(lst_words) if fix_init_embs else -1
            vocab_map[w] = len(vocab_map)
            lst_words.append(w)

        emb_vals = np.array(
            [[pos / np.power(10000, 2 * i / n_d)
              for i in range(n_d)] if pos != 0 else np.zeros(n_d)
             for pos in range(vocab)])

        emb_vals[1:, 0::2] = np.sin(emb_vals[1:, 0::2])  # dim 2i
        emb_vals[1:, 1::2] = np.cos(emb_vals[1:, 1::2])

        say("{} sinosodal-init embeddings loaded.\n".format(len(emb_vals)))

        emb_vals = np.vstack(emb_vals).astype(theano.config.floatX)
        self.vocab_map = vocab_map
        self.lst_words = lst_words

        self.oov_tok = None
        self.oov_id = -1

        self.embeddings = create_shared(emb_vals)
        if self.init_end > -1:
            self.embeddings_trainable = self.embeddings[self.init_end:]
        else:
            self.embeddings_trainable = self.embeddings

        self.n_V = len(self.vocab_map)
        self.n_d = n_d
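
Note: the table above is the sinusoidal positional encoding, PE(pos, 2i) = sin(pos / 10000^(2i/d)) and PE(pos, 2i+1) = cos(...). Under Python 2 (this code uses xrange) the exponent 2 * i / n_d floor-divides unless true division is imported; the canonical form instead gives each sin/cos column pair a shared frequency. A standalone NumPy sketch of the canonical table, offered for comparison rather than as a drop-in fix:

import numpy as np

def sinusoidal_table(n_pos, n_d):
    # Each even/odd column pair shares one frequency; note the float division.
    i = np.arange(n_d)
    rates = 1.0 / np.power(10000.0, 2.0 * (i // 2) / float(n_d))
    vals = np.arange(n_pos)[:, None] * rates[None, :]
    vals[:, 0::2] = np.sin(vals[:, 0::2])
    vals[:, 1::2] = np.cos(vals[:, 1::2])
    return vals

print(sinusoidal_table(4, 6).round(3))
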
Example #16
    def __init__(self, n_d, vocab, oov="<unk>", embs=None, fix_init_embs=True):

        if embs is not None:
            lst_words = []
            vocab_map = {}
            emb_vals = []

            self.init_end = None

            for word in vocab:

                if word in embs:
                    vocab_map[word] = len(vocab_map)
                    vector = embs[word]

                    emb_vals.append(vector)
                    lst_words.append(word)
                else:
                    if self.init_end is None:
                        self.init_end = len(emb_vals) if fix_init_embs else -1

                    vocab_map[word] = len(vocab_map)
                    emb_vals.append(random_init((n_d,)) * (0.0 if (word == oov) else 0.001))
                    lst_words.append(word)

            if n_d != len(emb_vals[0]):
                say("WARNING: n_d ({}) != init word vector size ({}). Use {} instead.\n".format(
                    n_d, len(emb_vals[0]), len(emb_vals[0])
                ))
                n_d = len(emb_vals[0])

            say("{} pre-trained embeddings loaded.\n".format(len(emb_vals)))

            emb_vals = np.vstack(emb_vals).astype(theano.config.floatX)
            self.vocab_map = vocab_map
            self.lst_words = lst_words
        else:  # TODO: update to match the pre-trained branch above
            lst_words = []
            vocab_map = {}
            for word in vocab:
                if word not in vocab_map:
                    vocab_map[word] = len(vocab_map)
                    lst_words.append(word)

            self.lst_words = lst_words
            self.vocab_map = vocab_map
            emb_vals = random_init((len(self.vocab_map), n_d))
            self.init_end = -1

        if oov is not None and oov is not False:
            assert oov in self.vocab_map, "oov {} not in vocab".format(oov)
            self.oov_tok = oov
            self.oov_id = self.vocab_map[oov]
        else:
            self.oov_tok = None
            self.oov_id = -1

        self.embeddings = create_shared(emb_vals)
        if self.init_end > -1:
            self.embeddings_trainable = self.embeddings[self.init_end:]
        else:
            self.embeddings_trainable = self.embeddings

        self.n_V = len(self.vocab_map)
        self.n_d = n_d
Example #17
def talkToBeardedLadies(room):
    print "In a gruff voice the lady says:"
    say ('"Nice to meet you."')
Example #18
def talkToCarnie(room):
    say("\"Care to test your luck and your skill at a game that tests your luck\n"
        "and skill?\" he says, \"For indeed the two go hand in hand.\"")
Example #19
    def train(self):
        args = self.args

        padding_id = self.embedding_layer.vocab_map["<padding>"]

        updates_e, lr_e, gnorm_e = create_optimization_updates(
            cost=self.encoder.cost_e,
            params=self.encoder.params,
            method=args.learning,
            beta1=args.beta1,
            beta2=args.beta2,
            lr=args.learning_rate)[:3]

        updates_g, lr_g, gnorm_g = create_optimization_updates(
            cost=self.encoder.cost_g,
            params=self.generator.params,
            method=args.learning,
            beta1=args.beta1,
            beta2=args.beta2,
            lr=args.learning_rate)[:3]

        outputs_d = [
            self.generator.non_sampled_zpred, self.encoder.obj,
            self.encoder.loss, self.encoder.preds_clipped
        ]
        outputs_t = [
            self.encoder.obj, self.encoder.loss, self.z, self.encoder.zsum,
            self.encoder.zdiff, self.encoder.word_overlap_loss,
            self.encoder.loss_vec, self.encoder.cost_logpz, self.encoder.logpz,
            self.encoder.cost_vec, self.encoder.preds_clipped,
            self.encoder.cost_g, self.encoder.l2_cost, self.generator.l2_cost,
            self.encoder.softmax_mask
        ]

        inputs_d = [
            self.x, self.generator.posit_x, self.y, self.bm,
            self.gold_standard_entities, self.fw_mask, self.chunk_sizes,
            self.encoder.loss_mask
        ]
        inputs_t = [
            self.x, self.generator.posit_x, self.y, self.bm,
            self.gold_standard_entities, self.fw_mask, self.chunk_sizes,
            self.encoder.loss_mask
        ]

        eval_generator = theano.function(inputs=inputs_d,
                                         outputs=outputs_d,
                                         updates=self.generator.sample_updates,
                                         on_unused_input='ignore')

        train_generator = theano.function(inputs=inputs_t,
                                          outputs=outputs_t,
                                          updates=updates_e.items() +
                                          updates_g.items() +
                                          self.generator.sample_updates,
                                          on_unused_input='ignore')

        say("Model Built Full\n\n")

        unchanged = 0
        best_dev = 1e+2
        last_train_avg_cost = None
        last_dev_avg_cost = None
        tolerance = 0.10 + 1e-3
        dropout_prob = np.float64(args.dropout).astype(theano.config.floatX)

        filename = myio.create_json_filename(args)
        ofp_train = open(filename, 'w+')
        ofp_train_leaks = open(filename.replace('.json', '_leaks.json'), 'w+')

        json_train = dict()
        json_train_leaks = dict()

        random.seed(datetime.now())

        for epoch in xrange(args.max_epochs):
            unchanged += 1
            more_count = 0

            say("Unchanged : {}\n".format(unchanged))

            if unchanged > 25:
                break

            more = True
            if args.decay_lr:
                param_bak = [p.get_value(borrow=False) for p in self.params]

            while more:
                train_cost = 0.0
                train_loss = 0.0
                p1 = 0.0
                more_count += 1

                if more_count > 5:
                    break
                start_time = time.time()

                loss_all = []
                obj_all = []
                zsum_all = []
                bigram_loss_all = []
                loss_vec_all = []
                z_diff_all = []
                cost_logpz_all = []
                cost_generator_ls = []
                logpz_all = []
                z_pred_all = []
                cost_vec_all = []
                l2_generator = []
                l2_encoder = []

                num_files = args.num_files_train
                N = args.online_batch_size * num_files

                for i in xrange(num_files):

                    train_batches_x, train_batches_y, train_batches_e, train_batches_bm, _, train_batches_fw, train_batches_csz, train_batches_bpi = myio.load_batches(
                        args.batch_dir + args.source + 'train', i)

                    cur_len = len(train_batches_x)

                    perm2 = range(cur_len)
                    random.shuffle(perm2)

                    train_batches_x = [train_batches_x[k] for k in perm2]
                    train_batches_y = [train_batches_y[k] for k in perm2]
                    train_batches_e = [train_batches_e[k] for k in perm2]
                    train_batches_bm = [train_batches_bm[k] for k in perm2]
                    train_batches_fw = [train_batches_fw[k] for k in perm2]
                    train_batches_csz = [train_batches_csz[k] for k in perm2]
                    train_batches_bpi = [train_batches_bpi[k] for k in perm2]

                    for j in xrange(cur_len):
                        if (i * args.online_batch_size + j + 1) % 10 == 0:
                            say("\r{}/{} {:.2f}       ".format(
                                i * args.online_batch_size + j + 1, N,
                                p1 / (i * args.online_batch_size + j + 1)))

                        bx, by, be, bm, bfw, bcsz, bpi = train_batches_x[j], train_batches_y[j], train_batches_e[j], \
                                                  train_batches_bm[j], train_batches_fw[j], train_batches_csz[j], train_batches_bpi[j]
                        be, blm = myio.create_1h(be, args.n)

                        cost, loss, z, zsum, zdiff, bigram_loss, loss_vec, cost_logpz, logpz, cost_vec, preds_tr, cost_g, l2_enc, l2_gen, soft_mask = train_generator(
                            bx, bpi, by, bm, be, bfw, bcsz, blm)

                        mask = bx != padding_id

                        obj_all.append(cost)
                        loss_all.append(loss)
                        zsum_all.append(np.mean(zsum))
                        loss_vec_all.append(np.mean(loss_vec))
                        z_diff_all.append(np.mean(zdiff))
                        cost_logpz_all.append(np.mean(cost_logpz))
                        logpz_all.append(np.mean(logpz))
                        z_pred_all.append(np.mean(np.sum(z, axis=0)))
                        cost_vec_all.append(np.mean(cost_vec))
                        bigram_loss_all.append(np.mean(bigram_loss))
                        l2_encoder.append(l2_enc)
                        l2_generator.append(l2_gen)
                        cost_generator_ls.append(cost_g)

                        train_cost += cost
                        train_loss += loss

                        p1 += np.sum(z * mask) / (np.sum(mask) + 1e-8)

                cur_train_avg_cost = train_cost / N

                if args.dev:
                    self.dropout.set_value(0.0)
                    dev_obj, dev_z, dev_x, dev_sha, _ = self.evaluate_data(
                        eval_generator)
                    self.dropout.set_value(dropout_prob)
                    cur_dev_avg_cost = dev_obj

                more = False

                if args.decay_lr and last_train_avg_cost is not None:
                    if cur_train_avg_cost > last_train_avg_cost * (1 +
                                                                   tolerance):
                        more = True
                        say("\nTrain cost {} --> {}\n".format(
                            last_train_avg_cost, cur_train_avg_cost))
                    if args.dev and cur_dev_avg_cost > last_dev_avg_cost * (
                            1 + tolerance):
                        more = True
                        say("\nDev cost {} --> {}\n".format(
                            last_dev_avg_cost, cur_dev_avg_cost))

                if more:
                    lr_val = lr_g.get_value() * 0.5
                    lr_val = np.float64(lr_val).astype(theano.config.floatX)
                    lr_g.set_value(lr_val)
                    lr_e.set_value(lr_val)
                    say("Decrease learning rate to {}\n".format(float(lr_val)))
                    for p, v in zip(self.params, param_bak):
                        p.set_value(v)
                    continue

                myio.record_observations_verbose(json_train, epoch + 1,
                                                 loss_all, obj_all, zsum_all,
                                                 loss_vec_all, z_diff_all,
                                                 cost_logpz_all, logpz_all,
                                                 z_pred_all, cost_vec_all,
                                                 np.mean(cost_generator_ls))

                last_train_avg_cost = cur_train_avg_cost

                say("\n")
                say(("Generator Epoch {:.2f}  costg={:.4f}  lossg={:.4f}  " +
                     "\t[{:.2f}m / {:.2f}m]\n").format(
                         epoch + 1, train_cost / N, train_loss / N,
                         (time.time() - start_time) / 60.0,
                         (time.time() - start_time) / 60.0 /
                         (i * args.online_batch_size + j + 1) * N))

                if args.dev:
                    last_dev_avg_cost = cur_dev_avg_cost
                    if dev_obj < best_dev:
                        best_dev = dev_obj
                        unchanged = 0
                        if args.save_model:
                            filename = args.save_model + myio.create_fname_identifier(
                                args)
                            self.save_model(filename, args)
                            json_train['BEST_DEV_EPOCH'] = epoch

                            myio.save_dev_results(self.args, None, dev_z,
                                                  dev_x, dev_sha)

            if more_count > 5:
                json_train[
                    'ERROR'] = 'Stuck reducing error rate, at epoch ' + str(
                        epoch + 1) + '. LR = ' + str(lr_val)
                json.dump(json_train, ofp_train)
                ofp_train.close()
                return

        if unchanged > 20:
            json_train['UNCHANGED'] = unchanged

        json.dump(json_train, ofp_train)
        json.dump(json_train_leaks, ofp_train_leaks)
        ofp_train_leaks.close()
        ofp_train.close()
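
Note: the decay_lr branch above is a restore-and-retry pattern: if an epoch's average cost regresses beyond the tolerance, halve the learning rate, roll the parameters back to the pre-epoch backup, and rerun the epoch (at most five times). A compact standalone sketch of that control flow; train_epoch and params are hypothetical stand-ins:

def run_epoch_with_decay(train_epoch, params, lr, last_cost,
                         tolerance=0.10 + 1e-3, max_retries=5):
    # params: dict of name -> numpy array; train_epoch(lr) -> average cost.
    backup = dict((k, v.copy()) for k, v in params.items())
    for _ in range(max_retries):
        cost = train_epoch(lr)
        if last_cost is None or cost <= last_cost * (1 + tolerance):
            return cost, lr                 # accepted: keep the new weights
        lr *= 0.5                           # regressed: halve the rate...
        for k, v in backup.items():
            params[k][...] = v              # ...and restore the old weights
    return cost, lr                         # give up after max_retries tries
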
Example #20
 def say(self, statement):
     """ util.say wrapper """
     util.say(self.agi_o, statement, preferred_sub='tishbite-wait')
Example #21
    def ready(self):
        embedding_layer = self.embedding_layer
        args = self.args
        padding_id = embedding_layer.vocab_map["<padding>"]

        dropout = self.dropout = theano.shared(
            np.float64(args.dropout).astype(theano.config.floatX))

        # len*batch
        x = self.x = T.imatrix()

        n_d = args.hidden_dimension
        n_e = embedding_layer.n_d
        activation = get_activation_by_name(args.activation)

        layers = self.layers = []
        layer_type = args.layer.lower()
        for i in xrange(2):
            if layer_type == "rcnn":
                l = RCNN(n_in=n_e,
                         n_out=n_d,
                         activation=activation,
                         order=args.order)
            elif layer_type == "lstm":
                l = LSTM(n_in=n_e, n_out=n_d, activation=activation)
            layers.append(l)

        # len * batch
        self.masks = T.cast(T.neq(x, padding_id), theano.config.floatX)

        # (len*batch)*n_e
        embs = embedding_layer.forward(x.ravel())
        # len*batch*n_e
        embs = embs.reshape((x.shape[0], x.shape[1], n_e))
        embs = apply_dropout(embs, dropout)
        self.word_embs = embs

        flipped_embs = embs[::-1]

        # len*batch*n_d
        h1 = layers[0].forward_all(embs)
        h2 = layers[1].forward_all(flipped_embs)

        h_final = T.concatenate([h1, h2[::-1]], axis=2)
        h_final = apply_dropout(h_final, dropout)
        size = n_d * 2

        h1_sent = h1[args.sentence_length - 1::args.sentence_length]
        h2_sent = h2[args.sentence_length - 1::args.sentence_length]
        # h_final_sent = T.concatenate([h1_sent, h2_sent[::-1]], axis=2)
        # h_final_sent = apply_dropout(h_final_sent, dropout)

        output_layer = self.output_layer = ZLayer(
            n_in=size, n_hidden=args.hidden_dimension2, activation=activation)

        # sample z given text (i.e. x)
        z_pred, sample_updates = output_layer.sample_all(h_final)

        # we are computing approximated gradient by sampling z;
        # so should mark sampled z not part of the gradient propagation path
        #
        z_pred = self.z_pred = theano.gradient.disconnected_grad(z_pred)
        self.sample_updates = sample_updates
        print "z_pred", z_pred.ndim

        probs_word = output_layer.forward_all(h_final, z_pred)

        # SENTENCE LEVEL

        # output_layer_sent = self.output_layer_sent = ZLayer(
        #     n_in=size,
        #     n_hidden=args.hidden_dimension2,
        #     activation=activation
        # )
        #
        # z_pred_sent, sample_updates_sent = output_layer_sent.sample_all(h_final_sent)
        #
        # z_pred_sent = self.z_pred_sent = theano.gradient.disconnected_grad(z_pred_sent)
        # self.sample_updates_sent = sample_updates_sent
        #
        # probs_sent = output_layer_sent.forward_all(h_final_sent, z_pred_sent)
        #
        # z_pred_sent = T.repeat(z_pred_sent, args.sentence_length, axis=0)
        self.z_pred_combined = z_pred

        # probs_sent = T.repeat(probs_sent, args.sentence_length, axis=0)
        probs = probs_word

        logpz = -T.nnet.binary_crossentropy(probs,
                                            self.z_pred_combined) * self.masks
        logpz = self.logpz = logpz.reshape(x.shape)
        probs = self.probs = probs.reshape(x.shape)

        # batch
        z = self.z_pred_combined
        self.zsum = T.sum(z, axis=0, dtype=theano.config.floatX)
        self.zdiff = T.sum(T.abs_(z[1:] - z[:-1]),
                           axis=0,
                           dtype=theano.config.floatX)

        params = self.params = []
        for l in layers + [output_layer]:
            for p in l.params:
                params.append(p)
        nparams = sum(len(x.get_value(borrow=True).ravel()) \
                      for x in params)
        say("total # parameters: {}\n".format(nparams))

        l2_cost = None
        for p in params:
            if l2_cost is None:
                l2_cost = T.sum(p**2)
            else:
                l2_cost = l2_cost + T.sum(p**2)
        l2_cost = l2_cost * args.l2_reg
        self.l2_cost = l2_cost
Example #22
    def ready(self):
        generator = self.generator
        embedding_layer = self.embedding_layer
        embedding_layer_y = self.embedding_layer_y

        args = self.args
        padding_id = embedding_layer.vocab_map["<padding>"]

        dropout = generator.dropout

        # len*batch
        y = self.y = T.imatrix()
        y_mask = T.cast(T.neq(y, padding_id), theano.config.floatX)

        bv = self.bv = T.imatrix()

        z = generator.z_pred_combined
        z = z.dimshuffle((0, 1, "x"))
        y_mask = y_mask.dimshuffle((0, 1, "x"))

        # batch*nclasses
        n_d = args.hidden_dimension
        n_e = embedding_layer.n_d
        activation = get_activation_by_name(args.activation)

        # (len*batch)*n_e
        embs = generator.word_embs
        # (gs_len*batch)*n_e
        embs_y = embedding_layer_y.forward(y.ravel())
        embs_y = embs_y.reshape((y.shape[0], y.shape[1], n_e))

        l = ExtRCNN(n_in=n_e,
                    n_out=n_d,
                    activation=activation,
                    order=args.order)

        h_prev = embs
        h_prev_y = embs_y
        # len*batch*n_d
        h_next_y = l.forward_all_2(h_prev_y, y_mask)
        h_next_y = theano.gradient.disconnected_grad(h_next_y)

        h_next = l.forward_all(h_prev, z)

        h_next = h_next[::args.sentence_length]
        h_final_y = h_next_y[::args.sentence_length_hl]

        h_final = apply_dropout(h_next, dropout)

        h_final_y = h_final_y.dimshuffle(1, 0, 2)  # 15 x 4 x 200
        h_final = h_final.dimshuffle(1, 0, 2)  # 15 x 10 x 200

        h_final_y_r = (h_final_y**2).sum(2, keepdims=True)  # 15 x 4 x 1
        h_final_r = (h_final**2).sum(2, keepdims=True).dimshuffle(
            0, 2, 1)  # 15 x 1 x 10

        batched_dot = T.batched_dot(h_final_y,
                                    h_final.dimshuffle(0, 2, 1))  # 15 x 4 x 10

        squared_euclidean_distances = h_final_y_r + h_final_r - 2 * batched_dot  # (15 x 4 x 1 + 15 x 1 x 10) +  (15 x 4 x 10)
        similarity = T.sqrt(squared_euclidean_distances).dimshuffle(
            1, 0, 2)  # 4 x 15 x 10

        loss_mat = self.loss_mat = T.min(similarity, axis=2,
                                         keepdims=True)  # 4 x 15 x 1

        self.loss_vec = loss_vec = T.mean(loss_mat, axis=0)

        zsum = generator.zsum
        zdiff = generator.zdiff
        logpz = generator.logpz

        padded = T.shape_padaxis(T.zeros_like(bv[0]), axis=1).dimshuffle(
            (1, 0))
        component_2 = T.concatenate([bv[1:], padded], axis=0)

        # component_2 = T.stack([shifted_bv, bv], axis=2)
        self.bigram_overlap = component_2 * bv

        intersection = T.sum(self.bigram_overlap)
        jac = (intersection +
               args.jaccard_smoothing) / (T.sum(bv) + args.jaccard_smoothing)
        jac = 1 - jac

        coherent_factor = args.sparsity * args.coherent
        loss = self.loss = T.mean(loss_vec)
        self.sparsity_cost = T.mean(zsum) * args.sparsity + \
                             T.mean(zdiff) * coherent_factor

        samp = zsum * args.sparsity + zdiff * coherent_factor
        cost_vec = samp + loss_vec + jac
        cost_logpz = T.mean(cost_vec * T.sum(logpz, axis=0))

        self.obj = T.mean(cost_vec) + jac
        self.encoder_params = l.params

        params = self.params = []

        for p in l.params:
            params.append(p)
        nparams = sum(len(x.get_value(borrow=True).ravel()) \
                      for x in params)
        say("total # parameters: {}\n".format(nparams))

        l2_cost = None
        for p in params:
            if l2_cost is None:
                l2_cost = T.sum(p**2)
            else:
                l2_cost = l2_cost + T.sum(p**2)
        l2_cost = l2_cost * args.l2_reg
        self.l2_cost = l2_cost

        self.cost_g = cost_logpz * 10 + generator.l2_cost
        self.cost_e = loss * 10 + l2_cost
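
Note: the distance block above relies on the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b, batched over the sentence axes, so all pairwise squared distances come from one batched dot product. A standalone NumPy check of the same pattern (shapes mirror the comments above):

import numpy as np

y = np.random.rand(15, 4, 200)   # batch x summary sentences x hidden
h = np.random.rand(15, 10, 200)  # batch x document sentences x hidden

y_r = (y ** 2).sum(2, keepdims=True)                     # 15 x 4 x 1
h_r = (h ** 2).sum(2, keepdims=True).transpose(0, 2, 1)  # 15 x 1 x 10
dots = np.matmul(y, h.transpose(0, 2, 1))                # 15 x 4 x 10

sq_dist = y_r + h_r - 2 * dots
direct = ((y[:, :, None, :] - h[:, None, :, :]) ** 2).sum(-1)
print(np.allclose(sq_dist, direct))  # -> True
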
Example #23
    def train(self, train, dev, test, rationale_data):
        args = self.args
        dropout = self.dropout
        padding_id = self.embedding_layer.vocab_map["<padding>"]

        if dev is not None:
            dev_batches_x, dev_batches_y, dev_batches_bv = myio.create_batches(
                dev[0], dev[1], args.batch, padding_id)
        if test is not None:
            test_batches_x, test_batches_y = myio.create_batches(
                test[0], test[1], args.batch, padding_id)
        if rationale_data is not None:
            valid_batches_x, valid_batches_y = myio.create_batches(
                [u["xids"] for u in rationale_data],
                [u["y"] for u in rationale_data],
                args.batch,
                padding_id,
                sort=False)

        # start_time = time.time()
        # train_batches_x, train_batches_y = myio.create_batches(
        #     train[0], train[1], args.batch, padding_id
        # )
        # say("{:.2f}s to create training batches\n\n".format(
        #     time.time() - start_time
        # ))

        updates_e, lr_e, gnorm_e = create_optimization_updates(
            cost=self.encoder.cost_e,
            params=self.encoder.params,
            method=args.learning,
            beta1=args.beta1,
            beta2=args.beta2,
            lr=args.learning_rate)[:3]

        updates_g, lr_g, gnorm_g = create_optimization_updates(
            cost=self.encoder.cost_g,
            params=self.generator.params,
            method=args.learning,
            beta1=args.beta1,
            beta2=args.beta2,
            lr=args.learning_rate)[:3]

        sample_generator = theano.function(
            inputs=[self.x],
            outputs=self.z,
            updates=self.generator.sample_updates)

        # get_loss_and_pred = theano.function(
        #     inputs=[self.x, self.y],
        #     outputs=[self.encoder.loss_vec, self.z],
        #     updates=self.generator.sample_updates + self.generator.sample_updates_sent
        # )
        #
        eval_generator = theano.function(
            inputs=[self.x, self.y, self.bv],
            outputs=[self.z, self.encoder.obj, self.encoder.loss],
            updates=self.generator.sample_updates)

        train_generator = theano.function(
            inputs=[self.x, self.y, self.bv],
            outputs=[
                self.encoder.obj, self.encoder.loss,
                self.encoder.sparsity_cost, self.z, gnorm_e, gnorm_g
            ],
            updates=updates_e.items() + updates_g.items() +
            self.generator.sample_updates)

        eval_period = args.eval_period
        unchanged = 0
        best_dev = 1e+2
        best_dev_e = 1e+2
        last_train_avg_cost = None
        last_dev_avg_cost = None
        tolerance = 0.10 + 1e-3
        dropout_prob = np.float64(args.dropout).astype(theano.config.floatX)

        metric_output = open(
            args.train_output_readable + '_METRICS' + '_sparsity_' +
            str(args.sparsity) + '.out', 'w+')

        if args.dev_baseline:
            ofp1 = open(
                args.train_output_readable + '_METRICS' + '_sparsity_' +
                str(args.sparsity) + '_baseline.out', 'w+')
            ofp2 = open(
                args.train_output_readable + '_sparsity_' +
                str(args.sparsity) + '_baseline.out', 'w+')

            dz = myio.convert_bv_to_z(dev_batches_bv)

            myio.write_train_results(dz[0], dev_batches_x[0], dev_batches_y[0],
                                     self.embedding_layer, ofp2, padding_id)
            myio.write_summ_for_rouge(args, dz, dev_batches_x, dev_batches_y,
                                      self.embedding_layer)
            myio.write_metrics(-1, -1, ofp1, -1, args)

            ofp1.close()
            ofp2.close()

        for epoch in xrange(args.max_epochs):
            read_output = open(
                args.train_output_readable + '_e_' + str(epoch) +
                '_sparsity_' + str(args.sparsity) + '.out', 'w+')
            total_words_per_epoch = 0
            total_summaries_per_epoch = 0
            unchanged += 1
            if unchanged > 20:
                metric_output.write("PROBLEM TRAINING, NO DEV IMPROVEMENT")
                metric_output.close()
                break

            train_batches_x, train_batches_y, train_batches_bv = myio.create_batches(
                train[0], train[1], args.batch, padding_id)

            more = True
            if args.decay_lr:
                param_bak = [p.get_value(borrow=False) for p in self.params]

            while more:
                processed = 0
                train_cost = 0.0
                train_loss = 0.0
                train_sparsity_cost = 0.0
                p1 = 0.0
                start_time = time.time()

                N = len(train_batches_x)
                for i in xrange(N):
                    if (i + 1) % 32 == 0:
                        say("\r{}/{} {:.2f}       ".format(
                            i + 1, N, p1 / (i + 1)))

                    bx, by, bv = train_batches_x[i], train_batches_y[
                        i], train_batches_bv[i]
                    mask = bx != padding_id

                    cost, loss, sparsity_cost, bz, gl2_e, gl2_g = train_generator(
                        bx, by, bv)

                    if i % 64 == 0:
                        self.evaluate_rnn_weights(args, epoch, i)

                    if i % 8 == 0:
                        myio.write_train_results(bz, bx, by,
                                                 self.embedding_layer,
                                                 read_output, padding_id)

                    k = len(by)
                    processed += k
                    train_cost += cost
                    train_loss += loss
                    train_sparsity_cost += sparsity_cost
                    p1 += np.sum(bz * mask) / (np.sum(mask) + 1e-8)

                    total_summaries_per_epoch += args.batch
                    total_words_per_epoch += myio.total_words(bz)

                cur_train_avg_cost = train_cost / N

                if dev:
                    self.dropout.set_value(0.0)
                    dev_obj, dev_loss, dev_p1, dev_v, dev_x, dev_y = self.evaluate_data(
                        dev_batches_x,
                        dev_batches_y,
                        dev_batches_bv,
                        eval_generator,
                        sampling=True)

                    self.dropout.set_value(dropout_prob)
                    cur_dev_avg_cost = dev_obj

                    myio.write_train_results(dev_v[0], dev_x[0], dev_y[0],
                                             self.embedding_layer, read_output,
                                             padding_id)
                    myio.write_summ_for_rouge(args, dev_v, dev_x, dev_y,
                                              self.embedding_layer)
                    myio.write_metrics(total_summaries_per_epoch,
                                       total_words_per_epoch, metric_output,
                                       epoch, args)

                    metric_output.flush()

                more = False
                if args.decay_lr and last_train_avg_cost is not None:
                    if cur_train_avg_cost > last_train_avg_cost * (1 +
                                                                   tolerance):
                        more = True
                        say("\nTrain cost {} --> {}\n".format(
                            last_train_avg_cost, cur_train_avg_cost))
                    if dev and cur_dev_avg_cost > last_dev_avg_cost * (
                            1 + tolerance):
                        more = True
                        say("\nDev cost {} --> {}\n".format(
                            last_dev_avg_cost, cur_dev_avg_cost))

                if more:
                    lr_val = lr_g.get_value() * 0.5
                    lr_val = np.float64(lr_val).astype(theano.config.floatX)
                    lr_g.set_value(lr_val)
                    lr_e.set_value(lr_val)
                    say("Decrease learning rate to {}\n".format(float(lr_val)))
                    for p, v in zip(self.params, param_bak):
                        p.set_value(v)
                    continue

                last_train_avg_cost = cur_train_avg_cost
                if dev: last_dev_avg_cost = cur_dev_avg_cost

                say("\n")
                say((
                    "Generator Epoch {:.2f}  costg={:.4f}  scost={:.4f}  lossg={:.4f}  "
                    + "p[1]={:.2f}  |g|={:.4f} {:.4f}\t[{:.2f}m / {:.2f}m]\n"
                ).format(epoch + (i + 1.0) / N, train_cost / N,
                         train_sparsity_cost / N, train_loss / N, p1 / N,
                         float(gl2_e), float(gl2_g),
                         (time.time() - start_time) / 60.0,
                         (time.time() - start_time) / 60.0 / (i + 1) * N))
                say("\t" + str(["{:.2f}".format(np.linalg.norm(x.get_value(borrow=True))) \
                                for x in self.encoder.params]) + "\n")
                say("\t" + str(["{:.2f}".format(np.linalg.norm(x.get_value(borrow=True))) \
                                for x in self.generator.params]) + "\n")

                if dev:
                    if dev_obj < best_dev:
                        best_dev = dev_obj
                        unchanged = 0
                        # if args.dump and rationale_data:
                        #     self.dump_rationales(args.dump, valid_batches_x, valid_batches_y,
                        #                          get_loss_and_pred, sample_generator)
                        #
                        # if args.save_model:
                        #     self.save_model(args.save_model, args)

                    say(("\tsampling devg={:.4f}  mseg={:.4f}" +
                         "  p[1]g={:.2f}  best_dev={:.4f}\n").format(
                             dev_obj, dev_loss, dev_p1, best_dev))

                    # if rationale_data is not None:
                    #     self.dropout.set_value(0.0)
                    #     r_mse, r_p1, r_prec1, r_prec2 = self.evaluate_rationale(
                    #         rationale_data, valid_batches_x,
                    #         valid_batches_y, eval_generator)
                    #     self.dropout.set_value(dropout_prob)
                    #     say(("\trationale mser={:.4f}  p[1]r={:.2f}  prec1={:.4f}" +
                    #          "  prec2={:.4f}\n").format(
                    #         r_mse,
                    #         r_p1,
                    #         r_prec1,
                    #         r_prec2
                    #     ))

            read_output.close()

        metric_output.close()
Example #24
 def _say(self, file):
     util.say(self.agi_o, file, 'konami')
Example #25
    def pretrain(self):
        args = self.args
        padding_id = self.embedding_layer.vocab_map["<padding>"]

        updates_g, lr_g, gnorm_g = create_optimization_updates(
            cost=self.generator.cost_g,
            params=self.generator.params,
            method=args.learning,
            beta1=args.beta1,
            beta2=args.beta2,
            lr=args.learning_rate)[:3]

        eval_generator = theano.function(
            inputs=[self.x, self.bm],
            outputs=[self.z, self.generator.cost_g, self.generator.obj],
            updates=self.generator.sample_updates)

        train_generator = theano.function(
            inputs=[self.x, self.bm],
            outputs=[
                self.generator.obj, self.z, self.generator.zsum,
                self.generator.zdiff, self.generator.cost_g
            ],
            updates=updates_g.items() + self.generator.sample_updates)

        unchanged = 0
        best_dev = 1e+2
        last_train_avg_cost = None
        last_dev_avg_cost = None
        tolerance = 0.10 + 1e-3
        dropout_prob = np.float64(args.dropout).astype(theano.config.floatX)

        filename = myio.create_json_filename(args)
        ofp_train = open(filename, 'w+')
        json_train = dict()
        rouge_fname = None

        for epoch in xrange(args.max_epochs):
            unchanged += 1
            more_count = 0

            say("Unchanged : {}\n".format(unchanged))

            if unchanged > 20:
                break

            more = True
            if args.decay_lr:
                param_bak = [p.get_value(borrow=False) for p in self.params]

            while more:
                train_cost = 0.0
                train_loss = 0.0
                p1 = 0.0
                more_count += 1

                if more_count > 5:
                    break
                start_time = time.time()

                obj_all = []
                zsum_all = []
                z_diff_all = []
                z_pred_all = []

                num_files = args.num_files_train
                N = args.online_batch_size * num_files

                for i in xrange(num_files):
                    train_batches_x, train_batches_y, train_batches_e, train_batches_bm, train_batches_sha = myio.load_batches(
                        args.batch_dir + args.source + 'train', i)

                    random.seed(5817)
                    perm2 = range(len(train_batches_x))
                    random.shuffle(perm2)

                    train_batches_x = [train_batches_x[k] for k in perm2]
                    train_batches_bm = [train_batches_bm[k] for k in perm2]
                    cur_len = len(train_batches_x)

                    for j in xrange(cur_len):
                        if (i * args.online_batch_size + j + 1) % 10 == 0:
                            say("\r{}/{} {:.2f}       ".format(
                                i * args.online_batch_size + j + 1, N,
                                p1 / (i * args.online_batch_size + j + 1)))

                        bx, bm = train_batches_x[j], train_batches_bm[j]
                        mask = bx != padding_id

                        obj, z, zsum, zdiff, cost_g = train_generator(bx, bm)
                        zsum_all.append(np.mean(zsum))
                        z_diff_all.append(np.mean(zdiff))
                        z_pred_all.append(np.mean(np.sum(z, axis=0)))
                        obj_all.append(np.mean(obj))

                        train_cost += obj

                        p1 += np.sum(z * mask) / (np.sum(mask) + 1e-8)

                cur_train_avg_cost = train_cost / N

                if args.dev:
                    self.dropout.set_value(0.0)
                    dev_obj, dev_z, x, sha_ls = self.evaluate_pretrain_data(
                        eval_generator)
                    self.dropout.set_value(dropout_prob)
                    cur_dev_avg_cost = dev_obj

                more = False

                if args.decay_lr and last_train_avg_cost is not None:
                    if cur_train_avg_cost > last_train_avg_cost * (1 +
                                                                   tolerance):
                        more = True
                        say("\nTrain cost {} --> {}\n".format(
                            last_train_avg_cost, cur_train_avg_cost))
                    if args.dev and cur_dev_avg_cost > last_dev_avg_cost * (
                            1 + tolerance):
                        more = True
                        say("\nDev cost {} --> {}\n".format(
                            last_dev_avg_cost, cur_dev_avg_cost))

                if more:
                    lr_val = lr_g.get_value() * 0.5
                    lr_val = np.float64(lr_val).astype(theano.config.floatX)
                    lr_g.set_value(lr_val)
                    say("Decrease learning rate to {}\n".format(float(lr_val)))
                    for p, v in zip(self.params, param_bak):
                        p.set_value(v)
                    continue

                myio.record_observations_pretrain(json_train, epoch + 1,
                                                  obj_all, zsum_all,
                                                  z_diff_all, z_pred_all)

                last_train_avg_cost = cur_train_avg_cost

                say("\n")
                say(("Generator Epoch {:.2f}  costg={:.4f}  lossg={:.4f}  " +
                     "\t[{:.2f}m / {:.2f}m]\n").format(
                         epoch, train_cost / N, train_loss / N,
                         (time.time() - start_time) / 60.0,
                         (time.time() - start_time) / 60.0 /
                         (i * args.online_batch_size + j + 1) * N))

                if args.dev:
                    last_dev_avg_cost = cur_dev_avg_cost
                    if dev_obj < best_dev:
                        best_dev = dev_obj
                        unchanged = 0
                        if args.save_model:
                            filename = self.args.save_model + 'pretrain/' + myio.create_fname_identifier(
                                self.args)
                            self.save_model(filename, self.args, pretrain=True)
                            json_train['BEST_DEV_EPOCH'] = epoch

                            myio.save_dev_results(self.args, None, dev_z, x,
                                                  sha_ls)

            if more_count > 5:
                json_train[
                    'ERROR'] = 'Stuck reducing error rate, at epoch ' + str(
                        epoch + 1) + '. LR = ' + str(lr_val)
                json.dump(json_train, ofp_train)
                ofp_train.close()
                return

        if unchanged > 20:
            json_train['UNCHANGED'] = unchanged

        json.dump(json_train, ofp_train)
        ofp_train.close()
Example #26
#### Begin loop ####
while loop >= 0:
    loop = loop + 1
    print loop  # print the number of times the script has looped
    time.sleep(0.05)  # space out the loop so as not to run too fast
    print recv  # prints everything received from freenode. Remove this to clean up the debugging
    f.write(recv)  # log recv for later
    # iterate through plugins, executing all functions
    data = {
        's': s, 'recv': recv,
        'nick': nick,
        'loop': loop, 'numr': numr,
        'channel': channel,
        'maxspam': maxspam,
        'channelops': channelops,
        'plugclass': plugclass}  # format data to send to plugins
    thread.start_new_thread(runplugins, ())
    if '!update' in recv and util.argv('!update', recv)['user'] in data['admins']:
        status = 'Successful'
        try:
            execfile('./plugins.py')
            from plugins import *
            execfile('./util.py')
        except Exception, err:
            print sys.exc_info()[1]
            status = 'Failed'
        args = argv('!update', data['recv'])
        s.send(util.say(args['channel'], 'Dynamic update: ' + status))
    # get recv last. I thought this would be a good idea. I can't remember why, but there was a reason.
    recv = s.recv(recvbits)
Example #27
def main():
    assert args.embedding, "Pre-trained word embeddings required."

    embedding_layer = myio.create_embedding_layer(args.embedding)
    embedding_layer_y = myio.create_embedding_layer(args.embedding)

    max_len_x = args.sentence_length * args.max_sentences
    max_len_y = args.sentence_length_hl * args.max_sentences_hl

    if args.train:
        train_x, train_y = myio.read_docs(args.train)
        train_x = [embedding_layer.map_to_ids(x)[:max_len_x] for x in train_x]
        train_y = [
            embedding_layer_y.map_to_ids(y)[:max_len_y] for y in train_y
        ]

    if args.dev:
        dev_x, dev_y = myio.read_docs(args.dev)
        dev_x = [embedding_layer.map_to_ids(x)[:max_len_x] for x in dev_x]
        dev_y = [embedding_layer_y.map_to_ids(y)[:max_len_y] for y in dev_y]

    if args.load_rationale:
        rationale_data = myio.read_rationales(args.load_rationale)
        for x in rationale_data:
            x["xids"] = embedding_layer.map_to_ids(x["x"])

    if args.train:
        model = Model(args=args,
                      embedding_layer=embedding_layer,
                      embedding_layer_y=embedding_layer_y,
                      nclasses=len(train_y[0]))
        model.ready()

        # debug_func2 = theano.function(
        #        inputs = [ model.x, model.z ],
        #        outputs = model.generator.logpz
        #    )
        # theano.printing.debugprint(debug_func2)
        # return

        model.train(
            (train_x, train_y),
            (dev_x, dev_y) if args.dev else None,
            None,  # (test_x, test_y),
            rationale_data if args.load_rationale else None)

    if args.load_model and args.dev and not args.train:
        model = Model(args=None, embedding_layer=embedding_layer, nclasses=-1)
        model.load_model(args.load_model)
        say("model loaded successfully.\n")

        # compile an evaluation function
        eval_func = theano.function(inputs=[model.x, model.y],
                                    outputs=[
                                        model.z, model.encoder.obj,
                                        model.encoder.loss,
                                        model.encoder.pred_diff
                                    ],
                                    updates=model.generator.sample_updates)

        # compile a predictor function
        pred_func = theano.function(inputs=[model.x],
                                    outputs=[model.z, model.encoder.preds],
                                    updates=model.generator.sample_updates)

        # batching data
        padding_id = embedding_layer.vocab_map["<padding>"]
        dev_batches_x, dev_batches_y = myio.create_batches(
            dev_x, dev_y, args.batch, padding_id)

        # disable dropout
        model.dropout.set_value(0.0)
        dev_obj, dev_loss, dev_diff, dev_p1 = model.evaluate_data(
            dev_batches_x, dev_batches_y, eval_func, sampling=True)
        say("{} {} {} {}\n".format(dev_obj, dev_loss, dev_diff, dev_p1))
Example #28
 def say(self, agi_o, statement):
     """ too lazy to add preferred_subs everywhere """
     util.say(agi_o, statement, preferred_subs=['anzie-wumpus'])
Example #29
    time.sleep(0.05)  #space out the loop so as not to run too fast
    print recv  #prints everything received from freenode. Remove this to clean up the debugging
    f.write(recv)  # Log recv for later
    #iterate through plugins executing all functions
    data = {
        's': s,
        'recv': recv,
        'nick': nick,
        'loop': loop,
        'numr': numr,
        'channel': channel,
        'maxspam': maxspam,
        'channelops': channelops,
        'plugclass': plugclass
    }  # format data to send to plugins
    thread.start_new_thread(runplugins, ())
    if '!update' in recv and util.argv('!update',
                                       recv)['user'] in data['admins']:
        status = 'Successful'
        try:
            execfile('./plugins.py')
            from plugins import *
            execfile('./util.py')
        except Exception, err:
            print sys.exc_info()[1]
            status = 'Failed'
        args = argv('!update', data['recv'])
        s.send(util.say(args['channel'], 'Dynamic update: ' + status))
    #get recv last. I thought this would be a good idea. I can't remember why, but there was a reason.
    recv = s.recv(recvbits)
Example #30
def play_sound(agi_o, name):
    #return agi_o.background(name)
    # XXX testing
    import util
    return util.say(agi_o, name)
Example #31
 def say(self, statement):
     """ too lazy to add preferred_sub everywhere """
     util.say(self.agi_o, statement, preferred_sub='anzie-wumpus')