def _(msg):
    if peforth.vm.debug == 55:
        peforth.ok('55> ', loc=locals(),
                   cmd=":> [0] constant loc55 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        msg.download(msg.fileName)
        return 'Attachment: %s received at %s' % (msg.fileName, time.ctime())
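# The bare `def _(msg)` handlers throughout these examples are presumably
# registered as itchat group-chat handlers. A minimal sketch of such a
# registration, assuming itchat is installed (the decorator call below follows
# itchat's public API but does not appear in these excerpts):
import itchat
from itchat.content import ATTACHMENT

@itchat.msg_register(ATTACHMENT, isGroupChat=True)
def _(msg):
    pass  # handler body as in the examples above and below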
def console(msg, cmd):
    if cmd:
        print(cmd)  # already on the remote side; no need to echo
        global nextDelay
        nextDelay_msg = '\nNext anti-robot delay time: %i seconds\n' % (
            nextDelay)
        if peforth.vm.debug == 11:
            peforth.ok('11> ', loc=locals(),
                       cmd=":> [0] constant loc11 cr")  # breakpoint

        # re-direct the display to peforth screen-buffer
        peforth.vm.dictate("display-off")
        try:
            # peforth.vm.dictate(cmd)
            peforth.ok('Console> ',
                       loc=locals(),
                       cmd=":> [0] constant console.locals " + cmd + " exit")
        except Exception as err:
            errmsg = "Failed! : {}".format(err)
            peforth.vm.dictate("display-on")
            send_chunk(errmsg + nextDelay_msg, msg.user.send)
        else:
            peforth.vm.dictate("display-on screen-buffer")
            screen = peforth.vm.pop()[0]
            send_chunk(screen + '\nOK\n' + nextDelay_msg, msg.user.send)
def training_step(i, update_test_data, update_train_data):

    # training on batches of 100 images with 100 labels
    batch_X, batch_Y = mnist.train.next_batch(100)

    # compute training values for visualisation
    if update_train_data:
        a, c, im, w, b = sess.run([accuracy, cross_entropy, I, allweights, allbiases], feed_dict={X: batch_X, Y_: batch_Y})
        datavis.append_training_curves_data(i, a, c)
        datavis.append_data_histograms(i, w, b)
        datavis.update_image1(im)
        print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c))

    # compute test values for visualisation
    if update_test_data:
        a, c, im = sess.run([accuracy, cross_entropy, It], feed_dict={X: mnist.test.images, Y_: mnist.test.labels})
        datavis.append_test_curves_data(i, a, c)
        datavis.update_image2(im)
        print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))

    # learning-rate decay: exponential decay from irmax down toward irmin,
    # with a time constant of 'knee' training steps
    irmax = 0.005
    irmin = 0.00001
    knee = 1000
    ir = irmin + (irmax - irmin) * 10**(-i / knee)
    # the backpropagation training step
    sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y, learning_rate_decay:ir})
    if peforth.vm.debug == 22:
        peforth.ok('', loc=locals(),
                   cmd="---xray--- marker ---xray--- :> [0] inport i autoexec exit")
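# A quick sanity check of the learning-rate decay formula used in
# training_step() above (same constants, plain arithmetic): the rate starts
# at irmax and decays exponentially toward irmin with a time constant of
# 'knee' steps.
for step in (0, 1000, 2000, 5000):
    rate = 0.00001 + (0.005 - 0.00001) * 10**(-step / 1000)
    print(step, round(rate, 6))  # 0.005, 0.000509, 6e-05, 1e-05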
def _(msg):
    if peforth.vm.debug == 44:
        peforth.ok('44> ', loc=locals(),
                   cmd=":> [0] constant loc44 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        if msg.isAt:
            cmd = msg.text.split(maxsplit=1)[1]  # remove the leading @nickName
            console(msg, cmd)
def _(msg):
    if peforth.vm.debug == 2211:
        peforth.ok('2211> ', loc=locals(),
                   cmd=":> [0] constant loc2211 cr")  # breakpoint
    # msg.download(msg.fileName)  # saved under the working directory
    # pred = model.classify(image_path=msg.fileName.strip())
    # results = model.print_scores(pred=pred,k=10,only_first_name=True)
    return predict(msg)
Example #6
def picture(msg):
    if peforth.vm.debug == 55:
        peforth.ok(
            '55> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    if msg.user.NickName == chatroom:  # Only work in this specific chatroom; filter out everything else.
        predict(msg)
Example #7
def _(msg):
    if peforth.vm.debug == 22:
        peforth.ok('22> ', loc=locals(), cmd=":> [0] inport cr")  # breakpoint
    msg.download(msg.fileName)
    typeSymbol = {
        PICTURE: 'img',
        VIDEO: 'vid',
    }.get(msg.type, 'fil')
    return '@%s@%s' % (typeSymbol, msg.fileName)
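# Returning a string of the form '@img@<path>', '@vid@<path>' or '@fil@<path>'
# from an itchat handler asks itchat to send the downloaded file back to the
# chat; the mapping above just picks the prefix matching the message type.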
def _(msg):
    global nextDelay
    time.sleep(nextDelay)  # Anti-Robot delay
    nextDelay = random.choice(range(3, 18))
    nextDelay_msg = '\nNext anti-robot delay time: %i seconds\n' % (nextDelay)
    if peforth.vm.debug == 55:
        peforth.ok('55> ', loc=locals(),
                   cmd=":> [0] constant loc55 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        send_chunk(predict(msg) + nextDelay_msg, msg.user.send)
Example #9
def _(msg):
    if peforth.vm.debug == 44:
        peforth.ok('44> ', loc=locals(),
                   cmd=":> [0] constant loc44 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        if msg.isAt:
            time.sleep(nextDelay)  # Anti-Robot delay
            cmd = (msg.text + "\n").split(
                "\n", maxsplit=1)[1]  # remove the first line: @nickName ...
            console(msg, cmd)  # avoids problems with nicknames that contain spaces
Example #10
def _(msg):
    if peforth.vm.debug == 33:
        peforth.ok('33> ', loc=locals(),
                   cmd=":> [0] constant loc33 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        anti_robot_msg = antiRobotSleep()
        msg.download('download\\' + msg.fileName)
        send_chunk(
            'Attachment: %s \nreceived at %s\n' %
            (msg.fileName, time.ctime()) + anti_robot_msg, msg.user.send)
Example #11
def attachment(msg):
    if peforth.vm.debug == 33:
        peforth.ok(
            '33> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    if msg.user.NickName == chatroom:  # Only work in this specific chatroom; filter out everything else.
        msg.download('download\\' + msg.fileName)
        time.sleep(nextDelay)  # Anti-Robot delay
        send_chunk(
            'Attachment: %s \nreceived at %s\n' %
            (msg.fileName, time.ctime()) + nextDelay_msg(), msg.user.send)
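# nextDelay_msg() is called above and in later examples but never defined in
# these excerpts. A minimal sketch, assuming it simply formats the current
# global nextDelay the same way the inline strings in the earlier handlers do:
def nextDelay_msg():
    return '\nNext anti-robot delay time: %i seconds\n' % nextDelay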
Example #12
def _(msg):
    global nextDelay
    time.sleep(nextDelay)  # Anti-Robot delay
    nextDelay = random.choice(range(3, 18))
    if peforth.vm.debug == 44:
        peforth.ok('44> ', loc=locals(),
                   cmd=":> [0] constant loc44 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        if msg.isAt:
            cmd = msg.text.split(maxsplit=1)[1]  # remove the leading @nickName
            console(msg, cmd)
            send_chunk('Next anti-robot delay time: %i seconds' % (nextDelay),
                       msg.user.send)
def _(msg):
    global nextDelay
    time.sleep(nextDelay)  # Anti-Robot delay
    nextDelay = random.choice(range(3, 18))
    nextDelay_msg = '\nNext anti-robot delay time: %i seconds\n' % (nextDelay)
    if peforth.vm.debug == 33:
        peforth.ok('33> ', loc=locals(),
                   cmd=":> [0] constant loc33 cr")  # breakpoint
    if msg.user.NickName[:5] == 'AILAB':  # Only work in the AILAB chatroom; filter out everything else.
        msg.download('download\\' + msg.fileName)
        send_chunk(
            'Attachment: %s \nreceived at %s\n' %
            (msg.fileName, time.ctime()) + nextDelay_msg, msg.user.send)
def mfcc_batch_generator(batch_size=10,
                         source=Source.DIGIT_WAVES,
                         target=Target.digits):
    maybe_download(source, DATA_DIR)
    if target == Target.speaker: speakers = get_speakers()
    files = os.listdir(path)  # all 2402 files, including 2 subfolders
    files = [wav for wav in files if wav.endswith(".wav")]
    shuffle(files)
    threshold = int(len(files) * test_fraction)
    files_test = files[:threshold]
    files_train = files[threshold:]

    def feature_label(wav):
        if target == Target.speaker:
            label = one_hot_from_item(speaker(wav), speakers)
        elif target == Target.digits:
            label = dense_to_one_hot(int(wav[0]),
                                     10)  # in our case the first character of the filename is the label
        elif target == Target.first_letter:
            label = dense_to_one_hot((ord(wav[0]) - 48) % 32, 32)
        else:
            raise Exception("todo : labels for Target!")
        wave, sr = librosa.load(path + wav, mono=True)
        mfcc = librosa.feature.mfcc(wave, sr)
        mfcc = np.pad(mfcc, ((0, 0), (0, 80 - len(mfcc[0]))),
                      mode='constant',
                      constant_values=0)
        feature = np.array(mfcc)
        return feature, label

    def getBatch(size, waves):
        X = []
        Y = []
        for i in range(size):
            x, y = feature_label(waves[i])
            X.append(x)
            Y.append(y)
        return X, Y

    while True:
        print("loaded batch of %d train_files, %d test_files" %
              (len(files_train), len(files_test)))
        shuffle(files_train)
        shuffle(files_test)
        train_features, train_labels = getBatch(batch_size, files_train)
        test_features, test_labels = getBatch(batch_size, files_test)
        if peforth.vm.debug == 55: peforth.ok('55x> ', loc=locals())
        yield train_features, train_labels, test_features, test_labels
        if peforth.vm.debug == 55: peforth.ok('55y> ', loc=locals())
        # basic_rnn_seq2seq inputs must be a sequence
        pass
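# A minimal usage sketch of mfcc_batch_generator() (not part of the original
# module): each next() call yields one shuffled training batch and one test
# batch of (MFCC feature, one-hot label) lists; every feature is padded to a
# fixed width of 80 frames in feature_label() above.
gen = mfcc_batch_generator(batch_size=10, target=Target.digits)
train_X, train_Y, test_X, test_Y = next(gen)
print(len(train_X), train_X[0].shape)  # 10 features, each of shape (n_mfcc, 80)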
def predict(msg):
    results = time.ctime() + '\n'
    results += 'Google Inception V3 thinks it is:\n'
    msg.download('download\\' +
                 msg.fileName)  # pictures are saved under working directory/download
    if peforth.vm.debug == 22:
        peforth.ok('22> ', loc=locals(),
                   cmd=":> [0] constant loc22 cr")  # breakpoint
    if msg.fileName.strip().lower().endswith((".jpeg", '.jpg', '.png')):
        pred = model.classify(image_path=('download\\' + msg.fileName).strip())
        results += model.print_scores(pred=pred, k=10, only_first_name=True)
    else:
        results += 'Oops! jpeg/png pictures only, please. {} is not one.\n'.format(
            msg.fileName)
    return results
Example #16
def recording(msg):
    if peforth.vm.debug == 77:
        peforth.ok(
            '77> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    if msg.user.NickName == chatroom:  # Only work in this specific chatroom; filter out everything else.
        pathname = 'download/' + msg.fileName
        msg.download(pathname)
        peforth.vm.dictate("sh ffmpeg -i {0} -ar 16000 -ab 256k -ac 1 {0}.wav".
                           format(pathname) + "\ndrop\n")
        result = recognize(audio=pathname + ".wav")
        time.sleep(nextDelay)  # Anti-Robot delay
        send_chunk(
            'Mozilla DeepSpeech heard you said: %s \nreceived at %s\n' %
            (result, time.ctime()) + nextDelay_msg(), msg.user.send)
Example #17
def predict(msg):
    if peforth.vm.debug == 22:
        peforth.ok(
            '22> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    results = time.ctime() + '\n'
    results += 'Google Inception V3 Transfer Learning thinks it is:\n'
    pathname = 'download\\' + msg.fileName  # pictures are saved under working directory/download
    msg.download(pathname)
    # TensorFlow's tf.image.decode_bmp/jpeg/png decoders are poor; use ffmpeg instead
    peforth.vm.dictate("dos ffmpeg -i {} -y 1.png".format(pathname) +
                       "\ndrop\n")
    results += ai.predict("1.png")
    peforth.vm.dictate("dos del {}".format(pathname) + "\ndrop\n")
    time.sleep(nextDelay)  # Anti-Robot delay
    send_chunk(results + nextDelay_msg(), msg.user.send)
def chat(msg):
    if peforth.vm.debug == 44:
        peforth.ok(
            '44> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    if msg.user.NickName == chatroom:  # Only work in this specific chatroom; filter out everything else.
        if msg.isAt:
            cmd = msg.text.split(
                "\n", maxsplit=1)[1]  # remove the first line: @nickName ...
            console(msg, cmd)  # avoids problems with nicknames that contain spaces
        else:
            # Shown on the robot computer
            print(time.ctime(msg.CreateTime), end=" ")
            for i in msg.User['MemberList']:
                if i.UserName == msg.ActualUserName:
                    print(i.NickName)
            print(msg.text)
Example #19
def chat(msg):
    def myConsole(msg):
        # Treat this chat message as a peforth command
        cmd = msg.text + '\n'  # guarantee split() yields two parts
        cmd = cmd.split("\n",
                        maxsplit=1)[1]  # remove the first line: @nickName ...
        console(msg, cmd)  # avoids problems with nicknames that contain spaces

    def chat(msg):
        # Handle this chat: when the moderator is @-mentioned, treat the text as a
        # peforth command; otherwise just display it on the robot PC
        if msg.isAt:
            myConsole(msg)
        else:
            # Shown on the robot computer
            print(time.ctime(msg.CreateTime), end=" ")
            for i in msg.User['MemberList']:
                if i.UserName == msg.ActualUserName:
                    print(i.NickName)
            print(msg.text)

    if peforth.vm.debug == 44:
        peforth.ok(
            '44> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    if msg.user.NickName == chatroom:  # Only work in this specific chatroom; filter out everything else.
        chat(msg)
    else:
        # Keep a backdoor, otherwise we lose control once the chatroom nickname changes.
        # When the chatroom nickname only partially matches, handle only messages from the PIC (person in charge).
        # Messages that don't @-mention the moderator are still shown on the robot PC to confirm the robot is responding.
        if msg.user.NickName.find(chatroom) != -1:
            for i in msg.User['MemberList']:
                if peforth.vm.debug == 44:
                    peforth.ok(
                        '44a> ',
                        loc=locals(),
                        cmd=":> [0] to locals locals :> ['msg'] to msg cr"
                    )  # breakpoint
                if (i.UserName == msg.ActualUserName) and (i.NickName in PIC):
                    chat(msg)
Example #20
def console(msg, cmd):
    if cmd:
        print(cmd)  # already on the remote side; no need to echo
        if peforth.vm.debug == 11:
            peforth.ok('11> ', loc=locals(),
                       cmd=":> [0] constant loc11 cr")  # breakpoint
        # re-direct the display to peforth screen-buffer
        peforth.vm.dictate("display-off")
        try:
            peforth.vm.push((locals(), globals(), 'console prompt'))
            peforth.vm.dictate(":> [0] to console.locals " + cmd)
            # peforth.ok('OK ', loc=locals(),
            #     cmd=":> [0] to console.locals " + cmd + "\n exit")
        except Exception as err:
            errmsg = "Failed! : {}\n".format(err)
            peforth.vm.dictate("display-on")
            send_chunk(errmsg + nextDelay_msg, msg.user.send)
        else:
            peforth.vm.dictate("display-on screen-buffer")
            screen = peforth.vm.pop()[0]
            send_chunk(screen + nextDelay_msg, msg.user.send)
Example #21
def console(msg, cmd):
    if cmd:
        print(cmd)  # already on the remote side; no need to echo.
        peforth.vm.push(msg)
        peforth.vm.dictate("to msg")  # Availablize msg in peforth interpreter
        if peforth.vm.debug == 11:
            peforth.ok('11> ', loc=locals(),
                       cmd=":> [0] to locals cr")  # breakpoint
        # re-direct the display to peforth screen-buffer
        peforth.vm.dictate("display-off")
        try:
            peforth.vm.push((locals(), globals(), 'console prompt'))
            peforth.vm.dictate(":> [0] to locals " + cmd)
        except Exception as err:
            errmsg = "Failed! : {}".format(err)
            peforth.vm.dictate("display-on")
            time.sleep(nextDelay)  # Anti-Robot delay
            send_chunk(errmsg + nextDelay_msg() + "\nOK", msg.user.send)
        else:
            # Normal cases
            peforth.vm.dictate("display-on screen-buffer")
            screen = peforth.vm.pop()[0]
            time.sleep(nextDelay)  # Anti-Robot delay
            send_chunk(screen + nextDelay_msg() + "\nOK", msg.user.send)
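# The display-off / screen-buffer / display-on sequence above is the core of
# every console() variant in these examples. A standalone sketch of that
# pattern ('run_capture' is a hypothetical helper name; the peforth words are
# exactly the ones the code above uses):
def run_capture(cmd):
    peforth.vm.dictate("display-off")       # silence the console
    try:
        peforth.vm.dictate(cmd)             # run the peforth command
    except Exception as err:
        peforth.vm.dictate("display-on")
        return "Failed! : {}".format(err)
    peforth.vm.dictate("display-on screen-buffer")
    return peforth.vm.pop()[0]              # text the command printed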
Example #22
def attachment(msg):
    if peforth.vm.debug == 33:
        peforth.ok(
            '33> ',
            loc=locals(),
            cmd=":> [0] to locals locals :> ['msg'] to msg cr")  # breakpoint
    if msg.user.NickName == chatroom:  # Only work in this specific chatroom; filter out everything else.
        pathname = 'download/' + msg.fileName
        msg.download(pathname)
        if msg.fileName.lower().endswith(
            ('.mp3', '.3gpp', '.aac', '.wav', '.wma', '.pcm')):
            peforth.vm.dictate(
                "sh ffmpeg -i '{0}' -ar 16000 -ab 256k -ac 1 '{0}'.wav".format(
                    pathname) + "\ndrop\n")
            result = recognize(audio=pathname + ".wav")
            time.sleep(nextDelay)  # Anti-Robot delay
            send_chunk(
                'Mozilla DeepSpeech heard you said: %s \nreceived at %s\n' %
                (result, time.ctime()) + nextDelay_msg(), msg.user.send)
        else:
            time.sleep(nextDelay)  # Anti-Robot delay
            send_chunk(
                'Attachment: %s \nreceived at %s\n' %
                (msg.fileName, time.ctime()) + nextDelay_msg(), msg.user.send)
def mfcc_batch_generator_old(batch_size=10,
                             source=Source.DIGIT_WAVES,
                             target=Target.digits):
    maybe_download(source, DATA_DIR)
    if target == Target.speaker: speakers = get_speakers()
    batch_features = []
    labels = []
    files = os.listdir(path)  # should be all 2402 .wav files in the folder, including 2 subfolders
    files = [wav for wav in files if wav.endswith(".wav")]
    shuffle(files)
    files_test = files[:int(len(files) * test_fraction)]
    files_train = files[int(len(files) * test_fraction):]
    if peforth.vm.debug == 55: peforth.ok('speech_data.py 55a> ', loc=locals())
    # batch_size 64
    # source is "spoken_numbers_pcm.tar"
    # target is Target.digits <enum 'Target'>
    while True:
        print("loaded batch of %d files" % len(files))
        shuffle(files_train)
        shuffle(files_test)  # shuffle the filename order
        for wav in files:  # wav is a filename like 1_Albert_160.wav
            if not wav.endswith(".wav"): continue
            wave, sr = librosa.load(path + wav, mono=True)
            if target == Target.speaker:
                label = one_hot_from_item(speaker(wav), speakers)
            elif target == Target.digits:
                label = dense_to_one_hot(int(wav[0]),
                                         10)  # in our case the first character of the filename is the label
            elif target == Target.first_letter:
                label = dense_to_one_hot((ord(wav[0]) - 48) % 32, 32)
            else:
                raise Exception("todo : labels for Target!")
            labels.append(label)
            mfcc = librosa.feature.mfcc(wave, sr)
            # print(np.array(mfcc).shape)
            mfcc = np.pad(mfcc, ((0, 0), (0, 80 - len(mfcc[0]))),
                          mode='constant',
                          constant_values=0)
            batch_features.append(np.array(mfcc))
            if peforth.vm.debug == 55: peforth.ok('55b> ', loc=locals())
            if len(batch_features) >= batch_size:
                # print(np.array(batch_features).shape)
                # yield np.array(batch_features), labels
                yield batch_features, labels  # basic_rnn_seq2seq inputs must be a sequence
                if peforth.vm.debug == 66: peforth.ok('66> ', loc=locals())
                batch_features = []  # Reset for next batch
                labels = []
Example #24
def _(msg):
    if peforth.vm.debug == 44:
        peforth.ok('44> ', loc=locals(),
                   cmd=":> [0] inport cr")  # breakpoint
    if msg.isAt:
        cmd = msg.text.split(maxsplit=1)[1]  # remove the leading @nickName
        console(msg, cmd)
        return data_sets
    VALIDATION_SIZE = 2000
    local_file = maybe_download(source_data, train_dir)
    train_images = extract_images(TRAIN_INDEX, train=True)
    train_labels = extract_labels(TRAIN_INDEX, train=True, one_hot=one_hot)
    test_images = extract_images(TEST_INDEX, train=False)
    test_labels = extract_labels(TEST_INDEX, train=False, one_hot=one_hot)
    # train_images = train_images[:VALIDATION_SIZE]
    # train_labels = train_labels[:VALIDATION_SIZE:]
    # test_images = test_images[VALIDATION_SIZE:]
    # test_labels = test_labels[VALIDATION_SIZE:]
    data_sets.train = DataSet(train_images, train_labels, load=False)
    data_sets.test = DataSet(test_images, test_labels, load=True)
    # data_sets.validation = DataSet(validation_images, validation_labels, load=True)
    return data_sets


if __name__ == "__main__":
    print("downloading speech datasets")
    maybe_download(Source.DIGIT_SPECTROS)
    maybe_download(Source.DIGIT_WAVES)
    maybe_download(Source.NUMBER_IMAGES)
    maybe_download(Source.NUMBER_WAVES)

peforth.ok('speech_data_00> ',
           loc=locals(),
           cmd='''
   :> [0] value locals
   \ py: vm.debug=55
   exit
   ''')
Example #26
        output_layer = args.output_layer

    graph = load_graph(model_file)
    t = read_tensor_from_image_file(file_name,
                                    input_height=input_height,
                                    input_width=input_width,
                                    input_mean=input_mean,
                                    input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        start = time.time()
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
        end = time.time()
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = load_labels(label_file)

    print('\nEvaluation time (1-image): {:.3f}s\n'.format(end - start))

    for i in top_k:
        print(labels[i], results[i])

peforth.ok('22> ', loc=locals(), cmd=":> [0] value locals cr")
Example #27

# Initialize debugger peforth
peforth.ok(loc=locals(),
           cmd='''
    :> [0] value main.locals // ( -- dict ) main locals
    \ Check ffmpeg, ffprobe, the needed 'download' directory, and the neural network model 
        dos ffmpeg -version
        [if] cr ." Fatal error! ffmpeg not found which is used to convert pictures." cr bye [then]
        dos ffprobe -version
        [if] cr ." Fatal error! ffprobe not found which is for pictures' duration." cr bye [then]
        dos dir download 
        [if] cr ." Fatal error! 'download' directory not found." cr bye [then]
        dos dir tf_files\models\mobilenet_v1_1.0_224_frozen.tgz 
        dos dir tf_files\models\mobilenet_v1_0.50_224_frozen.tgz 
        or \ only one model needed but I have them downloaded already so check them all
        [if] cr 
             ." Fatal error! neural network model not found." cr 
             ."   tf_files\models\mobilenet_v1_1.0_224_frozen.tgz " cr
             ."   tf_files\models\mobilenet_v1_0.50_224_frozen.tgz" cr
             bye [then]
    \ define variables
        import time constant time // ( -- module )
        none value locals // ( -- dict ) at each breakpoint
        none value msg // ( -- obj ) itchat dynamic msg package 
    \ redefine the 'bye' command to logout itchat first
        : bye main.locals :> ['itchat'].logout() bye ; 
    exit \ Don't forget this!!
    ''')


# Sending a message to a friend or a chatroom depends on the given 'send' callable
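# send_chunk() itself is not shown in these excerpts. A minimal sketch of what
# the callers above appear to need: split a long reply into pieces and pass
# each piece to the given 'send' callable (msg.user.send in the handlers).
# The 2000-character chunk size is an assumption, not a documented limit.
def send_chunk(text, send, limit=2000):
    for start in range(0, len(text), limit):
        send(text[start:start + limit])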
Example #28
def _(msg):
    if peforth.vm.debug == 11:
        peforth.ok('11> ', loc=locals(),
                   cmd=":> [0] inport cr")  # breakpoint
    send_chunk('%s: %s' % (msg.type, msg.text), msg.user.send)
Example #29
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tensorflow as tf
import tensorflowvisu

# -- moved into xray.f -- from tensorflow.examples.tutorials.mnist import input_data as mnist_data
import peforth
peforth.ok(loc=locals(), cmd="include xray.f")

print("Tensorflow version " + tf.__version__)
tf.set_random_seed(0)

# neural network with 1 layer of 10 softmax neurons
#
# · · · · · · · · · ·       (input data, flattened pixels)       X [batch, 784] # 784 = 28 * 28
# \x/x\x/x\x/x\x/x\x/    -- fully connected layer (softmax)      W [784, 10]  b[10]
#   · · · · · · · ·                                              Y [batch, 10]

# The model is:
#
# Y = softmax( X * W + b)
#   X: matrix for 100 grayscale images of 28x28 pixels, flattened (there are 100 images in a mini-batch)
#   W: weight matrix with 784 lines and 10 columns
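# A minimal TensorFlow 1.x sketch of the single-layer softmax model the
# comments above describe. X, W, b, Y follow the comments; the label
# placeholder Y_ and the cross-entropy loss are the usual companions and are
# an assumption about what the elided rest of this example contains.
X = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images
Y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
Y = tf.nn.softmax(tf.matmul(X, W) + b)        # Y = softmax(X * W + b)
cross_entropy = -tf.reduce_mean(Y_ * tf.log(Y))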
Example #30
def _(msg):
    if peforth.vm.debug == 99:
        peforth.ok('99> ', loc=locals(),
                   cmd=":> [0] inport cr")  # breakpoint
    console(msg, msg.Text.strip())