# Example 1
# (score: 0)
def main():
    """Load a trained MNIST MLP and print the predicted digit for one image.

    Command-line flags select the GPU, the input image path, the saved
    model file and the hidden-unit count (must match the trained model).
    """
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--image', '-i', type=str, default="",
                        help='pass to input image')
    parser.add_argument('--model', '-m', default='my_mnist.model',
                        help='path to the training model')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()

    model = MLP(args.unit, 10)  # 10 output units, one per digit class
    if args.gpu >= 0:
        # Make the selected GPU current, then move the model onto it —
        # same idiom as the training script below.  (The original passed
        # the return value of .use() to to_gpu(), which is None.)
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    serializers.load_npz(args.model, model)

    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only the errors Image.open/convert raise
    # for a missing or unreadable image.
    try:
        img = Image.open(args.image).convert("L").resize((28, 28))
    except (OSError, ValueError):
        print("invalid input")
        return

    # Flatten the 28x28 grayscale image into a (1, 784) float32 row vector.
    img_array = model.xp.asarray(img, dtype=model.xp.float32).reshape(1, 784)
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        result = model.predict(img_array)
    print("predict:", model.xp.argmax(result.data))
def main():
    """Backtest the trained race-score model over races 27606-34439.

    Loads an MLP with a single output unit, fetches per-horse features for
    each race from the `race.db` SQLite database, scores every horse, and
    simulates win / place / quinella bets, printing the payoff lists and
    recovery / hit rates at the end.
    """
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--image',
                        '-i',
                        type=str,
                        default="",
                        help='pass to input image')
    parser.add_argument('--model',
                        '-m',
                        default='my_mnist.model',
                        help='path to the training model')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()
    # One output unit: the model regresses a per-horse score (the training
    # script in this file also builds MLP(args.unit, 1)).
    model = MLP(args.unit, 1)

    if args.gpu >= 0:
        model.to_gpu(chainer.cuda.get_device_from_id(args.gpu).use())
    serializers.load_npz(args.model, model)
    #    try:
    #        img = Image.open(args.image).convert("L").resize((28,28))
    #    except :
    #        print("invalid input")
    #        return
    #    img_array = model.xp.asarray(img,dtype=model.xp.float32).reshape(1,784)

    ##    df = pd.read_csv('test1.csv')
    db = sqlite3.connect('race.db')
    c = db.cursor()

    # Per-race payoff tallies (0 is appended on a miss); `lose` records the
    # odds of the picked horse whenever the win bet missed.
    win = []
    lose = []
    quinella = []
    place = []
    none_flag = 0
    for race_id in range(27606, 34440):
        #    for race_id, sdf in df.groupby('race_id'):
        # progress marker every 100 races
        if (race_id % 100 == 1):
            print("finished ", race_id - 1)
        # Rows come back ordered by order_of_finish, so row 0 is the actual
        # winner.  Columns: 0 horse_number, 1 age, 2 winrate, 3 eps, 4 odds,
        # 5 weight, 6 preOOF, 7 pre2OOF, 8 preLastPhase, 9 payoff_quinella,
        # 10 payoff_place, 11 race_id.
        df = pd.read_sql("select horse_number,age,winrate,eps,odds,weight,preOOF,pre2OOF,preLastPhase, "\
                     "payoff_quinella,payoff_place, race_id "\
                     "from (select "\
                     "inputdata.race_id, "\
                     "inputdata.order_of_finish, "\
               "inputdata.horse_number horse_number, "\
               "age, "\
               "case when enterTimes != 0 then winRun/enterTimes else 0 end as winrate, "\
               "eps, "\
               "odds, "\
               "weight, "\
               "preOOF, "\
               "pre2OOF, "\
               "preLastPhase, "\
               "pay1.payoff payoff_quinella, "\
                     "pay2.payoff payoff_place "\
                  "from inputdata "\
                  "inner join payoff pay1 "\
                  "	on pay1.ticket_type = 3 and pay1.race_id = inputdata.race_id "\
                  "left join payoff pay2"\
                     "  on pay2.ticket_type = 1"\
                     "  and pay2.horse_number = inputdata.horse_number"\
                     "  and pay2.race_id = inputdata.race_id"\
                  ") as a "\
                     "where a.race_id = "+str(race_id)+" "\
                     "order by a.race_id,order_of_finish;", db)
        #    img_array=df.values.reshape(1,-1)[~np.isnan(df.values.reshape(1,-1))]
        #    img_array = model.xp.asarray(img_array, dtype=model.xp.float32).reshape(1,-1)
        arr = df.values

        # for race ids with no rows (gaps in the id range)
        #        if(len(arr)==0):
        #            continue

        # Columns 6/7 (preOOF / pre2OOF) may come back non-numeric; replace
        # with the sentinel 18, mirroring the training script in this file.
        for i in range(len(arr)):
            if ((isinstance(arr[i][6], int)
                 or isinstance(arr[i][6], float)) == False):
                arr[i][6] = 18
            if ((isinstance(arr[i][7], int)
                 or isinstance(arr[i][7], float)) == False):
                arr[i][7] = 18
        arr = np.array(arr, dtype=float)
        # None handling: payoff_place (column 10) is NaN when the left join
        # found no place payoff for the horse; treat that as payoff 0.
        for i in range(len(arr)):
            if (np.isnan(arr[i][10])):
                arr[i][10] = 0
        # Keep a reference to the raw values: the astype / zscore calls
        # below rebind `arr`, so `copy_arr` still holds the un-normalized
        # horse numbers, odds and payoffs.
        copy_arr = arr
        winner = arr[0][0]
        second = arr[1][0]
        winner_odds = arr[0][4]
        quinella_odds = arr[0][9]

        # Skip the whole race if any None/NaN remains.
        for i in range(len(arr)):
            for j in range(len(arr[0])):
                if arr[i][j] is None:
                    none_flag = 1
                elif (math.isnan(float(arr[i][j]))):
                    none_flag = 1
        if (none_flag):
            none_flag = 0
            continue
        arr = arr.astype(np.float32)
        arr = scipy.stats.zscore(arr)
        # Zero-variance columns produce NaN after zscore; zero them out.
        arr[np.isnan(arr)] = 0
        res = []
        # Score each horse on its first 9 (normalized) feature columns.
        for i in range(len(arr)):
            img_array = arr[i][0:9]
            img_array = model.xp.asarray(img_array,
                                         dtype=model.xp.float32).reshape(
                                             1, -1)
            with chainer.using_config('train',
                                      False), chainer.no_backprop_mode():
                result = model.predict(img_array)
            res.append(result.data[0])
    #        print("predict:", model.xp.argmax(result.data))
    #        arg_sorted = model.xp.argsort(result.data)
    #        arg_sorted = arg_sorted [:, ::-1]
    #        print(arg_sorted[:, :3])
        x = np.array(res).reshape((1, -1))[0]
        # Buy only when the top pick clearly stands out from the rest.
        # NOTE(review): np.argsort(x)[0] (the LOWEST score) is treated as
        # the predicted winner, so the score appears to be lower-is-better
        # — confirm against the training label.
        if ((x[np.argsort(x)[1]] - x[np.argsort(x)[0]]) < 0.001):
            continue
#        for i in range(len(x)):
#            print (np.argsort(x)[i]+1,"-", x[np.argsort(x)[i]])

# Buy only when both the 1st and 2nd picks stand out from the rest.
#        if ((x[np.argsort(x)[2]] - x[np.argsort(x)[1]]) < 0.001):
#            continue

# Skip the race when the odds on the targeted horse are unattractive.
        continue_flag = 0
        for j in range(len(copy_arr)):
            if (copy_arr[j][0] == np.argsort(x)[0] + 1):
                if (copy_arr[j][4] >= 50 or copy_arr[j][4] < 2):
                    continue_flag = 1
        if (continue_flag == 1):
            continue

        # Win bet: predicted horse number (argsort index + 1) vs. winner.
        if (np.argsort(x)[0] + 1 == winner):
            win.append(winner_odds)


#            print(race_id,np.argsort(x)[0]+1,winner_odds)
        else:
            win.append(0)
            for j in range(len(copy_arr)):
                if (copy_arr[j][0] == np.argsort(x)[0] + 1):
                    lose.append(copy_arr[j][4])

        # Quinella: the top-two picks must match the top-two finishers
        # (order does not matter).
        if (((np.argsort(x)[0] + 1 == winner) and
             (np.argsort(x)[1] + 1 == second))
                or ((np.argsort(x)[0] + 1 == second) and
                    (np.argsort(x)[1] + 1 == winner))):
            quinella.append(quinella_odds)
        else:
            quinella.append(0)
        # Place bet: record the picked horse's place payoff (0 if none).
        for i in range(len(arr)):
            if (np.argsort(x)[0] + 1 == copy_arr[i][0]):
                place.append(copy_arr[i][10])
    print(win)
    print(lose)
    print(place)
    print(quinella)
    # 単勝 = win bet; 回収率 = recovery rate (%); 的中率 = hit rate (%).
    print("単勝")
    print("回収率 = ",
          sum(win) / len(win) * 100, " 的中率 = ",
          (1 - win.count(0) / len(win)) * 100)
    # 複勝 = place bet.
    print("\n複勝")
    print("回収率 = ",
          sum(place) / len(place), " 的中率 = ",
          (1 - place.count(0) / len(place)) * 100)
    # 馬連 = quinella bet.
    print("\n馬連")
    print("回収率 = ",
          sum(quinella) / len(quinella), " 的中率 = ",
          (1 - quinella.count(0) / len(quinella)) * 100)
# Example 3
# (score: 0)
def main():
    """Train an MLP classifier on MNIST with Chainer.

    Parses command-line options, builds the model, optimizer and data
    iterators, wires up the standard trainer extensions (evaluation,
    snapshots, plots, console reports) and runs the training loop.
    """
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the mini_cifar to train')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()

    # Echo the effective configuration.
    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Ten output units, one per digit class.
    net = MLP(args.unit, 10)
    if args.gpu >= 0:
        # Make the requested GPU current before copying the model over.
        chainer.cuda.get_device_from_id(args.gpu).use()
        net.to_gpu()

    opt = chainer.optimizers.Adam()
    opt.setup(net)

    # MNIST train/test splits and their iterators.
    train_set, test_set = chainer.datasets.get_mnist()
    train_it = chainer.iterators.SerialIterator(train_set, args.batchsize)
    test_it = chainer.iterators.SerialIterator(test_set,
                                               args.batchsize,
                                               repeat=False,
                                               shuffle=False)

    updater = training.StandardUpdater(train_it, opt, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Per-epoch evaluation on the held-out split, then a computation-graph
    # dump of the loss.
    trainer.extend(extensions.Evaluator(test_it, net, device=args.gpu))
    trainer.extend(extensions.dump_graph('main/loss'))

    # Snapshot every `snap_freq` epochs (-1 means once, at the final epoch).
    snap_freq = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(snap_freq, 'epoch'))
    trainer.extend(extensions.snapshot_object(net, 'model_{.updater.epoch}'),
                   trigger=(snap_freq, 'epoch'))

    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch',
                file_name='accuracy.png'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))
    trainer.extend(extensions.ProgressBar())

    # Optionally resume from a previous trainer snapshot.
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
# Example 4
# (score: 0)
def main():
    """Build training data from `race.db` and train the race-score MLP.

    For each field size (headCount 10-17) it pulls rows for races with
    race_id <= 27605, assembles a 9-feature vector per horse, z-scores
    each race block, labels every horse with order_of_finish normalized by
    field size, and finally trains an MLP regressor (one output unit) with
    the standard Chainer trainer loop.
    """
    db = sqlite3.connect('race.db')
    c = db.cursor()
    for i in range(10, 18):
        # i = headCount: races with exactly i starters are processed
        # together so the per-race blocks below have a fixed size.
        sql = "select * from inputdata where headCount = " + str(
            i) + " and race_id <= 27605 order by race_id,order_of_finish;"
        c.execute(sql)
        inputline = []  # features for one horse
        inputdata = []  # completed feature rows for the current race
        inputdataall = []  # all accepted rows for this headCount
        count = 0
        label = []
        labels = []
        printflag = 1
        for row in c:
            row = list(row)
            # Columns 47/48 may be non-int; replace with the sentinel 18 —
            # presumably preOOF/pre2OOF, mirroring the backtest script's
            # handling (TODO confirm against the inputdata schema).
            if (isinstance(row[47], int) == False):
                row[47] = 18
            if (isinstance(row[48], int) == False):
                row[48] = 18
            # A new race starts every i rows; reset the None flag there.
            if (count % i == 0):
                noneflag = 0
            for j in range(53):
                if (row[j] == None):
                    noneflag = 1
            inputline.append(row[3])
            inputline.append(row[35])
            # bare except: any division failure (None/zero) becomes 0
            try:
                inputline.append(row[46] / row[38])
            except:
                inputline.append(0)
            inputline.append(row[39])
            inputline.append(row[41])
            inputline.append(row[45])
            inputline.append(row[47])
            inputline.append(row[48])
            inputline.append(row[49])
            inputdata.append(inputline)
            inputline = []
            label.append(row[2])
            ##            if (count % i == 0):
            ##                label.append(0)
            ##                wintime = row[53]
            ##            else:
            ##                label.append(row[53] - wintime)
            # Last horse of the race: normalize and commit the race block.
            if (count % i == i - 1):
                #            inputline.insert(0, label)
                if (noneflag == 0):
                    #                    dmean = np.array(inputdata).mean(axis=0, keepdims=True)
                    #                    dstd = np.std(inputdata, axis=0, keepdims=True)
                    #                    inputdata = (inputdata - dmean) / dstd
                    inputdata = scipy.stats.zscore(inputdata)
                    # Zero-variance columns yield NaN after zscore; zero
                    # them out.
                    inputdata[np.isnan(inputdata)] = 0
                    inputdataall.extend(inputdata)
                    #                    lmean = np.mean(np.array(label),keepdims=True)
                    #                    lstd = np.std(label,keepdims=True)
                    # Label: finish order divided by row[1] (presumably the
                    # field size — TODO confirm), so smaller = better.
                    horcenum = np.array([row[1]] * len(label))
                    labelnp = np.array(label) / horcenum
                    ##                    labelnp = np.array(label)
                    labels.extend(labelnp)
                inputdata = []
                label = []
            count = count + 1
        # Re-pack the list of rows into a contiguous numeric array.
        inputdataall2 = np.empty((len(inputdataall), len(inputdataall[0])))
        inputdataall2[:] = inputdataall
        inputdataall = inputdataall2
        #    print(inputdata2)
        #    print(inputdata)
        #    X = inputdata[:, 1:].astype(np.float32)
        # Accumulate across headCount values: the first iteration
        # initializes allX/allY, later iterations stack onto them.
        if (i == 10):
            allX = np.array(inputdataall, dtype='float32')
            Y = np.array(labels, dtype='float32')
            #    le = LabelEncoder()
            #    allY = le.fit_transform(Y).astype(np.float32)
            allY = Y.astype(np.float32)
        else:
            X = np.array(inputdataall, dtype='float32')
            Y = np.array(labels, dtype='float32')
            #        le = LabelEncoder()
            #        Y = le.fit_transform(Y).astype(np.float32)
            Y = Y.astype(np.float32)
            allX = np.vstack((allX, X))
            allY = np.hstack((allY, Y))


#    print(X)
#    print(X[0])
#    print("-------")
#    print(Y[0].dtype)
#    print(Y[0])
#    print(Y[0])
#    Y=Y[:, None]

#    threshold = np.int32(len(inputdata) / 10 * 9)
#    train = np.array(inputdata[0:threshold],dtype=np.float32)
#    test = np.array(inputdata[threshold:],dtype=np.float32)
#    train = np.array(inputdata[0:threshold])
#    train = train.astype(np.float32)
#    test = np.array(inputdata[threshold:])
#    test = test.astype(np.float32)
    # NOTE(review): the split size uses inputdataall.shape[0], which at this
    # point only covers the LAST headCount group, not len(allX) — confirm
    # this is intended.
    train, test = datasets.split_dataset_random(
        datasets.TupleDataset(allX, allY), int(inputdataall.shape[0] * .7))

    # Throw (almost) everything into train; test2 is discarded.
    # NOTE(review): `test` above still comes from a split of the same data,
    # so the evaluator's test set overlaps the training set.
    train, test2 = datasets.split_dataset_random(
        datasets.TupleDataset(allX, allY),
        int(inputdataall.shape[0] * .999999))

    parser = argparse.ArgumentParser(description='Chainer example: RACE')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the mini_cifar to train')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train.  One output unit: regression on
    # the normalized finish-order label built above.
    model = MLP(args.unit, 1)
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam(weight_decay_rate=0.01)
    optimizer.setup(model)

    # MNIST loading disabled: `train`/`test` were built from race.db above.
    # train, test = chainer.datasets.get_mnist()

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test split each epoch.
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    trainer.extend(extensions.dump_graph('main/loss'))

    # Snapshot every `frequency` epochs (-1 means once, at the final epoch).
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
    trainer.extend(extensions.snapshot_object(model, 'model_{.updater.epoch}'),
                   trigger=(frequency, 'epoch'))

    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch',
                file_name='accuracy.png'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.extend(extensions.ProgressBar())

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()