Example #1
def process(task_id):
    # get meta info
    meta = TaskMeta.from_redis(task_id)
    logging.info('[process] msg: %s', task_id)
    if not meta:
        logging.error('[process] get from_redis failed, task_id: %s', task_id)
        return ERR_READ_REDIS_FAILED

    # inference
    result_img = inference(meta.input_url)

    # upload result
    obj_name = upload_img(meta.task_id, result_img)
    logging.info('[process] upload_img: %s, obj_name: %s', task_id, obj_name)
    if not obj_name:
        logging.error('[process] upload_img failed, task_id: %s', task_id)
        return ERR_WRITE_CLOUD_STORAGE_FAILED
    output_url = URL % obj_name

    # update meta info
    meta.output_url = output_url
    meta.proc_status = TaskMeta.TASK_STATUS_SUC
    logging.info('[process] meta to update: %s', meta.to_dict())
    if not meta.update():
        logging.error('[process] meta.update failed, task_id: %s', task_id)
        return ERR_WRITE_REDIS_FAILED
    logging.info('[process] done task: %s', task_id)
    return SUC
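
Example #1 assumes a TaskMeta helper backed by Redis. A minimal sketch of what from_redis and update might look like, assuming a redis-py client and JSON-serialized fields (the client setup and status constant are illustrative, not the original project's code):

import json
import redis

r = redis.Redis()  # assumed connection; the real client setup is not shown

class TaskMeta:
    TASK_STATUS_SUC = 2  # illustrative status constant

    def __init__(self, task_id, input_url, output_url=None, proc_status=0):
        self.task_id = task_id
        self.input_url = input_url
        self.output_url = output_url
        self.proc_status = proc_status

    @classmethod
    def from_redis(cls, task_id):
        # returns None when the key is missing, matching the check in process()
        raw = r.get(task_id)
        return cls(**json.loads(raw)) if raw else None

    def to_dict(self):
        return self.__dict__

    def update(self):
        # persist the updated meta; a falsy return signals ERR_WRITE_REDIS_FAILED
        return bool(r.set(self.task_id, json.dumps(self.to_dict())))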
Example #2
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.3)
scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.num_epochs*len(data_loader))
lr_scheduler = GradualWarmupScheduler(optimizer, multiplier=100, 
                                      total_epoch=min(1000, len(data_loader)-1), 
                                      after_scheduler=scheduler_cosine)

# loss function
criterion = DiceLoss()
# criterion = Weight_Soft_Dice_Loss(weight=[0.1, 0.9])
# criterion = BCELoss()
# criterion = MixedLoss(10.0, 2.0)
# criterion = Weight_BCELoss(weight_pos=0.25, weight_neg=0.75)
# criterion = Lovasz_Loss(margin=[1, 5])

print('start training...')
train_start = time.time()
for epoch in range(config.num_epochs):
    epoch_start = time.time()
    model_ft, optimizer = train_one_epoch(model_ft, data_loader, criterion, 
                                          optimizer, lr_scheduler=lr_scheduler, device=device, 
                                          epoch=epoch, vis=vis)
    do_valid(model_ft, dataloader_val, criterion, epoch, device, vis=vis)
    print('Epoch time: {:.3f}min\n'.format((time.time()-epoch_start)/60))

print('total train time: {}hours {}min'.format(int((time.time()-train_start)/60//60), int((time.time()-train_start)/60%60)))
inference_all(model_ft, device=device)
inference(model_ft, device=device)
torch.save(model_ft, f'{config.model}.pth')
torch.save({'optimizer': optimizer.state_dict(),
            'epoch': epoch,}, 
            'optimizer.pth')
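
The snippet saves the whole model object and the optimizer state separately. A short sketch of how training could be resumed from these files (an assumption about the intended resume flow, not code from the source):

# resume sketch: reload the pickled model and the optimizer/epoch checkpoint
model_ft = torch.load(f'{config.model}.pth')
ckpt = torch.load('optimizer.pth')
optimizer.load_state_dict(ckpt['optimizer'])
start_epoch = ckpt['epoch'] + 1  # continue from the following epoch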
Example #3
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from dataset import Image_Dataset
from sklearn.decomposition import KernelPCA
from sklearn.manifold import TSNE
from sklearn.cluster import MiniBatchKMeans, KMeans
from preprocess import preprocess
import torchvision.transforms as transforms
from autoencoder import Improved_AE
from test import inference, predict, save_prediction, invert

# load model
model = Improved_AE().cuda()
model.load_state_dict(torch.load(sys.argv[2]))
model.eval()

# prepare the data
trainX = np.load(sys.argv[1])

# predict the labels
latents = inference(X=trainX, model=model)
pred, X_embedded = predict(latents)

# save the predictions to a file and upload them to kaggle
if pred[6] == 1:
    save_prediction(pred, sys.argv[3])

# since this is an unsupervised binary-classification problem, we only care
# whether the images were successfully split into two clusters;
# if the file above scores below 0.5 accuracy on kaggle, simply invert the labels
else:
    save_prediction(invert(pred), sys.argv[3])
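
invert is imported from test and not shown here; for 0/1 labels it presumably flips every prediction. A one-line sketch under that assumption:

def invert(pred):
    # flip 0 <-> 1 in a binary label array (assumed behavior)
    return 1 - pred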

Example #4
import numpy as np
import torch
from test import inference, predict
from utils import cal_acc, plot_scatter
from autoencoder import Base_AE, Improved_AE
from preprocess import preprocess

valX = np.load('data/valX.npy')
valY = np.load('data/valY.npy')

#model = Base_AE().cuda()
model = Improved_AE().cuda()
model.load_state_dict(torch.load('./improve-2.pth'))

model.eval()
latents = inference(valX, model)
pred_from_latent, emb_from_latent = predict(latents)
acc_latent = cal_acc(valY, pred_from_latent)
print('The clustering accuracy is:', acc_latent)
print('The clustering result:')
plot_scatter(emb_from_latent, valY, savefig='p1_improved.png')

import matplotlib.pyplot as plt

# plot the original images
trainX = np.load('data/trainX_new.npy')
trainX_preprocessed = preprocess(trainX)
model = Improved_AE().cuda()
model.load_state_dict(torch.load('./improve-2.pth'))
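
cal_acc, used above, has to cope with the label-flip ambiguity described in Example #3: a clustering whose two labels are consistently swapped is just as good. A hedged sketch of such an accuracy function:

import numpy as np

def cal_acc(gt, pred):
    # clustering accuracy is invariant to swapping the two cluster labels
    acc = np.mean(gt == pred)
    return max(acc, 1 - acc)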
Example #5
import argparse
import numpy as np
import tensorflow as tf
# make_src_dataset and inference are provided elsewhere in the project

if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', required=True)
    parser.add_argument('-model', required=True, help='path to pre-trained model')
    parser.add_argument('-output', required=True, help='path to store the output')
    parser.add_argument('-batch', type=int, default=32, help='size of a batch')
    opt = parser.parse_args()

    dataset = make_src_dataset(opt.src, opt.batch)
    iterator = dataset.make_initializable_iterator()
    src_input, src_size = iterator.get_next()

    # define the computation graph
    sequence_op = inference(src_input,src_size)
    saver = tf.train.Saver()
    result = []

    with tf.Session() as sess:
        saver.restore(sess, opt.model)
        sess.run(iterator.initializer)
        print('[INFO] Start inference process...')
        step = 0
        while True:
            try:
                sequence = sess.run(sequence_op).tolist()
                step += 1
                if (step * opt.batch) % 100 == 0:
                    print('{} test instances finished.'.format(step * opt.batch))
                for seq in sequence:
                    # collect each decoded sequence (the original loop body is
                    # truncated; appending is an assumed minimal completion)
                    result.append(seq)
            except tf.errors.OutOfRangeError:
                break
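
make_src_dataset is project-specific and not shown. A hypothetical TF1 tf.data sketch that would satisfy this usage, assuming one space-separated line of token ids per sentence (names and file format are assumptions):

def make_src_dataset(src_path, batch_size):
    dataset = tf.data.TextLineDataset(src_path)
    # parse each line into a vector of int32 token ids
    dataset = dataset.map(lambda line: tf.string_to_number(
        tf.string_split([line]).values, tf.int32))
    # pair each sentence with its length, then batch with padding
    dataset = dataset.map(lambda x: (x, tf.size(x)))
    dataset = dataset.padded_batch(
        batch_size, padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])))
    return dataset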
Example #6
def upload():
   global shot_type
   global destination
   global batsman
   if request.method == 'POST':
      f = request.files['video-input']
      # This will contain the action chosen on the form ('analyze' or 'coachme')
      action_type = request.form['action-type']
      print("Action type is:", action_type)
      batsman = request.form['batsman']
      # This will contain the path of the file selected
      destination = "\\".join([target, f.filename])
      f.save(destination)
      print(destination)
 
      if action_type == 'analyze':
          # Analyze logic goes here
          preprocess(destination)
          
          bowl_ids=inference()
          #return str(destination)+" "+str(batsman)

          # changing variables
          bm = batsman
          bi = bowl_ids

          print("initial bowling ids predicted by model", bi)
          x = bi[0]  # 1st bowl id
          y = bi[1]  # 2nd bowl id

          # map model ids onto the dataset's ids
          a = map[x]  # 1st bowl id
          b = map[y]  # 2nd bowl id

          bi[0] = a
          bi[1] = b
          print("final bowling ids after manipulation according to dataset", bi)

          #most played shot
          mps=df.loc[(df['Batsman'] == bm) & ((df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1]))]
          mps=mps.mode()['Shot_Played'][0]

          #ideal shot
          ids=df.loc[ (df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1])]
          ids=ids.groupby('Shot_Played').mean().reset_index()
          ids=ids.iloc[ids["Boundary_Success_Rate"].idxmax()]
          ids=ids["Shot_Played"]

          #avg power
          ap=df.loc[(df['Batsman'] == bm) & ((df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1]))]
          ap=ap["Power_Applied(N)"].mean()


          #avg reaction time
          art=df.loc[(df['Batsman'] == bm) & ((df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1]))]
          art=art["Reaction Time(sec)"].mean()

          # best reference
          br=df.loc[ (df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1])]
          br=br.groupby('Batsman').mean().reset_index()
          br=br.iloc[br["Boundary_Success_Rate"].idxmax()]
          br=br["Batsman"]
          #return str(destination)+" "+str(batsman)

          stats = {
            'Most Played Shot': mps,
            'Ideal Shot': ids,
            'Average Power': ap,
            'Average Reaction Time': art,
            'Best Reference': br,
            }
          '''
          stats = {
            'Most Played Shot': 'Cover drive',
            'Ideal Shot': 'Sixer',
            'Average Power': '12 W',
            'Average Reaction Time': '100 ms'
            }
          '''
          dir_path = 'C:/Users/Administrator/Desktop/Ankita/uploads/'
          try:
              shutil.rmtree(dir_path)
          except OSError as e:
              print("Error: %s : %s" % (dir_path, e.strerror))
          return render_template('stats.html', batsman = batsman, stats = stats)
    
      elif action_type == 'coachme':
          print(batsman)
          print(shot_type)
          # Coach me logic goes here
          Cover_drive=[[-0.5, 1.2, -9.1,0,-1,0],[-0.55, 1.2, -8.5,0,-1,0],[-0.58, 1.2, -8.3,0,-1,0],[-0.6, 1.2, -8.0,0,-1,0],[-0.65, 1.2, -7.8,-15,-1,0],[-0.68, 1.2, -7.7,-19,-1,0],[-0.7, 1.2, -7.5,-20,-1,0],[-0.72, 1.2, -7.3,-25,-1,0]]
          Straight_drive=[[-0.5, 1.2, -9.1,0,0,0],[-0.5, 1.2, -8.5,0,0,0],[-0.5, 1.2, -8.3,0,0,0],[-0.5, 1.2, -8.0,0,0,0],[-0.5, 1.2, -7.8,0,0,0],[-0.5, 1.2, -7.7,-20,0,0],[-0.5, 1.2, -7.5,-29,0,0],[-0.5, 1.2, -7.3,-30,0,0]]
          if batsman=="Hari":
            shot_type=Straight_drive
          if batsman=="Vivek":
            shot_type=Cover_drive
          return render_template('coach.html',shot_type=shot_type)
 
      print("Batsman:", batsman)
      print("Filename", destination)
 
      return render_template('index.html', batsmen=batsmen, len=len(batsmen), batsman=batsman, destination=destination)
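
The map used in the analyze branch shadows the Python builtin; it must be a module-level dict defined elsewhere that translates the model's class indices into the dataset's Bowl_Id values. A hypothetical example of its shape (values invented for illustration):

map = {0: 101, 1: 102, 2: 103}  # model class index -> dataset Bowl_Id

Renaming it (e.g. bowl_id_map) would avoid shadowing the builtin.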
Example #7
def analyse():
    global selected_player
    global selected_delivery
    global destination
    global batsman

    if request.method == 'POST':
        print(request.json)
        selected_player = request.json['player']
        selected_delivery = request.json['delivery']
        print("Selected player in POST is: ", selected_player)
        print("Player details: ", selected_player['id'], selected_player['name'], selected_player['imageUrl'], selected_player['isCaptain'])
        print("Delivery details: ", selected_delivery['id'], selected_delivery['name'], selected_delivery['videoUrl'])
        
        # Player and delivery details are available in the variable above, as described
        resp = jsonify(success=True)
        return resp

    if request.method == 'GET':
        batsman=selected_player['name']
        original="C:/Users/Administrator/Desktop/Cricket_new_UI/" + selected_delivery['videoUrl']
        shutil.copyfile(original, 'C:/Users/Administrator/Desktop/Ankita/uploads/test.webm')

        print("Selected player in GET is: ", selected_player)
        print("Selected delivery in GET is: ", selected_delivery['videoUrl'])
        destination = "C:\\Users\\Administrator\\Desktop\\Ankita\\uploads\\test.webm"

        # Analyze logic goes here
        preprocess(destination)
          
        bowl_ids=inference()
        #return str(destination)+" "+str(batsman)

        # changing variables
        bm = batsman
        bi = bowl_ids

        print("initial bowling ids predicted by model", bi)
        x = bi[0]  # 1st bowl id
        y = bi[1]  # 2nd bowl id

        # map model ids onto the dataset's ids
        a = map[x]  # 1st bowl id
        b = map[y]  # 2nd bowl id

        bi[0] = a
        bi[1] = b
        print("final bowling ids after manipulation according to dataset", bi)

        #most played shot
        mps=df.loc[(df['Batsman'] == bm) & ((df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1]))]
        mps=mps.mode()['Shot_Played'][0]

        #ideal shot
        ids=df.loc[ (df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1])]
        ids=ids.groupby('Shot_Played').mean().reset_index()
        ids=ids.iloc[ids["Boundary_Success_Rate"].idxmax()]
        ids=ids["Shot_Played"]

        #avg power
        ap=df.loc[(df['Batsman'] == bm) & ((df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1]))]
        ap=ap["Power_Applied(N)"].mean()


        #avg reaction time
        art=df.loc[(df['Batsman'] == bm) & ((df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1]))]
        art=art["Reaction Time(sec)"].mean()

        # best reference
        br=df.loc[ (df['Bowl_Id'] == bi[0]) | (df['Bowl_Id'] == bi[1])]
        br=br.groupby('Batsman').mean().reset_index()
        br=br.iloc[br["Boundary_Success_Rate"].idxmax()]
        br=br["Batsman"]
        #return str(destination)+" "+str(batsman)

        stats = {
            'Most Played Shot': mps,
            'Ideal Shot': ids,
            'Average Power': ap,
            'Average Reaction Time': art,
            'Best Reference': br,
        }
        '''
        stats = {
        'Most Played Shot': 'Cover drive',
        'Ideal Shot': 'Sixer',
        'Average Power': '12 W',
        'Average Reaction Time': '100 ms'
        }
        '''
        dir_path = 'C:/Users/Administrator/Desktop/Ankita/uploads/'
        try:
            shutil.rmtree(dir_path)
        except OSError as e:
            print("Error: %s : %s" % (dir_path, e.strerror))
        try:
            os.makedirs(dir_path)
        except OSError as e:
            pass
        return render_template('stats.html', batsman = batsman, stats = stats)
Example #8
dataset = Image_Dataset(trainX, test_transform)
dataloader = DataLoader(dataset, batch_size=64, shuffle=False)
model = Improved_AE().cuda()

points = []
with torch.no_grad():
    for i, checkpoint in enumerate(checkpoints_list):
        print('[{}/{}] {}'.format(i + 1, len(checkpoints_list), checkpoint))
        model.load_state_dict(torch.load(checkpoint))
        model.eval()
        err = 0
        n = 0
        for x in dataloader:
            x = x.cuda()
            _, rec = model(x)
            err += torch.nn.MSELoss(reduction='sum')(x, rec).item()
            n += x.flatten().size(0)
        print('Reconstruction error (MSE):', err / n)
        latents = inference(X=valX, model=model)
        pred, X_embedded = predict(latents)
        acc = cal_acc(valY, pred)
        print('Accuracy:', acc)
        points.append((err / n, acc))
'''
ps = list(zip(*points))
plt.figure(figsize=(6,6))
plt.subplot(211, title='Reconstruction error (MSE)').plot(epoch, ps[0])
plt.subplot(212, title='Accuracy (val)', xlabel='Epoch').plot(epoch, ps[1])
plt.show()
'''
Example #9
def train():
    x = tf.placeholder(tf.float32,
                       shape=[
                           BATCH_SIZE, test.IMAGE_SIZE, test.IMAGE_SIZE,
                           test.NUM_CHANNELS
                       ],
                       name='x_input')
    y_ = tf.placeholder(tf.float32,
                        shape=[BATCH_SIZE, test.OUTPUT_NODE],
                        name='y_input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = test.inference(x, True, regularizer=regularizer)  # True: training mode
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               TRAINING_STEPS / BATCH_SIZE,
                                               LEARNING_RATE_DECAY)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='Train')

    # initialize the TensorFlow persistence class (Saver)
#    saver=tf.train.Saver()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        ####-------------------------- set BATCH_SIZE --------------------------####
        for i in range(TRAINING_STEPS):
            ID = np.random.randint(0, 199, BATCH_SIZE)
            xs = test.train_data[ID]
            ys = test.train_label[ID]
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            if i % 50 == 0:
                print(
                    "After %d steps training steps,loss on training batch is %g"
                    % (step, loss_value))


#                saver.save(sess,os.path.join(MODEL_SAVE_PATH,MODEL_NAME),global_step=global_step)

        # evaluate while the session is still open
        ypre = sess.run(y, feed_dict={x: test_data})

        y1 = np.argmax(ypre, axis=1)
        y2 = np.argmax(test_label, axis=1)

        # fraction of test examples whose predicted class matches the label
        accuracy = np.sum(y1 == y2) / numtest

        print('Accuracy_learn ============>>  ', accuracy)
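
The commented-out Saver suggests checkpoints were meant to be written during training. If evaluation should use the exponential-moving-average weights maintained above, TF1's variables_to_restore does the mapping; a hedged sketch, reusing MODEL_SAVE_PATH from the commented save call:

def evaluate_with_ema():
    # restore the EMA shadow values into the model variables (sketch)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(variable_averages.variables_to_restore())
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)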
Example #10
def test():
    input_url = 'https://storage.googleapis.com/ylq_server/208e8322d7ecd9f48c969be0c95b333e'
    result_img = inference(input_url)
    # upload result
    result_img.save('/tmp/res', 'JPEG')
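
The "# upload result" step stops at a local save here; in Example #1, upload_img pushes the result to cloud storage. Given the storage.googleapis.com URL above, a hedged sketch of such a helper using the google-cloud-storage client (bucket name and object naming are assumptions):

from io import BytesIO
from google.cloud import storage

def upload_img(task_id, img):
    # serialize the PIL image and upload it; return the object name, or '' on failure
    buf = BytesIO()
    img.save(buf, 'JPEG')
    try:
        bucket = storage.Client().bucket('ylq_server')  # bucket inferred from the URL
        blob = bucket.blob(task_id)
        blob.upload_from_string(buf.getvalue(), content_type='image/jpeg')
        return task_id
    except Exception:
        return ''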