def generate_data(self):
    """Fetch a data batch; labels are included only in training mode.

    Returns:
        ``(left, right, label)`` when ``self.train`` is truthy,
        otherwise ``(left, right)``.
    """
    if not self.train:
        left, right = get_data(False)
        return left, right
    left, right, label = get_data(True)
    return left, right, label
def create_test():
    """Create a new test record from the JSON request body.

    Expects a JSON payload with ``subject`` and ``answer_keys``, persists
    the record via ``t``, and returns the stored row with HTTP 201.
    """
    import ast  # local import: used to parse the stored answer keys safely

    test_data = request.get_json()
    global test_
    test_ = {
        "subject": test_data["subject"],
        "answer_keys": test_data['answer_keys'],
    }
    t.create_table()
    # Build the row locally: the original appended to a module-level
    # ``entities`` list, so every request re-inserted the values of all
    # previous requests as well.
    row = [test_[key] for key in test_.keys()]
    t.insert_into_tests(row)
    ret_data = t.get_data()
    ret_val = {
        "test_id": ret_data[0][0],
        "subject": ret_data[0][1],
        # literal_eval instead of eval: the stored text must be a plain
        # Python literal, never executable code pulled from the database.
        "answer_keys": ast.literal_eval(ret_data[0][2]),
        "submissions": ret_data[0][3],
    }
    return ret_val, 201
import os from test import output_scale_recover, get_data os.environ[ 'TF_CPP_MIN_LOG_LEVEL'] = '2' ##for avx command set in CPU :https://blog.csdn.net/hq86937375/article/details/79696023 from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Dropout from keras import optimizers from keras.layers.normalization import BatchNormalization #ref:https://www.zhihu.com/question/55621104 from keras import initializers from keras import callbacks from keras.models import model_from_json np.random.seed(10) inputLen = 20 data = get_data() #interface variable name = "" from_data = 0 to_data = 1 #private: #user interface def main(): if len(sys.argv) < 7: # print( "Usage:", sys.argv[0], "--name <test name> --from <the index test_data from> --to <the index test_data to>"
# Quick smoke check: print whatever the local ``test`` module returns.
from test import get_data

print(get_data())
def params():
    """Return the 'StringReplace' dataset converted to string parameters."""
    raw = get_data('StringReplace')
    return str_data(raw)
def params():
    """Return the 'StringFind' dataset converted to string parameters."""
    raw = get_data('StringFind')
    return str_data(raw)
def params():
    """Return the 'StringConcat' dataset converted to string parameters."""
    raw = get_data('StringConcat')
    return str_data(raw)
def params():
    """Return the 'StringEquals' dataset converted to string parameters."""
    raw = get_data('StringEquals')
    return str_data(raw)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import pylab

import test as t

# --- initial parameters ---
z = t.get_data()            # measurement series
n_iter = len(z)
sz = (n_iter,)              # size of the state arrays
Q = 1e-2                    # process variance

# Allocate space for the filter arrays.
xhat = numpy.zeros(sz)      # a posteriori estimate of x
P = numpy.zeros(sz)         # a posteriori error estimate
xhatminus = numpy.zeros(sz) # a priori estimate of x
Pminus = numpy.zeros(sz)    # a priori error estimate
K = numpy.zeros(sz)         # gain or blending factor

R = 0.2 ** 2                # measurement variance estimate; tune to see the effect

# Initial guesses.
xhat[0] = z[0]
P[0] = 50.0

for k in range(1, n_iter):
    # Time update (prediction step).
    xhatminus[k] = xhat[k - 1]
    Pminus[k] = P[k - 1] + Q
def params():
    """Return the 'GCD' dataset converted to integer parameters."""
    raw = get_data('GCD')
    return int_data(raw)
def params():
    """Return the 'Loop' dataset converted to integer parameters."""
    raw = get_data('Loop')
    return int_data(raw)
def params():
    """Return the 'FactorialBig' dataset converted to integer parameters."""
    raw = get_data('FactorialBig')
    return int_data(raw)
def post_tasks():
    """Return task data for the name supplied in the JSON request body.

    Responds with HTTP 400 when the body is not JSON or lacks a ``name``
    field, instead of letting the resulting KeyError/TypeError surface
    as an opaque 500.
    """
    payload = request.get_json(silent=True)
    if not payload or 'name' not in payload:
        return jsonify({"error": "JSON body with a 'name' field is required"}), 400
    return jsonify(get_data(payload['name']))
def params():
    """Return the 'MultiplicationOfBigInt' dataset converted to long parameters."""
    raw = get_data('MultiplicationOfBigInt')
    return long_data(raw)
def params():
    """Return the 'MultiplicationOfLong' dataset converted to long parameters."""
    raw = get_data('MultiplicationOfLong')
    return long_data(raw)
def params():
    """Return the 'MultiplicationOfInt' dataset converted to integer parameters."""
    raw = get_data('MultiplicationOfInt')
    return int_data(raw)