Example #1
ll = []  # entries collected from both the product and the app link lists
for each in productLinkedList:
    print('|', end='')  # progress marker
    ll.append(getInfo(each[0], each[3], each[1]))
#saveAsCSV(ll, "上海数据开放")

for each in appLinkedList:
    print('|', end='')  # progress marker
    ll.append(getInfo(each[0], each[3], each[1]))
#saveAsCSV(ll, "上海应用开放")
print('抓取数据成功')  # "Data fetched successfully"

#classList = []
#for each in ll:
#    temp = EntryData()
#    temp.setFromString(each[-2],each[1], each[0], 'SH')
#    classList.append(temp.getData())
classList = []
for each in infoList:
    temp = Data(each[2], each[1], each[0], 'BJ')
    classList.append(temp)

print('数据分析中...')  # "Analyzing data..."
saveAsPKL(classList, './data/SHEntry')
print('数据保存成功')  # "Data saved successfully"

# Save the data link entries
#saveAsPickle("shanghaiProduct.pkl", productLinkedList)
#saveAsPickle("shanghaiApp.pkl", appLinkedList)
#saveAsPickle("shanghaiInterface.pkl", interfaceLinkedList)
Example #2
from dataClass import Data, ShangHai

a = Data("中国", "城市建设", "宅基地数据")  # "China", "urban construction", "homestead data"
a.printInfo()

b = ShangHai("中国", "城市建设", "宅基地数据", "妈妈")  # same fields plus "妈妈" ("mom")
b.printInfo()
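dataClass is the example's own module. A minimal sketch of what it might contain, assuming Data simply stores the three fields it is constructed with and ShangHai extends it with one extra field (the attribute names here are assumptions, not the original definitions):

class Data:
    def __init__(self, country, category, name):
        self.country = country
        self.category = category
        self.name = name

    def printInfo(self):
        # Print the stored fields on one line.
        print(self.country, self.category, self.name)


class ShangHai(Data):
    def __init__(self, country, category, name, extra):
        super().__init__(country, category, name)
        self.extra = extra

    def printInfo(self):
        print(self.country, self.category, self.name, self.extra)

Note that Example #1 calls a four-argument Data(...), so that example evidently uses a different Data class from the one imported here.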
Example #3
from pathlib import Path
import tensorflow as tf

tf.compat.v1.disable_v2_behavior()  # run with the TF1-style graph API
filePath = Path("C:/Users/soumi/Documents/Train-Images-All/").glob('*.jpg')
jsonPath = Path("C:/Users/soumi/Documents/Train-Images-All/trainData.txt")
savePath = "C:/Users/soumi/Documents/VAE-model/model/"
trainbatchSize = 16
valbatchSize = 16
epochs = 101
channelSize = 32
loss = 'ce_kldivergence'
learningRate = 0.0001
outputTensorName = "Inference/Output"
imageHeight = 240
imageWidth = 320
trainingLossList = list()
validationLossList = list()
data = Data(filePath, jsonPath)  # Data is the project's own loader class; its import is omitted in this snippet
data.jsonData()                  # parse the JSON label file
trainData = data.loadLabels()    # labels as an array
print("train data shape is", trainData.shape)

def getnumberofBatches(Datasize, batchSize):
    # Number of whole batches that fit in the dataset (remainder is dropped).
    return Datasize // batchSize
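A typical use of this helper (hypothetical; the training loop itself is not part of this snippet) would be:

numberofTrainBatches = getnumberofBatches(trainData.shape[0], trainbatchSize)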

gpuInitialised = True
'''
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  # Restrict TensorFlow to only use the first GPU
  try:
    tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')