Code example #1
0
# Restore transcriber1's weights, except at epoch 0 (no prior checkpoint then).
if args.epoch >= 1:
    load_status = transcriber1.load_weights("%s/ckpt" % weights_dir)
    # Fail loudly if the checkpoint variables did not match the model.
    load_status.assert_existing_objects_matched()

# Second transcriber: the re-tuned ATB2 model at the requested epoch.
transcriber2 = transcriberModel()
ckpt_pattern = "%s/Robot_Rainfall_Rescue/models/ATB2_retuned/Epoch_%04d"
weights_dir = ckpt_pattern % (os.getenv("SCRATCH"), int(args.epoch))
# NOTE(review): presumably only epochs below 50 have saved weights — confirm.
if args.epoch < 50:
    load_status = transcriber2.load_weights("%s/ckpt" % weights_dir)
    load_status.assert_existing_objects_matched()

# Make the probability matrix
# Pair each test image with its target numbers.
testImages = getImageDataset(purpose="test", nImages=args.nimages)
testNumbers = getNumbersDataset(purpose="test", nImages=args.nimages)
testData = tf.data.Dataset.zip((testImages, testNumbers))
# Dataset size: trust the command-line value when given, otherwise count
# by iterating over the whole dataset once.
nimages = args.nimages if args.nimages is not None else sum(1 for _ in testData)

# Accumulators filled by the per-test-case loop that follows.
count = numpy.zeros(10)          # per-digit tally (10 classes)
pmatrix = numpy.zeros((10, 10))  # 10x10 probability matrix (digit vs digit)
dcount = 0                       # number of cases folded in so far
plmatrix = numpy.zeros(436)      # 436 per-slot tallies — TODO confirm meaning
origN = encN = None              # original / encoded numbers of the last case
for testCase in testData:
Code example #2
0
                    default=0)
args = parser.parse_args()

# Build the model and restore the weights saved at the end of the previous
# epoch (checkpoint directories are indexed by epoch - 1).
transcriber = transcriberModel()
ckpt_pattern = "%s/Robot_Rainfall_Rescue/models/ATB2_retuned/Epoch_%04d"
weights_dir = ckpt_pattern % (os.getenv("SCRATCH"), args.epoch - 1)
load_status = transcriber.load_weights("%s/ckpt" % weights_dir)
# Verify the checkpoint actually matched the model's variables.
load_status.assert_existing_objects_matched()

# Get test case number args.image: load the first image+1 cases and slice
# out the last one from each (batched) dataset.
caseIdx = args.image
testImage = getImageDataset(subdir="unperturbed",
                            purpose="test",
                            nImages=caseIdx + 1).batch(1)
originalImage = next(itertools.islice(testImage, caseIdx, caseIdx + 1))
testNumbers = getNumbersDataset(subdir="unperturbed",
                                purpose="test",
                                nImages=caseIdx + 1).batch(1)
originalNumbers = next(itertools.islice(testNumbers, caseIdx, caseIdx + 1))

# Run that test image through the transcriber
encoded = transcriber.predict_on_batch(originalImage)

# Plot original image on the left - make an image from the encoded numbers
#  on the right
Code example #3
0
# Training-run configuration.
nEpochs = 200  # how many epochs to train for
# Length of an epoch in images; None means "one pass over the training set".
nImagesInEpoch = 1000
nImagesInEpoch = nTrainingImages if nImagesInEpoch is None else nImagesInEpoch

# Dataset parameters
bufferSize = 100  # shuffle buffer; shouldn't matter much (data is random)
batchSize = 2  # arbitrary

# Set up the training data: endlessly-repeating image/numbers pairs,
# shuffled and batched for the fit loop.
trainKwargs = dict(subdir="unperturbed",
                   purpose="training",
                   nImages=nTrainingImages)
imageData = getImageDataset(**trainKwargs).repeat()
numbersData = getNumbersDataset(**trainKwargs).repeat()
trainingData = (tf.data.Dataset.zip((imageData, numbersData))
                .shuffle(bufferSize)
                .batch(batchSize))

# Set up the test data: same source pipeline as training, but left
# unshuffled and unbatched.
testKwargs = dict(subdir="unperturbed",
                  purpose="test",
                  nImages=nTestImages)
testImageData = getImageDataset(**testKwargs).repeat()
testNumbersData = getNumbersDataset(**testKwargs).repeat()
testData = tf.data.Dataset.zip((testImageData, testNumbersData))