Example #1
def run_experiment(self, repeats=10):
    # load data
    Preprocesser(self.length).preprocess()
    Extractor(self.length).extract()
    trainX, trainy, testX, testy = self.load_dataset()
    # repeat experiment
    scores = list()
    for r in range(repeats):
        score = self.evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print('>#%d: %.3f' % (r + 1, score))
        scores.append(score)
    # summarize results
    return self.summarize_results(scores)
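The helper methods this relies on (load_dataset, evaluate_model, summarize_results) are defined elsewhere in the class. A minimal sketch of summarize_results, assuming the experiment reports the mean accuracy over the repeats (which matches how Example #3 below plots a single value per run):

import numpy as np

def summarize_results(self, scores):
    # scores: list of per-repeat accuracies in percent
    mean, std = np.mean(scores), np.std(scores)
    print('Accuracy: %.3f%% (+/- %.3f)' % (mean, std))
    return mean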
Example #2
def main():
    with open(config.configFile(), "a+") as outFile:
        json.dump(vars(config), outFile)

    # set gpus
    if config.gpus != "":
        config.gpusNum = len(config.gpus.split(","))
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus

    tf.logging.set_verbosity(tf.logging.ERROR)

    # process data
    print(bold("Preprocess data..."))
    start = time.time()
    preprocessor = Preprocesser()
    data, embeddings, answerDict = preprocessor.preprocessData()
    print("took {} seconds".format(
        bcolored("{:.2f}".format(time.time() - start), "blue")))

    # build model
    print(bold("Building model..."))
    start = time.time()
    model = MACnet(embeddings, answerDict, raw=config.raw_image)
    print("took {} seconds".format(
        bcolored("{:.2f}".format(time.time() - start), "blue")))

    # initializer
    init = tf.global_variables_initializer()

    # savers
    savers = setSavers(model)
    saver, emaSaver, resnet_saver = \
        savers["saver"], savers["emaSaver"], savers["resnet_saver"]

    # sessionConfig
    sessionConfig = setSession()

    with tf.Session(config=sessionConfig) as sess:

        # ensure no more ops are added after model is built
        sess.graph.finalize()

        # restore / initialize weights, initialize epoch variable
        epoch = loadWeights(sess, saver, init, resnet_saver)

        if config.train:
            start0 = time.time()

            bestEpoch = epoch
            bestRes = None
            prevRes = None

            # epoch in [restored + 1, epochs]
            for epoch in range(config.restoreEpoch + 1, config.epochs + 1):
                print(bcolored("Training epoch {}...".format(epoch), "green"))
                start = time.time()

                # train
                # callee = lambda: model.runEpoch(), collectRuntimeStats, writer
                trainingData, alterData = chooseTrainingData(data)
                trainRes = runEpoch(sess,
                                    model,
                                    trainingData,
                                    train=True,
                                    epoch=epoch,
                                    saver=saver,
                                    alterData=alterData,
                                    raw=config.raw_image)

                # save weights
                saver.save(sess, config.weightsFile(epoch))
                if config.saveSubset:
                    # note: subsetSaver is not unpacked from savers above; it must be defined elsewhere
                    subsetSaver.save(sess, config.subsetWeightsFile(epoch))

                # load EMA weights
                if config.useEMA:
                    print(bold("Restoring EMA weights"))
                    emaSaver.restore(sess, config.weightsFile(epoch))

                # evaluation
                evalRes = runEvaluation(sess,
                                        model,
                                        data["main"],
                                        epoch,
                                        raw=config.raw_image)
                extraEvalRes = runEvaluation(sess,
                                             model,
                                             data["extra"],
                                             epoch,
                                             evalTrain=not config.extraVal,
                                             raw=config.raw_image)

                # restore standard weights
                if config.useEMA:
                    print(bold("Restoring standard weights"))
                    saver.restore(sess, config.weightsFile(epoch))

                print("")

                epochTime = time.time() - start
                print("took {:.2f} seconds".format(epochTime))

                # print results
                printDatasetResults(trainRes, evalRes, extraEvalRes)

                # stores predictions and optionally attention maps
                if config.getPreds:
                    print(bcolored("Writing predictions...", "white"))
                    writePreds(preprocessor, evalRes, extraEvalRes)

                logRecord(epoch, epochTime, config.lr, trainRes, evalRes,
                          extraEvalRes)

                # update best result
                # compute curr and prior
                currRes = {
                    "train": trainRes,
                    "val": evalRes["val"],
                    "test": evalRes["test"]
                }
                curr = {"res": currRes, "epoch": epoch}

                if bestRes is None or better(currRes, bestRes):
                    bestRes = currRes
                    bestEpoch = epoch

                prior = {
                    "best": {
                        "res": bestRes,
                        "epoch": bestEpoch
                    },
                    "prev": {
                        "res": prevRes,
                        "epoch": epoch - 1
                    }
                }

                # lr reducing
                if config.lrReduce:
                    if not improveEnough(curr, prior, config.lr):
                        config.lr *= config.lrDecayRate
                        print(
                            colored("Reducing LR to {}".format(config.lr),
                                    "red"))

                # early stopping
                if config.earlyStopping > 0:
                    if epoch - bestEpoch > config.earlyStopping:
                        break

                # update previous result
                prevRes = currRes

            # reduce epoch back to the last one we trained on
            epoch -= 1
            print("Training took {:.2f} seconds ({:} epochs)".format(
                time.time() - start0, epoch - config.restoreEpoch))

        if config.finalTest:
            print("Testing on epoch {}...".format(epoch))

            start = time.time()
            if epoch > 0:
                if config.useEMA:
                    emaSaver.restore(sess, config.weightsFile(epoch))
                else:
                    saver.restore(sess, config.weightsFile(epoch))

            evalRes = runEvaluation(sess,
                                    model,
                                    data["main"],
                                    epoch,
                                    evalTest=True,
                                    raw=config.raw_image)
            extraEvalRes = runEvaluation(sess,
                                         model,
                                         data["extra"],
                                         epoch,
                                         evalTrain=not config.extraVal,
                                         evalTest=True,
                                         raw=config.raw_image)

            print("took {:.2f} seconds".format(time.time() - start))
            printDatasetResults(None, evalRes, extraEvalRes)

            print("Writing predictions...")
            writePreds(preprocessor, evalRes, extraEvalRes)

        print(bcolored("Done!", "white"))
Example #3
from classification_lstm import lstmClassifier
from preprocess import Preprocesser
import matplotlib.pyplot as plt

acc_lstm = []

STEP = 10
START = 30
END = 500

lengths = [10, 100, 500, 750, 1000]
for i in lengths:
    print("The length is ", i)
    Preprocesser(i).preprocess()
    #Extractor(i).extract()
    acc_lstm.append(lstmClassifier(i).run_experiment())

plt.plot(lengths, acc_lstm, color="red", label="LSTM")

plt.legend()

plt.savefig('plot_lstm',
            dpi=600)  # plt.savefig() writes the figure to a file (PNG by default); dpi controls the output quality
plt.show()
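STEP, START, and END above are defined but never used; the loop hard-codes the length list. If the sweep were meant to be driven from those constants instead, the list could be built as:

lengths = list(range(START, END + 1, STEP))  # 30, 40, ..., 500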
Example #4
def main():
    # Access the CLOUDAMQP_URL environment variable and parse it (fallback to localhost)
    url = os.environ.get('CLOUDAMQP_URL', 'amqp://guest:guest@localhost:5672/%2f')
    params = pika.URLParameters(url)
    connection = pika.BlockingConnection(params)
    channel = connection.channel()
    channel.queue_declare(queue='rpc_queue')

    ################################ input ################################
    global cap
    if config.input_video == "camera":
        # camera input
        cap = cv2.VideoCapture(0)
        H, W = 480, 640
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, W)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, H)
    else:
        # static image
        cap = config.input_video

    ################################ feature extraction in PyTorch ################################
    global fextract_model
    fextract_model = build_model()

    ################################ MAC model in TF ################################
    global preprocessor, sess, mac_model
    with open(config.configFile(), "a+") as outFile:
        json.dump(vars(config), outFile)
    # set gpus
    if config.gpus != "":
        config.gpusNum = len(config.gpus.split(","))
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
    tf.logging.set_verbosity(tf.logging.ERROR)
    assert config.batchSize == 1, 'Error: always batch size of one'
    # process data
    print(bold("Preprocess data..."))
    start = time.time()
    preprocessor = Preprocesser()
    data, embeddings, answerDict = preprocessor.preprocessData(hasTrain=False,
                                                               hasDebug=True)
    print("took {} seconds".format(
        bcolored("{:.2f}".format(time.time() - start), "blue")))
    # build model
    print(bold("Building model..."))
    start = time.time()
    mac_model = MACnet(embeddings, answerDict)
    print("took {} seconds".format(
        bcolored("{:.2f}".format(time.time() - start), "blue")))
    # initializer
    init = tf.global_variables_initializer()
    # savers
    savers = setSavers(mac_model)
    saver, emaSaver = savers["saver"], savers["emaSaver"]
    # sessionConfig
    sessionConfig = setSession()
    sess = tf.Session(config=sessionConfig)
    # ensure no more ops are added after model is built
    sess.graph.finalize()
    # restore / initialize weights, initialize epoch variable
    epoch = loadWeights(sess, saver, init)
    print("Testing on epoch {}...".format(epoch))
    if epoch > 0:
        if config.useEMA:
            emaSaver.restore(sess, config.weightsFile(epoch))
        else:
            saver.restore(sess, config.weightsFile(epoch))
    ################################ RPC Queue ################################
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(queue='rpc_queue', on_message_callback=on_request)

    print(" [x] Awaiting RPC requests")
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.stop_consuming()

    connection.close()
    sess.close()
    if config.input_video == "camera":
        cap.release()
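The on_request callback registered with basic_consume is not shown here. A minimal sketch of the standard pika RPC reply pattern such a callback would follow; answer_question is a hypothetical stand-in for grabbing a frame from cap and running the MAC model on the incoming question:

def on_request(ch, method, props, body):
    # answer_question is hypothetical: read a frame from cap, run the MAC model on the question
    response = answer_question(body.decode())
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id),
                     body=str(response))
    ch.basic_ack(delivery_tag=method.delivery_tag)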
Example #5
def main():
    with open(config.configFile(), "a+") as outFile:
        json.dump(vars(config), outFile)

    tf.set_random_seed(config.tfseed)

    # set gpus
    if config.gpus != "":
        config.gpusNum = len(config.gpus.split(","))
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus

    tf.logging.set_verbosity(tf.logging.ERROR)

    # process data
    print(bold("Preprocess data..."))
    start = time.time()
    preprocessor = Preprocesser()
    data, embeddings, answerDict, questionDict = preprocessor.preprocessData()
    print("took {} seconds".format(
        bcolored("{:.2f}".format(time.time() - start), "blue")))

    nextElement = None
    dataOps = None

    # build model
    print(bold("Building model..."))
    start = time.time()
    model = MACnet(embeddings, answerDict, questionDict, nextElement)
    print("took {} seconds".format(
        bcolored("{:.2f}".format(time.time() - start), "blue")))

    # initializer
    init = tf.global_variables_initializer()

    # savers
    savers = setSavers(model)
    saver, emaSaver = savers["saver"], savers["emaSaver"]

    # sessionConfig
    sessionConfig = setSession()

    with tf.Session(config=sessionConfig) as sess:

        # ensure no more ops are added after model is built
        sess.graph.finalize()

        # restore / initialize weights, initialize epoch variable
        epoch = loadWeights(sess, saver, init)

        trainRes, evalRes = None, None

        if config.train:
            start0 = time.time()

            bestEpoch = epoch
            bestRes = None
            prevRes = None

            # epoch in [restored + 1, epochs]
            for epoch in range(config.restoreEpoch + 1, config.epochs + 1):
                print(bcolored("Training epoch {}...".format(epoch), "green"))
                start = time.time()

                # train
                # callee = lambda: model.runEpoch(), collectRuntimeStats, writer
                trainingData, alterData = chooseTrainingData(data)
                trainRes = runEpoch(
                    sess,
                    model,
                    trainingData,
                    dataOps,
                    train=True,
                    epoch=epoch,
                    saver=saver,
                    alterData=alterData,
                    maxAcc=trainRes["maxAcc"] if trainRes else 0.0,
                    minLoss=trainRes["minLoss"] if trainRes else float("inf"),
                )

                # save weights
                saver.save(sess, config.weightsFile(epoch))
                if config.saveSubset:
                    subsetSaver.save(sess, config.subsetWeightsFile(epoch))

                # load EMA weights
                if config.useEMA:
                    print(bold("Restoring EMA weights"))
                    emaSaver.restore(sess, config.weightsFile(epoch))

                # evaluation
                getPreds = config.getPreds or (config.analysisType != "")

                evalRes = runEvaluation(sess,
                                        model,
                                        data["main"],
                                        dataOps,
                                        epoch,
                                        getPreds=getPreds,
                                        prevRes=evalRes)
                extraEvalRes = runEvaluation(sess,
                                             model,
                                             data["extra"],
                                             dataOps,
                                             epoch,
                                             evalTrain=not config.extraVal,
                                             getPreds=getPreds)

                # restore standard weights
                if config.useEMA:
                    print(bold("Restoring standard weights"))
                    saver.restore(sess, config.weightsFile(epoch))

                print("")

                epochTime = time.time() - start
                print("took {:.2f} seconds".format(epochTime))

                # print results
                printDatasetResults(trainRes, evalRes, extraEvalRes)

                # stores predictions and optionally attention maps
                if config.getPreds:
                    print(bcolored("Writing predictions...", "white"))
                    writePreds(preprocessor, evalRes, extraEvalRes)

                logRecord(epoch, epochTime, config.lr, trainRes, evalRes,
                          extraEvalRes)

                # update best result
                # compute curr and prior
                currRes = {"train": trainRes, "val": evalRes["val"]}
                curr = {"res": currRes, "epoch": epoch}

                if bestRes is None or better(currRes, bestRes):
                    bestRes = currRes
                    bestEpoch = epoch

                prior = {
                    "best": {
                        "res": bestRes,
                        "epoch": bestEpoch
                    },
                    "prev": {
                        "res": prevRes,
                        "epoch": epoch - 1
                    }
                }

                # lr reducing
                if config.lrReduce:
                    if not improveEnough(curr, prior, config.lr):
                        config.lr *= config.lrDecayRate
                        print(
                            colored("Reducing LR to {}".format(config.lr),
                                    "red"))

                # early stopping
                if config.earlyStopping > 0:
                    if epoch - bestEpoch > config.earlyStopping:
                        break

                # update previous result
                prevRes = currRes

            # reduce epoch back to the last one we trained on
            epoch -= 1
            print("Training took {:.2f} seconds ({:} epochs)".format(
                time.time() - start0, epoch - config.restoreEpoch))

        if config.finalTest:
            print("Testing on epoch {}...".format(epoch))

            start = time.time()
            if epoch > 0:
                if config.useEMA:
                    emaSaver.restore(sess, config.weightsFile(epoch))
                else:
                    saver.restore(sess, config.weightsFile(epoch))

            evalRes = runEvaluation(sess,
                                    model,
                                    data["main"],
                                    dataOps,
                                    epoch,
                                    evalTest=False,
                                    getPreds=True)
            extraEvalRes = runEvaluation(sess,
                                         model,
                                         data["extra"],
                                         dataOps,
                                         epoch,
                                         evalTrain=not config.extraVal,
                                         evalTest=False,
                                         getPreds=True)

            print("took {:.2f} seconds".format(time.time() - start))
            printDatasetResults(trainRes, evalRes, extraEvalRes)

            print("Writing predictions...")
            writePreds(preprocessor, evalRes, extraEvalRes)

        if config.interactive:
            if epoch > 0:
                if config.useEMA:
                    emaSaver.restore(sess, config.weightsFile(epoch))
                else:
                    saver.restore(sess, config.weightsFile(epoch))

            tier = config.interactiveTier
            images = data["main"][tier]["images"]

            imgsInfoFilename = config.imgsInfoFile(tier)
            with open(imgsInfoFilename, "r") as file:
                imageIndex = json.load(file)

            openImageFiles(images)

            resInter = {"preds": []}

            while True:

                text = inp("Enter <imageId>_<question>\n")
                if len(text) == 0:
                    break

                imageId, questionStr = text.split("_", 1)  # split only on the first "_" so the question may contain underscores

                imageInfo = imageIndex[imageId]

                imageId = {
                    "group": tier,
                    "id": imageId,
                    "idx": imageInfo["idx"]
                }  # int(imageId)
                question = preprocessor.encodeQuestionStr(questionStr)
                instance = {
                    "questionStr": questionStr,
                    "question": question,
                    "answer": "yes",  # Dummy answer
                    "answerFreq": ["yes"],  # Dummy answer
                    "imageId": imageId,
                    "tier": tier,
                    "index": 0
                }

                if config.imageObjects:
                    instance["objectsNum"] = imageInfo["objectsNum"]

                print(instance)

                datum = preprocessor.vectorizeData([instance])
                image = loadImageBatch(images, {"imageIds": [imageId]})
                res = model.runBatch(sess,
                                     datum,
                                     image,
                                     train=False,
                                     getPreds=True,
                                     getAtt=True)
                resInter["preds"].append(instance)

                print(instance["prediction"])

            if config.getPreds:
                print(bcolored("Writing predictions...", "white"))
                preprocessor.writePreds(resInter, "interactive")

            closeImageFiles(images)

        print(bcolored("Done!", "white"))
Example #6
#loadDatasetConfig["GQA"]()
with open(config.configFile(), "a+") as outFile:
    json.dump(vars(config), outFile)

# set gpus
# if config.gpus != "":
#     config.gpusNum = len(config.gpus.split(","))
#     os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus

tf.logging.set_verbosity(tf.logging.ERROR)

# process data
print(bold("Preprocess data..."))
start = time.time()
preprocessor = Preprocesser()
data, embeddings, answerDict, questionDict = preprocessor.preprocessData()
print("took {} seconds".format(
    bcolored("{:.2f}".format(time.time() - start), "blue")))

nextElement = None
dataOps = None

# build model
print(bold("Building model..."))
start = time.time()
model = MACnet(embeddings, answerDict, questionDict, nextElement)
print("took {} seconds".format(
    bcolored("{:.2f}".format(time.time() - start), "blue")))

# initializer
Example #7
                mg[0], forward, reverse))
        mtools.divide_fq(mg[0], forward, reverse)
        mg = [forward, reverse]
            
    mg_name = mg[0].split('/')[-1].split('_R')[0]
    
    if mg_name not in mg_processed:                                             #several MT samples might correspond to the same MG sample

        '''    
        Metagenomics Preprocess        
        '''
        if not args.no_preprocessing:
            mtools.timed_message('Preprocessing metagenomic reads')
            preprocesser = Preprocesser(files = mg,
                                        paired = 'PE' if args.sequencing_technology == 'paired' else 'SE',
                                        working_dir = args.output,
                                        data = 'dna',
                                        name = mg_name,
                                        threads = args.threads)
            if hasattr(args, 'quality_score'):
                setattr(preprocesser, 'quality_score', args.quality_score)
                
            preprocesser.run()
            
            mtools.task_is_finished(task = 'Preprocessing',
                    file = monitorization_file, 
                    task_output = args.output + '/Preprocess')
            
            mg = [args.output + '/Preprocess/Trimmomatic/quality_trimmed_' + mg_name + 
                  '_' + fr + '_paired.fq' for fr in ['forward', 'reverse']]
        
        '''
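For reference, the mg_name expression above strips the directory and the read-pair suffix from the first file name. With a hypothetical path:

mg = ['data/reads/sampleA_R1.fastq', 'data/reads/sampleA_R2.fastq']
mg_name = mg[0].split('/')[-1].split('_R')[0]  # 'sampleA'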
Example #8
def main():
    with open(config.configFile(), "a+") as outFile:
        json.dump(vars(config), outFile)

    # set gpus
    if config.gpus != "":
        config.gpusNum = len(config.gpus.split(","))
        os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus

    tf.logging.set_verbosity(tf.logging.ERROR)

    assert config.batchSize == 1, 'Error: always batch size of one'
    # process data
    print(bold("Preprocess data..."))
    start = time.time()
    preprocessor = Preprocesser()
    data, embeddings, answerDict = preprocessor.preprocessData(hasTrain = False, hasDebug = True)
    print("took {} seconds".format(bcolored("{:.2f}".format(time.time() - start), "blue")))

    # build model
    print(bold("Building model..."))
    start = time.time()
    model = MACnet(embeddings, answerDict)
    print("took {} seconds".format(bcolored("{:.2f}".format(time.time() - start), "blue")))

    # initializer
    init = tf.global_variables_initializer()

    # savers
    savers = setSavers(model)
    saver, emaSaver = savers["saver"], savers["emaSaver"]

    # sessionConfig
    sessionConfig = setSession()
    
    with tf.Session(config = sessionConfig) as sess:

        # ensure no more ops are added after model is built
        sess.graph.finalize()

        # restore / initialize weights, initialize epoch variable
        epoch = loadWeights(sess, saver, init)

        print("Testing on epoch {}...".format(epoch))
        
        start = time.time()
        if epoch > 0:
            if config.useEMA:
                emaSaver.restore(sess, config.weightsFile(epoch))
            else:
                saver.restore(sess, config.weightsFile(epoch))

        #evalRes = runEvaluation(sess, model, data["main"], epoch, evalTest = False, evalDebug = True)
        #####################################################
        imagesFilename = './FRIDGR_v0.1/data/debug.h5'
        datasetFilename = './FRIDGR_v0.1/data/FRIDGR_debug_questions.json'
        images = {"imagesFilename": imagesFilename}
        q = ["Do I have meat?",
            "How many meats?",
            "Do I have egg?",
            "Do I have donut?",
            "Do I have beer?"]
        a = [True, 2, True, True, False]
        with open(datasetFilename, "r") as datasetFile:
            data = json.load(datasetFile)["questions"]
        for i in range(5):
            #evalRes = runDemo(sess, model, data["main"], epoch)
            # data to instance:
            instance = data[20]
            question = q[i] #instance["question"]
            questionSeq = preprocessor.tokenize(question)
            answer = a[i] #instance.get("answer", "yes") # DUMMY_ANSWER
            dummyProgram = [{"function": "FUNC", "value_inputs": [], "inputs": []}]
            program = instance.get("program", dummyProgram)
            postfixProgram = program #preprocessor.programTranslator.programToPostfixProgram(program)
            programSeq = preprocessor.programTranslator.programToSeq(postfixProgram)
            programInputs = preprocessor.programTranslator.programToInputs(postfixProgram, offset = 2)
            print('DEBUG1:', question, questionSeq, answer,  instance["image_index"])
            #print('DEBUG2:', dummyProgram)
            #print('DEBUG3:', program)
            # pass other fields to instance?
            instances = []
            instances.append({
                    "question": question,
                    "questionSeq": questionSeq,
                    "answer": answer,
                    "imageId": instance["image_index"],
                    "program": program,
                    "programSeq": programSeq,
                    "programInputs": programInputs,
                    "index": 0
                    })
            dataset = {"debug": None}
            buckets = preprocessor.vectorizeData(instances)
            #print('buckets =', buckets)
            dataset["debug"] = {"data": buckets, "images": images, "train": False}
            evalRes = runDemo(sess, model, dataset, epoch)
            pred = evalRes["debug"]["preds"][0]["prediction"]
            print('Answer/Predict is', answer, pred)
            #print('ID/Text decoded is', i, preprocessor.answerDict.decodeId(pred))
            #####################################################
        
        #extraEvalRes = None

        #print("took {:.2f} seconds".format(time.time() - start))
        #printDatasetResults(None, evalRes, extraEvalRes)

        #print("Writing predictions...")
        #writePreds(preprocessor, evalRes, extraEvalRes)

        print(bcolored("Done!","white"))