Example No. 1
    def kernelMetadata(self):
        """Return citation metadata as a mapping.KernelMetadata object.

        The mapping is based on the identifier's preferred metadata
        profile; missing attributes will be None.
        """
        import mapping

        return mapping.map(self.cm, profile=self.profile.label)
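A minimal usage sketch (hypothetical caller: `ident` stands for whatever object exposes kernelMetadata(); the attribute names are the usual kernel elements):

    # Read the kernel metadata defensively: any attribute may be None
    # when the preferred profile lacks that element.
    km = ident.kernelMetadata()
    creator = km.creator or "(:unav)"
    title = km.title or "(:unav)"
    print("%s / %s" % (creator, title))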
Example No. 2
def details(request):
    """ ID Details page for a given ID """
    d = {'menu_item': 'ui_manage.null'}
    d["testPrefixes"] = uic.testPrefixes
    identifier = request.path_info[len("/id/"):]
    r = _getLatestMetadata(identifier,
                           request,
                           prefixMatch=(request.GET.get(
                               "prefix_match", "no").lower() == "yes"))
    if type(r) is str:
        django.contrib.messages.error(
            request, uic.formatError(r + ":  " + identifier))
        # ToDo: Pass details in from previous screen so we know where to send redirect back to
        if userauth.getUser(request) is None:
            return redirect("ui_search.index")
        else:
            return redirect("ui_home.index")
    s, id_metadata = r
    assert s.startswith("success:")
    if " in_lieu_of " in s:
        newid = s.split()[1]
        django.contrib.messages.info(
            request,
            "Identifier %s returned in lieu of %s." % (newid, identifier))
        return redirect("/id/" + urllib.quote(newid, ":/"))
    d['allow_update'] = policy.authorizeUpdateLegacy(
        userauth.getUser(request, returnAnonymous=True), id_metadata["_owner"],
        id_metadata["_ownergroup"])
    d['identifier'] = id_metadata
    d['id_text'] = s.split()[1]
    d['id_as_url'] = util2.urlForm(d['id_text'])
    d['is_test_id'] = _isTestId(d['id_text'], d['testPrefixes'])
    d['internal_profile'] = metadata.getProfile('internal')
    d['target'] = id_metadata['_target']
    d['current_profile'] = metadata.getProfile(id_metadata['_profile']) or\
      metadata.getProfile('erc')
    d['recent_creation'] = identifier.startswith('doi') and \
          (time.time() - float(id_metadata['_created']) < 60 * 30)
    d['recent_update'] = identifier.startswith('doi') and \
          (time.time() - float(id_metadata['_updated']) < 60 * 30)
    if d['current_profile'].name == 'datacite' and 'datacite' in id_metadata:
        r = datacite.dcmsRecordToHtml(id_metadata["datacite"])
        if r:
            d['datacite_html'] = r
    if d['current_profile'].name == 'crossref' and 'crossref' in id_metadata and \
      id_metadata['crossref'].strip() != "":
        d['has_crossref_metadata'] = True
    t_stat = [x.strip() for x in id_metadata['_status'].split("|", 1)]
    d['pub_status'] = t_stat[0]
    if t_stat[0] == 'unavailable' and len(t_stat) > 1:
        d['stat_reason'] = t_stat[1]
    if t_stat[0] == 'public' and identifier.startswith("ark:/"):
        d['schemaDotOrgMetadata'] = _schemaDotOrgMetadata(
            mapping.map(id_metadata), d['id_as_url'])
    d['has_block_data'] = uic.identifier_has_block_data(id_metadata)
    d['has_resource_type'] = (d['current_profile'].name == 'datacite'
                              and id_metadata.get('datacite.resourcetype', '') != '')
    return uic.render(request, "manage/details", d)
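The branching above encodes the status-line convention of `_getLatestMetadata`: on success it returns a tuple whose first element starts with "success:" followed by the (possibly substituted) identifier, with an optional "in_lieu_of" clause. A sketch of that convention (the exact token layout is inferred from the checks in details(), not confirmed by the source):

    # Inferred status-line layout; token positions are assumptions
    # based on s.split()[1] and the ' in_lieu_of ' test above.
    s = "success: ark:/99999/fk4new in_lieu_of ark:/99999/fk4old"
    assert s.startswith("success:")
    returned_id = s.split()[1]   # identifier actually returned
    if " in_lieu_of " in s:
        print("%s was returned in lieu of the requested identifier" % returned_id)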
Example No. 3
def visualize_data():
    isTwo = False  # True when the current PDF page already holds one chart

    for day in range(0, 7):
        X = []
        Y = []
        for i in range(0, len(lights)):
            if timeON[day][i] != 0:
                X.append(mapping.map(lights[i]))
                Y.append(timeON[day][i] / 60.0)  # seconds -> minutes (float division)
        if X and Y:
            zipped = sorted(zip(X, Y), key=lambda x: x[1], reverse=True)
            unzipped = list(zip(*zipped))
            Y = unzipped[1]
            X = list(range(0, len(Y)))
            my_xticks = unzipped[0]
            plt.clf()
            plt.bar(X, Y)
            plt.xticks(X, my_xticks, rotation='vertical')
            plt.title(str(mapping.weekday(day)))
            plt.ylabel('min')
            plt.gcf().subplots_adjust(bottom=0.30)
            imgPath = path + str(mapping.weekday(day)).replace("\n", "") + ".png"
            plt.savefig(imgPath)
            if isTwo:
                rep.image(imgPath, 30, 140, 160, 120)
                isTwo = False
            else:
                rep.add_page()
                rep.image(imgPath, 30, 10, 160, 120)
                isTwo = True

    X = []
    Y = []
    my_xticks = []
    for day in range(0, 7):
        my_xticks.append(mapping.weekday(day))
    for item in timeON:
        Y.append(sum(item) / 3600.0)  # seconds -> hours (float division)
    plt.clf()
    X = list(range(0, len(Y)))
    plt.bar(X, Y)
    plt.xticks(X, my_xticks, rotation='vertical')
    plt.title("Tydzień")  # "Week"
    plt.ylabel('godz')  # "hours"
    imgPath = path + "Tydzień" + ".png"
    plt.savefig(imgPath)
    if isTwo:
        rep.image(imgPath, 30, 140, 160, 120)
        isTwo = False
    else:
        rep.add_page()
        rep.image(imgPath, 30, 10, 160, 120)
        isTwo = True

    rep.output(pdf_path + "raport_" + str(datetime.now())[0:10].replace("-", "_") + ".pdf")
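The sort-then-unzip idiom above orders the bars by on-time, descending, and then rebuilds positional x indices for matplotlib. The same pattern on toy data:

    labels = ['lamp', 'fan', 'tv']
    minutes = [12, 47, 30]
    pairs = sorted(zip(labels, minutes), key=lambda p: p[1], reverse=True)
    names, values = zip(*pairs)    # ('fan', 'tv', 'lamp'), (47, 30, 12)
    xs = list(range(len(values)))  # bar positions 0..n-1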
Example No. 4
def save_data(power, price):
    with open(path + "results.txt", "w") as file:
        file.write(str(datetime.now()) + "\n\n")
        file.write("max core temp: " + str(max(core_temp)) + "\n")
        file.write("min core temp: " + str(min(core_temp)) + "\n")
        file.write("avg core temp: " + str(sum(core_temp) / len(core_temp)) + "\n")
        for i in range(0, 7):
            rep.add_page()
            rep.set_font('DejaVuBold', '', 4 * f_size)
            rep.cell(W, 2 * c_height, txt=mapping.weekday(i).replace("\n",""), align="C", ln=1)
            rep.set_font('DejaVuBold', '', 2 * f_size)
            ybefore = rep.get_y()
            left_margin = rep.get_x()
            rep.multi_cell(0.25 * W, c_height, txt="Nazwa:", align="C", border=1)
            rep.set_xy(W * 0.25 + left_margin, ybefore)
            rep.multi_cell(0.25 * W, c_height, txt="Ile razy:", align="C", border=1)
            rep.set_xy(W * 0.50 + left_margin, ybefore)
            rep.multi_cell(0.25 * W, c_height, txt="Łączny czas:", align="C", border=1)
            rep.set_xy(W * 0.75 + left_margin, ybefore)
            rep.multi_cell(0.25 * W, c_height, txt="Średni czas:", align="C", border=1)
            rep.set_font('DejaVu', '', 2 * f_size)

            wicketTotal = 0
            file.write(mapping.weekday(i))
            for topic in topics:
                avgLen = 0
                times = 0
                if topic in lights:
                    index = topics.index(topic)
                    times = int(math.ceil(counters[i][index] / 2.0))  # on/off pairs, rounded up
                    if times != 0:
                        index = lights.index(topic)
                        length = timeON[i][index]
                        if length != 0 and times != 0:
                            avgLen = length / times
                            ybefore = rep.get_y()
                            left_margin = rep.get_x()
                            rep.multi_cell(0.25 * W, c_height, txt=mapping.map(topic), align="L", border=1)
                            rep.set_xy(W * 0.25 + left_margin, ybefore)
                            rep.multi_cell(0.25 * W, c_height, txt=str(times), align="L", border=1)
                            rep.set_xy(W * 0.50 + left_margin, ybefore)
                            rep.multi_cell(0.25 * W, c_height, txt=mapping.time_format(length), align="L", border=1)
                            rep.set_xy(W * 0.75 + left_margin, ybefore)
                            rep.multi_cell(0.25 * W, c_height, txt=mapping.time_format(avgLen), align="L", border=1)

                        file.write(mapping.map(topic) + " | razy: " + str(times) +
                                   " | łącznie: " + mapping.time_format(length) +
                                   " | średnio: " + mapping.time_format(avgLen) + "\n")
                elif topic not in sensors:
                    index = topics.index(topic)
                    if topic in wicket:
                        wicketTotal += counters[i][index]
                    else:
                        times = int(math.ceil(counters[i][index] / 2.0))
                    if times != 0:
                        file.write(mapping.map(topic) + " | razy: " + str(times) + "\n")
                        ybefore = rep.get_y()
                        left_margin = rep.get_x()
                        rep.multi_cell(0.25 * W, c_height, txt=mapping.map(topic), align="L", border=1)
                        rep.set_xy(W * 0.25 + left_margin, ybefore)
                        rep.multi_cell(0.25 * W, c_height, txt=str(times), align="L", border=1)
                        rep.set_xy(W * 0.50 + left_margin, ybefore)
                        rep.multi_cell(0.25 * W, c_height, txt="", align="L", border=1)
                        rep.set_xy(W * 0.75 + left_margin, ybefore)
                        rep.multi_cell(0.25 * W, c_height, txt="", align="L", border=1)
            if wicketTotal != 0:
                file.write(mapping.map(wicket[0]) + " | razy: " + str(wicketTotal) + "\n")
                ybefore = rep.get_y()
                left_margin = rep.get_x()
                rep.multi_cell(0.25 * W, c_height, txt=mapping.map(wicket[0]), align="L", border=1)
                rep.set_xy(W * 0.25 + left_margin, ybefore)
                rep.multi_cell(0.25 * W, c_height, txt=str(wicketTotal), align="L", border=1)
                rep.set_xy(W * 0.50 + left_margin, ybefore)
                rep.multi_cell(0.25 * W, c_height, txt="", align="L", border=1)
                rep.set_xy(W * 0.75 + left_margin, ybefore)
                rep.multi_cell(0.25 * W, c_height, txt="", align="L", border=1)
            file.write("Łącznie w ten dzień: " + mapping.time_format(sum(timeON[i])) + " godz.\n")
            rep.ln(c_height)
            rep.multi_cell(W, c_height, txt="Łącznie w ten dzień: " + mapping.time_format(sum(timeON[i])), align="L")

        file.write("\n\nW całym tygodniu:\n")
        rep.add_page()
        rep.set_font('DejaVuBold', '', 4 * f_size)
        rep.cell(W, 2 * c_height, txt="W całym tygodniu", align="C", ln=1)
        rep.set_font('DejaVuBold', '', 2 * f_size)
        ybefore = rep.get_y()
        left_margin = rep.get_x()
        rep.multi_cell(0.50 * W, c_height, txt="Nazwa:", align="C", border=1)
        rep.set_xy(W * 0.50 + left_margin, ybefore)
        rep.multi_cell(0.50 * W, c_height, txt="Ile razy:", align="C", border=1)
        rep.set_font('DejaVu', '', 2 * f_size)
        wicketTotal = 0
        for topic in utils:
            index = topics.index(topic)
            times = 0
            for day in counters:
                if topic in wicket:
                    wicketTotal += day[index]
                else:
                    times += int(math.ceil(day[index] / 2.0))
            if times != 0:
                ybefore = rep.get_y()
                left_margin = rep.get_x()
                rep.multi_cell(0.50 * W, c_height, txt=mapping.map(topic), align="L", border=1)
                rep.set_xy(W * 0.50 + left_margin, ybefore)
                rep.multi_cell(0.50 * W, c_height, txt=str(times), align="L", border=1)
                file.write(mapping.map(topic) + " | razy: " + str(times) + "\n")
        if wicketTotal != 0:
            ybefore = rep.get_y()
            left_margin = rep.get_x()
            rep.multi_cell(0.50 * W, c_height, txt=mapping.map(wicket[0]), align="L", border=1)
            rep.set_xy(W * 0.50 + left_margin, ybefore)
            rep.multi_cell(0.50 * W, c_height, txt=str(wicketTotal), align="L", border=1)
            file.write(mapping.map(wicket[0]) + " | razy: " + str(wicketTotal) + "\n")

        rep.ln(2*c_height)
        rep.set_font('DejaVuBold', '', 2 * f_size)
        weekTotal = 0
        for day in timeON:
            weekTotal += sum(day)
        rep.multi_cell(W, c_height, txt="Łącznie w całym tygodniu: ", align="L")
        file.write("\n\nŁącznie w całym tygodniu: " + mapping.time_format(weekTotal) + " godz.\n")
        usage = (weekTotal / 3600.0 * power) / 1000  # seconds -> hours, then W·h -> kWh
        rep.multi_cell(W, c_height, txt="szacowane zużycie: " + '{0:.2f}'.format(usage) + " kWh", align="L")
        rep.multi_cell(W, c_height, txt="szacowany koszt: " + '{0:.2f}'.format(usage * price) + " zł", align="L")
        file.write("szacowane zużycie: " + '{0:.2f}'.format(usage) + " kWh\n")
        file.write("szacowany koszt: " + '{0:.2f}'.format(usage * price) + " zł\n")

        rep.ln(2 * c_height)
        rep.set_font('DejaVu', '', 2 * f_size)
        rep.multi_cell(W, c_height, txt="max core temp: " + str(max(core_temp)), align="L")
        rep.multi_cell(W, c_height, txt="min core temp: " + str(min(core_temp)), align="L")
        rep.multi_cell(W, c_height, txt="avg core temp: " + '{0:.2f}'.format(sum(core_temp) / len(core_temp)), align="L")
        rep.ln(2 * c_height)
        rep.multi_cell(W, c_height, txt="data: " + str(datetime.now()), align="L")
Example No. 5
def gram_main():
    result = get_result.get_result()  # 'result', not 'str': avoid shadowing the builtin
    stack = LL1.main(result)
    mapping.map(stack)
Example No. 6
def main():
    def menu():
        # print what options you have
        print "\n*** OPTIONS ***"
        print "0) Quit           5) Right Kick       10) Walk Left         15) Stand Up from Sit      20) R to S       25) Goalie"
        print "1) Stiffness ON   6) Walk Forward     11) Walk Right        16) Find Red Ball L to R   21) R to L       26) Map"
        print "2) Stiffness OFF  7) Walk Backwards   12) Stand Up on Back  17) Find Red Ball L to S   22) Stop Tracker 27) Kalman Filter"
        print "3) Goalie Pose    8) Turn Left        13) Stand Up on Front 18) Find Red Ball L to L   23) Dive Left    28) Goalie intercepts clockwise"
        print "4) Left Kick      9) Turn Right       14) Sit Down          19) Find Red Ball R to L   24) Dive Right   29) Goalie intercepts counterclockwise"
        print "type what you want him to say in quotations"
        return input("\nChoose your option(s) as list: ")

    # NOW THE PROGRAM REALLY STARTS, AS CODE IS RUN
    loop = 1
    choice = 0
    while loop == 1:
        choice = menu()
        k = 0
        while k < len(choice):
            if choice[k] == 0:
                loop = 0
                break
            elif choice[k] == 1:
                config.stiffnessOn()
            elif choice[k] == 2:
                config.stiffnessOff()
            elif choice[k] == 3:
                goalie.goaliePose()
            elif choice[k] == 4:
                kick.kickLeftFoot()
            elif choice[k] == 5:
                kick.kickRightFoot()
            elif choice[k] == 6:
                walk.ultimateWalkTo(1, 0, 0)
            elif choice[k] == 7:
                walk.ultimateWalkTo(-1, 0, 0)
            elif choice[k] == 8:
                walk.ultimateWalkTo(0, 0, 2)
            elif choice[k] == 9:
                walk.ultimateWalkTo(0, 0, -2)
            elif choice[k] == 10:
                walk.ultimateWalkTo(0, -1, 0)
            elif choice[k] == 11:
                walk.ultimateWalkTo(0, 1, 0)
            elif choice[k] == 12:
                stand.standUp()
            elif choice[k] == 13:
                stand.standUpOnFront()
            elif choice[k] == 14:
                stand.sitDown()
            elif choice[k] == 15:
                stand.standFromSit()
            elif choice[k] == 16:
                track.findRedBallAndKick(1)
            elif choice[k] == 17:
                track.findRedBallAndKick(2)
            elif choice[k] == 18:
                track.findRedBallAndKick(3)
            elif choice[k] == 19:
                track.findRedBallAndKick(4)
            elif choice[k] == 20:
                track.findRedBallAndKick(5)
            elif choice[k] == 21:
                track.findRedBallAndKick(6)
            elif choice[k] == 22:
                track.stopTracker()
            elif choice[k] == 23:
                dive.diveLeft()
            elif choice[k] == 24:
                dive.diveRight()
            elif choice[k] == 25:
                goalie.goalie()
            elif choice[k] == 26:
                mapping.map()
            elif choice[k] == 27:
                kalmanthreaded.kalman().start()
            elif choice[k] == 28:
                goalie.goalieImproved(1)
            elif choice[k] == 29:
                goalie.goalieImproved(0)
            elif choice[k] == 30:
                kalmanthreaded.stop()
            elif isinstance(choice[k], basestring): # checks whether the input is a string;
                texttospeechProxy.say(choice[k])    # if so, the NAO speaks it.
            else:
                # This else branch lets you try out new calls that you don't
                # want in the command menu: just type out what you want to run,
                # e.g. [kalmanthreaded.get('xposition')]
                choice[k]
            k += 1
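Since this is Python 2, input() evaluates the typed line, so the menu expects a literal Python list mixing option numbers and quoted strings:

    # Example session (Python 2 input() eval-uates the typed line):
    # Choose your option(s) as list: [1, 6, "hello world", 0]
    # -> stiffness on, walk forward, say "hello world", then quit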
Example No. 7
    def map(self):
        mapping.map(self)
Example No. 8
             overrides["datacite." + e] = metadata["datacite." + e].strip()
     if "datacite.publicationyear" in overrides:
         try:
             overrides[
                 "datacite.publicationyear"] = ezidapp.models.validation.publicationDate(
                     overrides["datacite.publicationyear"])[:4]
         except:
             overrides["datacite.publicationyear"] = "0000"
     try:
         return util.insertXmlEncodingDeclaration(
             crossrefToDatacite(metadata["crossref"].strip(), overrides))
     except Exception, e:
         assert False, "Crossref to DataCite metadata conversion error: " + str(
             e)
 else:
     km = mapping.map(metadata, datacitePriority=True, profile=profile)
     for a in ["creator", "title", "publisher", "date"]:
         if getattr(km, a) == None:
             if supplyMissing:
                 setattr(km, a, "(:unav)")
             else:
                 assert False, "no " + ("publication date"
                                        if a == "date" else a)
     d = km.validatedDate
     r = _interpolate(
         _metadataTemplate,
         idType,
         idBody,
         km.creator,
         km.title,
         km.publisher,
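The "(:unav)" string is the standard kernel-metadata placeholder for an unavailable value; when supplyMissing is set, the loop above substitutes it for any missing element. The same pattern in isolation (toy stand-in object; attribute names taken from the fragment):

    class Km(object):  # stand-in for mapping.KernelMetadata
        creator = "Smith, J."
        title = None
        publisher = None
        date = "2020"

    km = Km()
    for a in ["creator", "title", "publisher", "date"]:
        if getattr(km, a) is None:
            setattr(km, a, "(:unav)")  # 'unavailable' placeholder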
Example No. 9
    def cg(self, force=False, com=False):
        """Generate the coarse-grained structure.

        The b-factor field is set to a value that reflects the
        secondary structure.
        """

        # If the coarse grained structure is set already, just return,
        # unless regeneration is forced.
        if self._cg and not force:
            return self._cg
        self._cg = []
        atid     = 1
        bb       = [1]
        fail     = False
        previous = ''
        for residue, rss, resname in zip(self.residues, self.sstypes, self.sequence):
            # For DNA we need to get the O3' to the following residue when calculating COM
            # The force and com options ensure that this part does not affect itp generation or anything else
            if com:
                # Just an initialization, this should complain if it isn't updated in the loop
                store = 0
                for ind, i in enumerate(residue):
                    if i[0] == "O3'":
                        if previous != '':
                            residue[ind] = previous
                            previous = i
                        else:
                            store = ind
                            previous = i
                # We couldn't remove the O3' from the 5' end residue during the loop so we do it now
                if store > 0:
                    del residue[store]

            # Check whether residue names have changed, for example because
            # the user has set residues interactively.
            residue = [(atom[0], resname)+atom[2:] for atom in residue]
            if residue[0][1] in ("SOL", "HOH", "TIP"):
                continue
            if residue[0][1] not in self.options['ForceField'].mapping:
                logging.warning("Skipped unknown residue %s\n" % residue[0][1])
                continue
            # Get the mapping for this residue
            # CG.map returns bead coordinates and mapped atoms
            # This will fail if there are (too many) atoms missing, which is
            # only problematic if a mapped structure is written; the topology
            # is inferred from the sequence. So this is the best place to raise
            # an error
            try:
                beads, ids = mapping.map(residue, ff=self.options['ForceField'],
                                         ca2bb=self.options['ForceField'].ca2bb)
                beads      = zip(self.options['ForceField'].names[residue[0][1]], beads, ids)
                if residue[0][1] in self.options['ForceField'].polar:
                    beads = add_dummy(beads, dist=0.14, n=2)
                elif residue[0][1] in self.options['ForceField'].charged:
                    beads = add_dummy(beads, dist=0.11, n=1)
            except ValueError:
                logging.error("Too many atoms missing from residue %s %d(ch:%s):",
                              residue[0][1], residue[0][2]-(32 << 20), residue[0][3])
                logging.error(repr([i[0] for i in residue]))
                fail = True
                continue  # 'beads' is undefined for this residue; skip to the next one

            for name, (x, y, z), ids in beads:
                # Add the bead with coordinates and secondary structure id to the list
                self._cg.append((name, residue[0][1][:3], residue[0][2], residue[0][3], x, y, z, secstruc.ss2num[rss]))
                # Add the ids to the list, after converting them to indices to the list of atoms
                self.mapping.append([atid+i for i in ids])

            # Increment the atom id; This pertains to the atoms that are included in the output.
            atid += len(residue)

            # Keep track of the numbers for CONECTing
            bb.append(bb[-1]+len(beads))

        if fail:
            logging.error("Unable to generate coarse grained structure due to missing atoms.")
            sys.exit(1)

        return self._cg
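Here mapping.map takes a residue (a list of atom tuples) plus force-field options and returns bead coordinates together with the indices of the atoms that built each bead; the shapes below are inferred from how cg() consumes the result, not confirmed by the source:

    # Inferred call/return shapes ('forcefield' is a stand-in object):
    # beads -> [(x, y, z), ...]   one coordinate per coarse-grained bead
    # ids   -> [[0, 1, 2], ...]   per-bead indices into the residue's atom list
    beads, ids = mapping.map(residue, ff=forcefield, ca2bb=forcefield.ca2bb)
    for (x, y, z), atom_indices in zip(beads, ids):
        pass  # one bead at a time, as in the loop above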
Example No. 10
def run():
    # initialize VGG Model and PCA
    iset = init.Init()
    # initialize neural network model
    model = networks.Network()
    model.init_model()
    # initialize global instance
    uset = users.Users()

    # store special features in memory
    # dset_special = dataset.Dataset(set.PATH_TO_SPECIAL)
    dset_special = None
    print "Dataset Loaded."
    # set normal features in memory to false
    is_normal_loaded = True
    tset_name = None
    is_reloaded = False
    m_checkpoints = 0

    while True:

        queue = db.lrange(set.REQUEST_QUEUE, set.REQUEST_START, set.REQUEST_END)
        q_uid = None
        # initialize local instance
        select = selectonly.Select()
        finalize = save.Save()
        viewer = view.View()
        retrain_v = retrainView.retrainView()
        retrain_h = retrainHeatmap.retrainHeatmap()
        heat = heatmap.Heatmap()
        t_train = train.Train()
        report_label = label.label()
        report_count = count.count()
        report_map = mapping.map()

        for q in queue:

            q = json.loads(q.decode("utf-8"))
            q_uid = q["uid"]
            target = q["target"]
            session_uid = q["uid"]
            dataSetPath = set.DATASET_DIR + q["dataset"]
            pcaPath = set.DATASET_DIR + q["pca"]
            # if specific features then set m_loaded to true
            is_normal_loaded = dataSetPath != set.PATH_TO_SPECIAL

            if target == "label":
                report_label.setData(q)

            if target == "count":
                report_count.setData(q)

            if target == "map":
                report_map.setData(q)

            if target == 'selectonly':
                select.setData(q)

            if target == 'save':
                finalize.setData(q)

            if target == 'view':
                viewer.setData(q)

            if target == 'retrainView':
                retrain_v.setData(q)

            if target == 'retrainHeatmap':
                retrain_h.setData(q)

            if target == 'heatmapAll':
                heatmaps = q["viewJSONs"]

            if target == 'heatmap':
                heat.setData(q)

            if target == 'train':
                t_train.setData(q)

            if target == 'reload':
                t_path = set.TRAININGSET_DIR + q["trainingSetName"]
                is_reloaded = True

            if target == 'reviewSave':
                q_samples = json.loads(q["samples"])

        if q_uid is not None:

            print target, " Session Start ....."

            no_uid = True
            uidx = 0

            # find current user Index
            for i in range(len(uset.users)):
                if uset.users[i]['uid'] == session_uid:
                    uidx = i
                    no_uid = False

            if no_uid:
                # set users data
                uset.addUser(session_uid)

            if is_normal_loaded:
                dset = dataset.Dataset(dataSetPath)
            else:
                dset = dset_special

            PCA = joblib.load(pcaPath)

            if target == 'selectonly':
                uset.setIter(uidx, select.iter)
                print "Predict Start ... "
                t0 = time()
                scores = model.predict_prob(dset.features)
                t1 = time()
                print "Predict took ", t1 - t0
                # Find uncertain samples
                data = select.getData(scores, dset.slideIdx, dset.slides, dset.x_centroid, dset.y_centroid)
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'view':
                slide_idx = dset.getSlideIdx(viewer.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Predict Start ... "
                t0 = time()
                predictions = model.predict(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0
                object_idx = load(
                    viewer.left, viewer.right, viewer.top, viewer.bottom, x_centroid_set.astype(np.float), y_centroid_set.astype(np.float)
                )
                data = {}

                for i in object_idx:
                    data[str(x_centroid_set[i][0])+'_'+str(y_centroid_set[i][0])] = str(predictions[i])

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'heatmap':
                slide_idx = dset.getSlideIdx(heat.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Predict Start ... "
                t0 = time()
                if not set.IS_HEATMAP:
                    # NB: 'scores' is only defined in this branch but is used below
                    scores = model.predict_prob(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0
                # set x and y maps
                heat.setXandYmap()
                # write heatmaps
                heat.setHeatMap(x_centroid_set, y_centroid_set, scores)
                # get heatmap data
                data = heat.getData(0)

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'heatmapAll':
                data = []
                index = 0

                t0 = time()
                scores = model.predict_prob(dset.features)
                t1 = time()
                print "Predict took ", t1 - t0

                for h in heatmaps:

                    h['uid'] = session_uid
                    heat.setData(h)

                    slide_idx = dset.getSlideIdx(heat.slide)
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    # feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    score_set = scores[data_idx: data_idx+object_num]
                    # set x and y maps
                    heat.setXandYmap()
                    # write heatmaps
                    heat.setHeatMap(x_centroid_set, y_centroid_set, score_set)
                    # get heatmap data
                    data_k = heat.getData(index)
                    data.append(data_k)
                    index += 1

                # print data
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'reload':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                # uset.setReloadedData(uidx, t_path, dset.slides)
                uset.setReloadedData(uidx, t_path)

                sample_size = len(uset.users[uidx]['samples'])

                m_checkpoints = uset.users[uidx]['samples'][sample_size-1]['checkpoints']

                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block gets its own AUG_BATCH_SIZE-row slice
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    train_features[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                tset_path = t_path.split('/')[-1]
                tset_name = tset_path.split('.')[0]

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'label':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                uset.setReloadedData(uidx, report_label.trainSet)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block gets its own AUG_BATCH_SIZE-row slice
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    train_features[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, report_label.classifier)
                t1 = time()
                print "Training took ", t1 - t0

                slide_idx = dset.getSlideIdx(report_label.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                test_features = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                print "Testing Start ... "
                t0 = time()
                predicts = model.predict(test_features)
                t1 = time()
                print "Predict took ", t1 - t0

                inputImageFile = '/datasets/tif/'+ report_label.slide + '.svs.dzi.tif'

                bold = 512
                bold_left = report_label.left - bold
                bold_top = report_label.top - bold
                bold_bottom = report_label.bottom + bold
                bold_right = report_label.right + bold
                bold_width = report_label.width + 2*bold
                bold_height = report_label.height + 2*bold

                ts = large_image.getTileSource(inputImageFile)

                region = dict(
                    left=report_label.left, top=report_label.top,
                    width=report_label.width, height=report_label.height,
                )

                im_region = ts.getRegion(
                    region=region, format=large_image.tilesource.TILE_FORMAT_NUMPY
                )[0]

                mydb = mysql.connector.connect(
                  host=set.MYSQL_HOST,
                  user="******",
                  passwd="guest",
                  database="nuclei",
                  charset='utf8',
                  use_unicode=True
                )

                boundaryTablename = 'sregionboundaries'

                runcursor = mydb.cursor()

                query = ('SELECT centroid_x, centroid_y, boundary FROM ' + boundaryTablename +
                         ' WHERE slide=%s'
                         ' AND centroid_x BETWEEN %s AND %s'
                         ' AND centroid_y BETWEEN %s AND %s')

                # parameterized query: let the driver quote values safely
                runcursor.execute(query, (report_label.slide,
                                          report_label.left, report_label.right,
                                          report_label.top, report_label.bottom))

                boundarySet = runcursor.fetchall()

                # find region index from hdf5
                object_idx = load(
                    report_label.left, report_label.right, report_label.top, report_label.bottom, x_centroid_set.astype(np.float), y_centroid_set.astype(np.float)
                )

                # set an array for boundary points in a region to zero
                im_bold = np.zeros((bold_height, bold_width), dtype=np.uint8)

                for i in object_idx:
                    for j in range(len(boundarySet)):
                        x = int(boundarySet[j][0])
                        y = int(boundarySet[j][1])
                        boundaryPoints = []
                        if x == int(x_centroid_set[i, 0]) and y == int(y_centroid_set[i, 0]):
                            points = boundarySet[j][2].encode('utf-8').split(' ')  # 'points', not 'object': avoid shadowing the builtin
                            object_points = []
                            for p in range(len(points) - 1):
                                intP = map(int, points[p].split(','))
                                intP[0] = intP[0] - report_label.left + bold
                                intP[1] = intP[1] - report_label.top + bold
                                object_points.append(intP)
                            boundaryPoints.append(np.asarray(object_points))
                            cv2.fillPoly(im_bold, boundaryPoints, 255 if predicts[i] > 0 else 128)

                im_out = im_bold[bold:bold+report_label.height, bold:bold+report_label.width]

                imsave(report_label.inFile, im_out)

                runcursor.close()
                mydb.close()

                print ("label success ", report_label.inFile)
                data = {"success": report_label.outFile}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

                uset.users = []
                uset.u_size = 0

                model = networks.Network()
                model.init_model()
                print ("label done")

            if target == 'count':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                uset.setReloadedData(uidx, report_count.trainSet)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block gets its own AUG_BATCH_SIZE-row slice
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    train_features[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, report_count.classifier)
                t1 = time()
                print "Training took ", t1 - t0

                print "Testing Start ... "
                t0 = time()
                predicts = model.predict(dset.features)
                t1 = time()
                print "Predict took ", t1 - t0

                # find positive and negative numbers for each slide
                pos_num = []
                neg_num = []

                for i in range(dset.n_slides):
                    if i == len(dset.dataIdx) - 1:
                        predict = predicts[dset.dataIdx[i, 0]:]
                    else:
                        predict = predicts[dset.dataIdx[i, 0]: dset.dataIdx[i+1, 0]]
                    pos = len(predict[predict>0])
                    neg = len(predict) - pos
                    pos_num.append(pos)
                    neg_num.append(neg)

                print('>> Writing count file')
                out_file = open(report_count.inFile, 'w')

                out_file.write("Slide\t")
                out_file.write("Predicted positive (superpixels)\t")
                out_file.write("Predicted negative (superpixels)\t")                
                out_file.write("\n")

                for i in range(len(dset.slides)):
                    out_file.write("%s\t" % dset.slides[i])
                    out_file.write("%d\t" % pos_num[i])
                    out_file.write("%d\t" % neg_num[i])
                    out_file.write("\n")

                out_file.close()
                print ("count success ", report_count.inFile)
                data = {"success": report_count.outFile}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

                uset.users = []
                uset.u_size = 0

                model = networks.Network()
                model.init_model()
                print ("count done")

            if target == 'map':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                uset.setReloadedData(uidx, report_map.trainSet)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block gets its own AUG_BATCH_SIZE-row slice
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    train_features[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, report_map.classifier)
                t1 = time()
                print "Training took ", t1 - t0

                slide_idx = dset.getSlideIdx(report_map.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                test_features = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Testing Start ... "
                t0 = time()
                predicts = model.predict(test_features)
                t1 = time()
                print "Predict took ", t1 - t0

                output = h5py.File(report_map.inFile, 'w')
                output.create_dataset('features', data=test_features)
                output.create_dataset('predicts', data=predicts)
                output.create_dataset('x_centroid', data=x_centroid_set)
                output.create_dataset('y_centroid', data=y_centroid_set)
                output.create_dataset('slides', data=[report_map.slide])
                output.close()

                print ("map success ", report_map.inFile)
                data = {"success": report_map.outFile}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

                uset.users = []
                uset.u_size = 0

                model = networks.Network()
                model.init_model()
                print ("map done")

            if target == 'save':
                data = finalize.getData(uset.users[uidx])
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'review':
                data = {}
                data['review'] = []

                for sample in uset.users[uidx]['samples']:
                    sample_data = {}
                    sample_data['id'] = str(sample['id'])
                    sample_data['label'] = 1 if sample['label'] == 1 else -1
                    sample_data['iteration'] = int(sample['iteration'])
                    sample_data['slide'] = str(sample['slide'])
                    sample_data['centX'] = str(sample['centX'])
                    sample_data['centY'] = str(sample['centY'])
                    sample_data['boundary'] = ""
                    sample_data['maxX'] = 0
                    sample_data['maxY'] = 0

                    data['review'].append(sample_data)

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'train':
                # increase checkpoint by 1
                m_checkpoints += 1
                # initialize augment
                agen = augments.Augments()
                uset.setIter(uidx, t_train.iter)

                for sample in t_train.samples:
                    # init sample and augment
                    init_sample = dict(
                        id=0, f_idx=0, checkpoints=0,
                        aurl=None, feature=None, label=0,
                        iteration=0, centX=0, centY=0,
                        slideIdx=0, slide=None
                    )
                    init_augment = dict(
                        id=[], checkpoints=[], feature=[], label=[]
                    )

                    # check db_id in users samples
                    remove_idx = []
                    for u in range(len(uset.users[uidx]['samples'])):
                        if uset.users[uidx]['samples'][u]['id'] == sample['id']:
                            remove_idx.append(u)

                    for r in remove_idx:
                        uset.users[uidx]['samples'].pop(r)
                        uset.users[uidx]['augments'].pop(r)

                    # add feature
                    init_sample['id'] = sample['id']
                    init_sample['aurl'] = str(sample['aurl'])
                    init_sample['slide'] = str(sample['slide'])

                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    slideIdx_set = dset.getSlideIdxSet(data_idx, object_num)

                    c_idx = getIdx(
                        x_centroid_set.astype(np.float), y_centroid_set.astype(np.float), slideIdx_set.astype(np.int), np.float32(sample['centX']), np.float32(sample['centY']), slide_idx
                    )

                    f_idx = data_idx + c_idx

                    init_sample['f_idx'] = f_idx
                    init_sample['feature'] = feature_set[c_idx]
                    init_sample['label'] = 1 if sample['label'] == 1 else 0
                    init_sample['iteration'] = t_train.iter
                    init_sample['centX'] = sample['centX']
                    init_sample['centY'] = sample['centY']
                    init_sample['checkpoints'] = m_checkpoints

                    # add augment features
                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    slide_mean = dset.getWSI_Mean(slide_idx)
                    slide_std = dset.getWSI_Std(slide_idx)

                    a_imgs = agen.prepare_image(init_sample['aurl'], slide_mean, slide_std)
                    a_featureSet = iset.FC1_MODEL.predict(a_imgs)
                    a_featureSet = PCA.transform(a_featureSet)
                    a_labelSet = np.zeros((agen.AUG_BATCH_SIZE, )).astype(np.uint8)
                    a_idSet = []
                    a_checkpointSet = []
                    for i in range(agen.AUG_BATCH_SIZE):
                        a_idSet.append(init_sample['id'])
                        a_checkpointSet.append(init_sample['checkpoints'])
                    if init_sample['label'] > 0:
                        a_labelSet.fill(1)

                    init_augment['id'] = a_idSet
                    init_augment['feature'] = a_featureSet
                    init_augment['label'] = a_labelSet
                    init_augment['checkpoints'] = a_checkpointSet

                    uset.setAugmentData(uidx, init_augment)
                    uset.setTrainSampleData(uidx, init_sample)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block gets its own AUG_BATCH_SIZE-row slice
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    train_features[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                # train_labels = to_categorical(train_labels, num_classes=2)
                if tset_name is None:
                    tset_name = t_train.classifier

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'retrainView':

                m_checkpoints += 1
                # initialize augment
                agen = augments.Augments()

                uset.setIter(uidx, retrain_v.iter)

                print "Augment ... ", len(retrain_v.samples)
                t0 = time()
                for sample in retrain_v.samples:
                    # init sample and augment
                    init_sample = dict(
                        id=0, f_idx=0, checkpoints=0,
                        aurl=None, feature=None, label=0,
                        iteration=0, centX=0, centY=0,
                        slideIdx=0, slide=None
                    )
                    init_augment = dict(
                        id=[], checkpoints=[], feature=[], label=[]
                    )

                    # remove samples stored if it already exists
                    remove_idx = []
                    for u in range(len(uset.users[uidx]['samples'])):
                        if uset.users[uidx]['samples'][u]['id'] == sample['id']:
                            remove_idx.append(u)

                    for r in remove_idx:
                        uset.users[uidx]['samples'].pop(r)
                        uset.users[uidx]['augments'].pop(r)

                    # add feature
                    init_sample['id'] = sample['id']
                    init_sample['aurl'] = str(sample['aurl'])
                    init_sample['slide'] = str(sample['slide'])

                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    slideIdx_set = dset.getSlideIdxSet(data_idx, object_num)

                    c_idx = getIdx(
                        x_centroid_set.astype(np.float), y_centroid_set.astype(np.float), slideIdx_set.astype(np.int), np.float32(sample['centX']), np.float32(sample['centY']), slide_idx
                    )

                    f_idx = data_idx + c_idx

                    init_sample['f_idx'] = f_idx
                    init_sample['feature'] = feature_set[c_idx]
                    init_sample['label'] = 1 if sample['label'] == 1 else 0
                    init_sample['iteration'] = retrain_v.iter
                    init_sample['centX'] = sample['centX']
                    init_sample['centY'] = sample['centY']
                    init_sample['checkpoints'] = m_checkpoints

                    # add augment features
                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    slide_mean = dset.getWSI_Mean(slide_idx)
                    slide_std = dset.getWSI_Std(slide_idx)

                    a_imgs = agen.prepare_image(init_sample['aurl'], slide_mean, slide_std)
                    a_featureSet = iset.FC1_MODEL.predict(a_imgs)
                    a_featureSet = PCA.transform(a_featureSet)
                    a_labelSet = np.zeros((agen.AUG_BATCH_SIZE, )).astype(np.uint8)
                    a_idSet = []
                    a_checkpointSet = []
                    for i in range(agen.AUG_BATCH_SIZE):
                        a_idSet.append(init_sample['id'])
                        a_checkpointSet.append(init_sample['checkpoints'])
                    if init_sample['label'] > 0:
                        a_labelSet.fill(1)

                    init_augment['id'] = a_idSet
                    init_augment['feature'] = a_featureSet
                    init_augment['label'] = a_labelSet
                    init_augment['checkpoints'] = a_checkpointSet

                    uset.setAugmentData(uidx, init_augment)
                    uset.setTrainSampleData(uidx, init_sample)

                t1 = time()
                print "Augmentation took ", t1 - t0
                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block gets its own AUG_BATCH_SIZE-row slice
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    train_features[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_start + agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                # train_labels = to_categorical(train_labels, num_classes=2)
                if tset_name is None:
                    tset_name = retrain_v.classifier

                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0, " ", len(train_labels), "Samples"

                slide_idx = dset.getSlideIdx(retrain_v.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Testing Start ... "
                t0 = time()
                predictions = model.predict(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0

                object_idx = load(
                    retrain_v.left, retrain_v.right, retrain_v.top, retrain_v.bottom, x_centroid_set.astype(np.float), y_centroid_set.astype(np.float)
                )
                data = {}
                for i in object_idx:
                    data[str(x_centroid_set[i][0])+'_'+str(y_centroid_set[i][0])] = str(predictions[i])

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'retrainHeatmap':
                m_checkpoints += 1
                # initialize augment
                agen = augments.Augments()

                uset.setIter(uidx, retrain_h.iter)

                for sample in retrain_h.samples:
                    # init sample and augment
                    init_sample = dict(
                        id=0, f_idx=0, checkpoints=0,
                        aurl=None, feature=None, label=0,
                        iteration=0, centX=0, centY=0,
                        slideIdx=0, slide=None
                    )
                    init_augment = dict(
                        id=[], checkpoints=[], feature=[], label=[]
                    )

                    # remove samples stored if it already exists
                    remove_idx = []
                    for u in range(len(uset.users[uidx]['samples'])):
                        if uset.users[uidx]['samples'][u]['id'] == sample['id']:
                            remove_idx.append(u)

                    for r in remove_idx:
                        uset.users[uidx]['samples'].pop(r)
                        uset.users[uidx]['augments'].pop(r)

                    # add feature
                    init_sample['id'] = sample['id']
                    init_sample['aurl'] = str(sample['aurl'])
                    init_sample['slide'] = str(sample['slide'])

                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    slideIdx_set = dset.getSlideIdxSet(data_idx, object_num)

                    c_idx = getIdx(
                        x_centroid_set.astype(np.float),
                        y_centroid_set.astype(np.float),
                        slideIdx_set.astype(np.int),
                        np.float32(sample['centX']),
                        np.float32(sample['centY']),
                        slide_idx
                    )

                    f_idx = data_idx + c_idx

                    init_sample['f_idx'] = f_idx
                    init_sample['feature'] = feature_set[c_idx]
                    init_sample['label'] = 1 if sample['label'] == 1 else 0
                    init_sample['iteration'] = retrain_h.iter
                    init_sample['centX'] = sample['centX']
                    init_sample['centY'] = sample['centY']
                    init_sample['checkpoints'] = m_checkpoints

                    # add augment features
                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    slide_mean = dset.getWSI_Mean(slide_idx)
                    slide_std = dset.getWSI_Std(slide_idx)

                    a_imgs = agen.prepare_image(init_sample['aurl'], slide_mean, slide_std)
                    a_featureSet = iset.FC1_MODEL.predict(a_imgs)
                    a_featureSet = PCA.transform(a_featureSet)
                    a_labelSet = np.zeros((agen.AUG_BATCH_SIZE, )).astype(np.uint8)
                    a_idSet = []
                    a_checkpointSet = []
                    for i in range(agen.AUG_BATCH_SIZE):
                        a_idSet.append(init_sample['id'])
                        a_checkpointSet.append(init_sample['checkpoints'])
                    if init_sample['label'] > 0:
                        a_labelSet.fill(1)

                    init_augment['id'] = a_idSet
                    init_augment['feature'] = a_featureSet
                    init_augment['label'] = a_labelSet
                    init_augment['checkpoints'] = a_checkpointSet

                    uset.setAugmentData(uidx, init_augment)
                    uset.setTrainSampleData(uidx, init_sample)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    # each sample's augment block occupies its own contiguous
                    # AUG_BATCH_SIZE rows after the raw samples
                    a_start = sample_size + i * agen.AUG_BATCH_SIZE
                    a_end = a_start + agen.AUG_BATCH_SIZE
                    train_features[a_start:a_end] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[a_start:a_end] = uset.users[uidx]['augments'][i]['label']
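                # sanity check (sketch): the raw samples fill rows
                # [0, sample_size) and the augment blocks fill the rest, so
                # the training arrays are covered exactly
                assert train_size == sample_size * (1 + agen.AUG_BATCH_SIZE)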

                if tset_name is None:
                    tset_name = retrain_h.classifier

                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0, " ", len(train_labels), "Samples"

                slide_idx = dset.getSlideIdx(retrain_h.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Testing Start ... "
                t0 = time()
                if set.IS_HEATMAP == False:
                    scores = model.predict_prob(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0
                # set x and y maps
                retrain_h.setXandYmap()
                # write heatmaps
                retrain_h.setHeatMap(x_centroid_set, y_centroid_set, scores)
                # get heatmap data
                data = retrain_h.getData(0)

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'cancel':

                uset.users = []
                uset.u_size = 0
                is_normal_loaded = True
                tset_name = None
                is_reloaded = False
                m_checkpoints = 0

                del select
                del finalize
                del viewer
                del retrain_v
                del retrain_h
                del heat
                del t_train
                del report_label

                model = networks.Network()
                model.init_model()
                # dset = dataset.Dataset(set.PATH_TO_SPECIAL)

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'reviewSave':
                # modify labels if they are changed on review tab
                for q_sample in q_samples:
                    for sample in uset.users[uidx]['samples']:
                        if sample['id'] == q_sample['id']:
                            sample['label'] = 1 if q_sample['label'] == 1 else 0

                    for sample in uset.users[uidx]['augments']:
                        if sample['id'][0] == q_sample['id']:
                            sample['label'][:] = 1 if q_sample['label'] == 1 else 0

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)
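Every handler above finishes the same way: the JSON result is stored under the request's q_uid and the served request is trimmed off the Redis list named by set.REQUEST_QUEUE. A minimal client for that pattern might look like the sketch below; the queue name, the uid convention, and the request fields are assumptions rather than part of this excerpt.

import json
import time
import uuid

import redis

db = redis.Redis(host='localhost', port=6379)

def submit_and_wait(target, payload, queue='requests', timeout=30):
    # enqueue a request keyed by a fresh uid, then poll for the worker's
    # reply stored under that same uid (the db.set(q_uid, ...) calls above)
    q_uid = uuid.uuid4().hex
    request = dict(payload, target=target, uid=q_uid)
    db.rpush(queue, json.dumps(request))
    deadline = time.time() + timeout
    while time.time() < deadline:
        reply = db.get(q_uid)
        if reply is not None:
            db.delete(q_uid)
            return json.loads(reply)
        time.sleep(0.2)
    raise RuntimeError("no reply for request %s" % q_uid)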
Example no. 11
def run(video, pts_src, sport):
    Player_Marked = False

    # grab a reference to the video file
    i = 0
    FPS_SMOOTHING = 0.9

    fps = 0.0
    prev = 0

    distance = 0
    total_distance = 0
    speed_list = []
    max_speed = 0
    min_speed = 0
    pointer = 0
    t = 3
    frames_count = 0

    # trajectory buffers; presumably module-level globals in the original
    # file, initialized here so the snippet stands alone
    xs, ys, frames, points = [], [], [], []
    # drawing color and tracker registry; assumed values, since both names
    # come from elsewhere in the original module
    red = (0, 0, 255)  # BGR
    OPENCV_OBJECT_TRACKERS = {'csrt': cv2.TrackerCSRT_create}

    if (sport == 1):
        per_frame = 90
        t = 3
    else:
        per_frame = 60
        t = 2
    # loop over frames from the video stream
    vs = cv2.VideoCapture(video)
    #change_res(vs,854,480)

    trackers = cv2.MultiTracker_create()

    while True:
        frames_count = frames_count + 1
        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()
        frame = frame[1]
        # now = time.time()
        # fps = (fps*FPS_SMOOTHING + (1/(now - prev))*(1.0 - FPS_SMOOTHING))
        # prev = now
        # fpstext = 'FPS = ' + str(int(fps))

        # check to see if we have reached the end of the stream before
        # drawing the stats overlay on it
        if frame is None:
            average_speed = sum(speed_list) / len(speed_list) if speed_list else 0
            vs.release()
            cv2.destroyAllWindows()
            return xs, ys, frames, total_distance, max_speed, min_speed, average_speed

        # real-time stats overlay on screen
        real_time(frame, distance, total_distance, max_speed, min_speed)

        # resize the frame (so we can process it faster)
        #frame = imutils.resize(frame, width=1920,height=1080)
        # grab the updated bounding box coordinates (if any) for each
        # object that is being tracked
        (success, boxes) = trackers.update(frame)

        # loop over the bounding boxes and draw them on the frame
        for box in boxes:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)

            # print(str(x+int(w/2))+','+str(y+int(h/2)))

            if (i % per_frame == 0):
                # xs.append(x+int(w/2))
                # ys.append(y+int(h))
                x_map, y_map = map.map([x + int(w / 2), y + int(h)], pts_src,
                                       sport)
                xs.append(x_map)
                ys.append(y_map)
                frames.append(frames_count)

                points.append(x_map)
                points.append(y_map)

                if (len(points) >= 4):
                    #(x1,y1,x2,y2)
                    #distance = mt.distance(points[pointer-3],points[pointer-2],points[pointer-1],points[pointer])/100
                    distance = dis.calculateDistance(points[pointer - 3],
                                                     points[pointer - 2],
                                                     points[pointer - 1],
                                                     points[pointer], sport)
                    total_distance = total_distance + distance
                    speed = speedFunction.calculate_Speed(distance, t)
                    speed_list.append(speed)
                    max_speed = max(speed_list)
                    min_speed = min(speed_list)

                pointer += 2

            cv2.circle(frame, (x + int(w / 2), y + int(h)), 5, red, -1)

            i += 1
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if (Player_Marked == False):

            if key == ord("s"):
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                # mt.mappingMatch()
                # mt.mappingPlayer()

                box = cv2.selectROI("Frame",
                                    frame,
                                    fromCenter=False,
                                    showCrosshair=True)

                # create a new object tracker for the bounding box and add it
                # to our multi-object tracker
                tracker = OPENCV_OBJECT_TRACKERS['csrt']()
                trackers.add(tracker, frame, box)
                Player_Marked = True

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        if key == ord("p"):
            cv2.waitKey()

    # release the video file pointer
    vs.release()

    # close all windows
    cv2.destroyAllWindows()
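For context, run() takes a video path, the source points that map.map uses for the court homography, and a sport flag (1 selects the 90-frames-per-sample setting above). A usage sketch follows; the point layout and file name are assumptions, since map.map defines the real contract.

# sketch: track one player through a clip and report the summary stats
pts_src = [(52, 110), (870, 98), (910, 470), (20, 486)]  # hypothetical corners
xs, ys, frames, total_d, v_max, v_min, v_avg = run("match.mp4", pts_src, 1)
print("distance %.1f, max speed %.1f, avg speed %.1f" % (total_d, v_max, v_avg))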
Example no. 12
    def cg(self, force=False, com=False):
        # Generate the coarse grained structure
        # Set the b-factor field to something that reflects the secondary structure

        # If the coarse grained structure is set already, just return,
        # unless regeneration is forced.
        if self._cg and not force:
            return self._cg
        self._cg = []
        atid = 1
        bb = [1]
        fail = False
        previous = ''
        for residue, rss, resname in zip(self.residues, self.sstypes,
                                         self.sequence):
            # For DNA we need to move the O3' atom to the following residue
            # when calculating the COM. The force and com options ensure that
            # this part does not affect itp generation or anything else.
            if com:
                # Just an initialization, this should complain if it isn't updated in the loop
                store = 0
                for ind, i in enumerate(residue):
                    if i[0] == "O3'":
                        if previous != '':
                            residue[ind] = previous
                            previous = i
                        else:
                            store = ind
                            previous = i
                # We couldn't remove the O3' from the 5' end residue during the loop so we do it now
                if store > 0:
                    del residue[store]

            # Check if residue names have changed, for example because the
            # user has set residues interactively.
            residue = [(atom[0], resname) + atom[2:] for atom in residue]
            if residue[0][1] in ("SOL", "HOH", "TIP"):
                continue
            if residue[0][1] not in mapping.CoarseGrained.mapping:
                logging.warning("Skipped unknown residue %s\n" % residue[0][1])
                continue
            # Get the mapping for this residue
            # CG.map returns bead coordinates and mapped atoms
            # This will fail if there are (too many) atoms missing, which is
            # only problematic if a mapped structure is written; the topology
            # is inferred from the sequence. So this is the best place to raise
            # an error
            try:
                beads, ids = mapping.map(
                    residue, ca2bb=self.options['ForceField'].ca2bb)
                beads = zip(mapping.CoarseGrained.names[residue[0][1]], beads,
                            ids)
                if residue[0][1] in self.options['ForceField'].polar:
                    beads = add_dummy(beads, dist=0.14, n=2)
                elif residue[0][1] in self.options['ForceField'].charged:
                    beads = add_dummy(beads, dist=0.11, n=1)
            except ValueError:
                logging.error(
                    "Too many atoms missing from residue %s %d(ch:%s):",
                    residue[0][1], residue[0][2] - (32 << 20), residue[0][3])
                logging.error(repr([i[0] for i in residue]))
                fail = True
                # skip this residue so the bead loop below never runs on
                # stale or undefined bead data
                continue

            for name, (x, y, z), ids in beads:
                # Add the bead with coordinates and secondary structure id to the list
                self._cg.append((name, residue[0][1][:3], residue[0][2],
                                 residue[0][3], x, y, z, secstruc.ss2num[rss]))
                # Add the ids to the list, after converting them to indices to the list of atoms
                self.mapping.append([atid + i for i in ids])

            # Increment the atom id; This pertains to the atoms that are included in the output.
            atid += len(residue)

            # Keep track of the numbers for CONECTing
            bb.append(bb[-1] + len(beads))

        if fail:
            logging.error(
                "Unable to generate coarse grained structure due to missing atoms."
            )
            sys.exit(1)

        return self._cg
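Each tuple appended to self._cg is (bead name, residue name, residue id, chain, x, y, z, secondary-structure number), so listing the coarse-grained beads takes only a few lines. A sketch against that tuple layout, with the owning object hypothetical:

# sketch: dump the beads produced by cg()
for name, resn, resi, chain, x, y, z, ss in structure.cg():
    print("%-5s %-3s %5s %s  %8.3f %8.3f %8.3f  ss=%s"
          % (name, resn, resi, chain, x, y, z, ss))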
Example no. 13
    def mapping_env(self, save_map=False):
        xr, yr, name = mp.map(self.ranges, save_file=save_map)
        if name is not None:
            mp.plot_map(name)
        self.data_pc = np.array([xr, yr])
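mapping_env() wraps mp.map over self.ranges and caches the resulting x/y point cloud in self.data_pc; a short usage sketch, with the owning instance hypothetical:

# sketch: build and cache the point cloud, optionally saving the map image
robot.mapping_env(save_map=True)  # 'robot' is a hypothetical owner instance
x_points, y_points = robot.data_pc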