Example #1
    def run(self, parsedCommandLine):
        (buildSteps, self._custom_args) = (parsedCommandLine[0], parsedCommandLine[1])

        # this build MUST have a project name to run
        if self._project_name == "":
            Utilities.failExecution("Project name not set")

        # if the user has not specified any build steps, run the default
        if len(buildSteps) == 0:
            buildSteps = self._build_steps

        # run the build for the user specified configuration else run for
        # all configurations (the user can restrict this to build for
        # debug or release versions)
        if "configuration" in self._custom_args:
            self._config = self._custom_args["configuration"]
            if self._config != "release" and self._config != "debug":
                Utilities.failExecution("Unknown configuration [%s]" % self._config)
            print("\nbuilding configuration [%s]\n" % self._config)
            self.executeBuildSteps(buildSteps)
        else:
            for configuration in self._configurations:
                print("\nbuilding configuration [%s]\n" % configuration)
                self._config = configuration
                self.executeBuildSteps(buildSteps)

        print("********************")
        print("*     COMPLETE     *")
        print("********************")
Example #2
def main():
    # training parameters
    result_path = 'results/housingLiR_1.mse'
    model_name = 'housing_shiftAndScale'
    # normalization = Preprocess.zero_mean_unit_var
    normalization = Preprocess.shift_and_scale
    # cols_not_norm = (0,7,12)
    cols_not_norm = []

    # load and preprocess training data
    training_data = loader.load_dataset('data/housing_train.txt')
    testing_data = loader.load_dataset('data/housing_test.txt')
    Preprocess.normalize_features_all(normalization, training_data[0], testing_data[0], cols_not_norm)


    # start training
    model = rm.LinearRegression()
    model.build(training_data[0], training_data[1])
    training_mse = model.test(training_data[0], training_data[1], util.mse)
    testing_mse = model.test(testing_data[0], testing_data[1], util.mse)
    print 'Error for training data is:'
    print training_mse
    print 'Error for testing data is:'
    print testing_mse

    result = {}
    result['TrainingMSE'] = str(training_mse)
    result['TestingMSE'] = str(testing_mse)
    result['Theta'] = str(model.theta)

    # log the training result to file
    util.write_result_to_file(result_path, model_name, result)
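
The util.mse metric passed to model.test above is not shown in this example; a compatible mean-squared-error helper would look roughly like this (a sketch, assuming NumPy arrays or sequences of predictions and labels):

import numpy as np

def mse(predictions, labels):
    # sketch of a mean-squared-error metric compatible with model.test above
    predictions = np.asarray(predictions, dtype=float)
    labels = np.asarray(labels, dtype=float)
    return np.mean((predictions - labels) ** 2)
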
Example #3
    def __init__(self, fitness=2):

        screen = pygame.display.get_surface()
        self.area = screen.get_rect()

        self.fitness = fitness
        self.awareness = random.randint(50,60) + self.fitness
        self.speed = random.randint(30,40) + self.fitness

        self.radius = self.awareness

        self.target = None
        self.tempTarget = (random.randint(self.area.left + 50, self.area.right - 50),random.randint(self.area.top + 50, self.area.bottom - 50))

        self.high = False
        self.med = False
        self.low = False

        self.highFitness = Utilities.load_image('infectedSuper.png')
        self.medFitness = Utilities.load_image('infected1.png')
        self.lowFitness = Utilities.load_image('infected2.png')

        if(self.fitness >= 10):
            self.image, self.rect = self.highFitness
        elif(self.fitness >= 5):
            self.image, self.rect = self.medFitness
        elif(self.fitness < 5):
            self.image, self.rect = self.lowFitness
            self.low = True

        self.rect.center = (random.randint(self.area.left+6, self.area.right - 6), random.randint(self.area.top + 32, self.area.bottom - 6))

        pygame.sprite.Sprite.__init__(self)
        self.active = True
Example #4
 def parseDependencyFile(self):
     dependencyFilePath = os.path.join(FileSystem.getDirectory(FileSystem.DEPENDENCIES), "dependencies.txt")
     if not os.path.exists(dependencyFilePath):
         Utilities.failExecution("dependency file [%s] does not exist" % dependencyFilePath)
     requiredProjects = []
     with open(dependencyFilePath, 'r') as file:
         flag = False
         lineNum = 0
         splitLine = None
         for line in file:
             lineNum += 1
             splitLine = line.strip().split(None)
             if len(splitLine) == 0 or splitLine[0] == '#':
                 continue
             if splitLine[0] == '-' + self._project_name:
                 flag = True
             elif flag and '-' not in splitLine[0]:
                 requiredProjects.append(splitLine)
             elif flag and '-' in splitLine[0] and ('-' + self._project_name) != splitLine[0]:
                 flag = False
             elif not flag:
                 continue
             else:
                 Utilities.failExecution("Parse error in dependency file [%s] at line [%s]"
                                         % (dependencyFilePath, lineNum))
     print("Required projects for project [%s] are %s" % (self._project_name, requiredProjects))
     return requiredProjects
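
parseDependencyFile expects a plain-text dependencies.txt in which a line starting with '-' followed by a project name opens that project's section, the non-dash lines below it list the required projects, and '#' lines are comments. A hypothetical file matching those rules (names are made up, not taken from the project) might look like:

# dependencies.txt (hypothetical layout inferred from the parser above)
-MyProject
Utilities
CoreLib

-OtherProject
Utilities
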
 def __init__(self, vcs, parent = None):
     """
     Constructor
     
     @param vcs reference to the version control object
     @param parent parent widget (QWidget)
     """
     QDialog.__init__(self, parent)
     self.setupUi(self)
     
     self.vcsDirectoryCompleter = E4DirCompleter(self.vcsUrlEdit)
     self.vcsProjectDirCompleter = E4DirCompleter(self.vcsProjectDirEdit)
     
     self.protocolCombo.addItems(ConfigSvnProtocols)
     
     hd = Utilities.toNativeSeparators(QDir.homePath())
     hd = os.path.join(unicode(hd), 'subversionroot')
     self.vcsUrlEdit.setText(hd)
     
     self.vcs = vcs
     
     self.localPath = unicode(hd)
     self.networkPath = "localhost/"
     self.localProtocol = True
     
     self.vcsProjectDirEdit.setText(Utilities.toNativeSeparators(
         Preferences.getMultiProject("Workspace") or Utilities.getHomeDir()))
def main(filename: 'str') -> None:
    '''
    Finds the palindromes in the file and prints the number of occurrences of them.
    '''
    words = Utilities.tokenizeFile(filename)
    frequencies = computePalindromeFrequencies(words)
    Utilities.printFrequencies(frequencies)
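
computePalindromeFrequencies is not shown here; a compatible helper would simply count the tokens that read the same forwards and backwards (a sketch, assuming words is an iterable of strings):

from collections import Counter

def computePalindromeFrequencies(words):
    # sketch; counts occurrences of each palindromic word (case-insensitive)
    return Counter(w.lower() for w in words
                   if len(w) > 1 and w.lower() == w.lower()[::-1])
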
 def __init__(self, project, parent=None):
     """
     Constructor
     
     @param project reference to the project object
     @param parent parent widget of this browser (QWidget)
     """
     self.omniidl = Preferences.getCorba("omniidl")
     if self.omniidl == "":
         self.omniidl = Utilities.isWindowsPlatform() and \
             "omniidl.exe" or "omniidl"
     if not Utilities.isinpath(self.omniidl):
         self.omniidl = None
     
     ProjectBaseBrowser.__init__(self, project,
                                 ProjectBrowserInterfaceType, parent)
     
     self.selectedItemsFilter = \
         [ProjectBrowserFileItem, ProjectBrowserSimpleDirectoryItem]
     
     self.setWindowTitle(self.tr('Interfaces (IDL)'))
     
     self.setWhatsThis(self.tr(
         """<b>Project Interfaces Browser</b>"""
         """<p>This allows you to easily see all interfaces (CORBA IDL files)"""
         """ contained in the current project. Several actions can be"""
         """ executed via the context menu.</p>"""
     ))
     
     project.prepareRepopulateItem.connect(self._prepareRepopulateItem)
     project.completeRepopulateItem.connect(self._completeRepopulateItem)
 def on_addButton_clicked(self):
     """
     Private slot to add a new identity.
     """
     name, ok = QInputDialog.getText(
         self,
         self.tr("Add Identity"),
         self.tr("Identity Name:"),
         QLineEdit.Normal)
     
     if ok:
         if name:
             if name in self.__identities:
                 E5MessageBox.critical(
                     self,
                     self.tr("Add Identity"),
                     self.tr(
                         """An identity named <b>{0}</b> already exists."""
                         """ You must provide a different name.""").format(
                         name))
                 self.on_addButton_clicked()
             else:
                 identity = IrcIdentity(name)
                 identity.setIdent(Utilities.getUserName())
                 identity.setRealName(Utilities.getRealName())
                 self.__identities[name] = identity
                 self.identitiesCombo.addItem(name)
                 self.identitiesCombo.setCurrentIndex(
                     self.identitiesCombo.count() - 1)
         else:
             E5MessageBox.critical(
                 self,
                 self.tr("Add Identity"),
                 self.tr("""The identity has to have a name."""))
             self.on_addButton_clicked()
Example #9
 def __advance(self):
     """
     Private method to advance to the next error.
     """
     try:
         next(self.__spell)
     except StopIteration:
         self.__enableButtons(False)
         self.contextLabel.setText("")
         self.changeEdit.setText("")
         self.suggestionsList.clear()
         return
     
     self.__enableButtons(True)
     self.word, self.wordStart, self.wordEnd = self.__spell.getError()
     lcontext, rcontext = self.__spell.getContext(
         self.wordStart, self.wordEnd)
     self.changeEdit.setText(self.word)
     self.contextLabel.setText(
         '{0}<font color="#FF0000">{1}</font>{2}'.format(
             Utilities.html_encode(lcontext),
             self.word,
             Utilities.html_encode(rcontext)))
     suggestions = self.__spell.getSuggestions(self.word)
     self.suggestionsList.clear()
     self.suggestionsList.addItems(suggestions)
Example #10
def stressTest():
    """ Stress Test function """
    responseDict = {}
    
    # Do stuff
    os.chdir('./stress')
    p = subprocess.Popen(['fl-run-test','test_Stress.py'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    err, out = p.communicate() 
    length = len(out.split('\n'))

    #print out.split('\n')[length-2]
    if "FAILED" in out.split('\n')[length-2] :
        u.printError('URL not reachable...')
        u.printError('Stress testing aborted...')
        responseDict['status'] = False
    else :
        u.printSuccess('URL reachable...')
        u.printInfo('Stress testing initiated...')
        p = subprocess.Popen(['fl-run-bench','test_Stress.py','Stress.test_stress'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        #out,err = p.communicate()
        #print out.split('\n')
        p = subprocess.Popen(['fl-build-report','--html','stress-bench.xml'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, error = p.communicate()

        #returnVal = p.communicate()

        # get the path of the generated report...
        path = out.split('\n')[1]
        u.printInfo('Report stored at '+path)
        responseDict['status'] = True
        responseDict['path'] = path
    
    os.chdir('..')
    return responseDict
 def __buildModulesDict(self):
     """
     Private method to build a dictionary of modules contained in the package.
     
     @return dictionary of modules contained in the package.
     """
     moduleDict = {}
     modules = glob.glob(Utilities.normjoinpath(self.packagePath,'*.py')) + \
               glob.glob(Utilities.normjoinpath(self.packagePath,'*.pyw')) + \
               glob.glob(Utilities.normjoinpath(self.packagePath,'*.ptl'))
     tot = len(modules)
     progress = KQProgressDialog(self.trUtf8("Parsing modules..."),
         QString(), 0, tot, self)
     try:
         prog = 0
         progress.show()
         QApplication.processEvents()
         for module in modules:
             progress.setValue(prog)
             QApplication.processEvents()
             prog = prog + 1
             try: 
                 mod = Utilities.ModuleParser.readModule(module, caching = False)
             except ImportError:
                 continue
             else:
                 name = mod.name
                 if name.startswith(self.package):
                     name = name[len(self.package) + 1:]
                 moduleDict[name] = mod
     finally:
         progress.setValue(tot)
     return moduleDict
def drawSkewedCoordinateSystem(context):
    # alpha is 22.5 degrees and beta is 15 degrees.
    alpha = math.pi/8
    beta = math.pi/12
    # Create a rectangle that is 72 units on a side
    # with its origin at (0,0).
    r = Quartz.CGRectMake(0, 0, 72, 72)

    Quartz.CGContextTranslateCTM(context, 144, 144)
    # Draw the coordinate axes untransformed.
    Utilities.drawCoordinateAxes(context)
    # Fill the rectangle.
    Quartz.CGContextFillRect(context, r)

    # Create an affine transform that skews the coordinate system,
    # skewing the x-axis by alpha radians and the y-axis by beta radians.
    skew = Quartz.CGAffineTransformMake(1, math.tan(alpha), math.tan(beta), 1, 0, 0)
    # Apply that transform to the context coordinate system.
    Quartz.CGContextConcatCTM(context, skew)

    # Set the fill and stroke color to a dark blue.
    Quartz.CGContextSetRGBStrokeColor(context, 0.11, 0.208, 0.451, 1)
    Quartz.CGContextSetRGBFillColor(context, 0.11, 0.208, 0.451, 1)

    # Draw the coordinate axes again, now transformed.
    Utilities.drawCoordinateAxes(context)
    # Set the fill color again but with a partially transparent alpha.
    Quartz.CGContextSetRGBFillColor(context, 0.11, 0.208, 0.451, 0.7)
    # Fill the rectangle in the transformed coordinate system.
    Quartz.CGContextFillRect(context, r)
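
The skew transform created with CGAffineTransformMake(1, tan(alpha), tan(beta), 1, 0, 0) maps a point (x, y) to (x + y*tan(beta), x*tan(alpha) + y). A small Quartz-independent sketch of that mapping:

import math

def apply_skew(x, y, alpha, beta):
    # CGAffineTransformMake(a, b, c, d, tx, ty) maps (x, y) to
    # (a*x + c*y + tx, b*x + d*y + ty); here a = d = 1, b = tan(alpha), c = tan(beta)
    return (x + math.tan(beta) * y, math.tan(alpha) * x + y)

# e.g. the rectangle corner (72, 72) under alpha = pi/8, beta = pi/12
print(apply_skew(72, 72, math.pi / 8, math.pi / 12))
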
    def __createServerCertificateEntry(self, server, cert):
        """
        Private method to create a server certificate entry.
        
        @param server server name of the certificate (string)
        @param cert certificate to insert (QSslCertificate)
        """
        # step 1: extract the info to be shown
        if qVersion() >= "5.0.0":
            organisation = Utilities.decodeString(", ".join(cert.subjectInfo(QSslCertificate.Organization)))
            commonName = Utilities.decodeString(", ".join(cert.subjectInfo(QSslCertificate.CommonName)))
        else:
            organisation = Utilities.decodeString(cert.subjectInfo(QSslCertificate.Organization))
            commonName = Utilities.decodeString(cert.subjectInfo(QSslCertificate.CommonName))
        if organisation is None or organisation == "":
            organisation = self.tr("(Unknown)")
        if commonName is None or commonName == "":
            commonName = self.tr("(Unknown common name)")
        expiryDate = cert.expiryDate().toString("yyyy-MM-dd")

        # step 2: create the entry
        items = self.serversCertificatesTree.findItems(organisation, Qt.MatchFixedString | Qt.MatchCaseSensitive)
        if len(items) == 0:
            parent = QTreeWidgetItem(self.serversCertificatesTree, [organisation])
        else:
            parent = items[0]

        itm = QTreeWidgetItem(parent, [commonName, server, expiryDate])
        itm.setData(0, self.CertRole, cert.toPem())
def run_server(port):
    """
    Main function to run the server.
    :param port: the port number which the server should run on
    :return:
    """
    # load config file for DH and username/password
    global crypto_service, password_hash_dict, chatting_service, user_addr_dict, auth_dict
    g = c.DH_GENERATOR
    p = util.load_df_param_from_file(c.DH_CONFIG_PATH)
    crypto_service = CryptoService(rsa_pri_path=c.PRI_KEY_PATH, p=p, g=g)
    chatting_service = ChattingService(user_addr_dict, auth_dict, crypto_service)
    password_hash_dict = util.load_pickle_file(c.PW_HASH_PATH)
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        print('Binding to ip: {}'.format(local_ip))
        serv_addr = (local_ip, port)
        serv = SocketServer.UDPServer(serv_addr, ClientRequestHandler)
        # dump the server address to config file for client using
        util.save(c.SERVER_CONFIG_PATH, serv_addr, True)
    except socket.error:
        print c.FAIL_SRV_INIT
        return

    print c.SRV_START

    try:
        serv.serve_forever()
    except KeyboardInterrupt:
        # Stop server when seeing ctrl-c
        pass
    except:
        print c.FAIL_SRV_START
    finally:
        serv.server_close()
Example #15
 def visit(self, line):
     resultLine = ''
     lastIndex = 0
     template = re.compile("{|}")
     rezults = template.finditer(line)
     indentSize = Utilities.getIndentSize(line)
     firstChar = Utilities.findNextNonWhiteSpaceCharIndex(line, 0)
     for rezult in rezults:
         index = rezult.start()
         if not(Utilities.isInsideTextLiteral(line, index) or index == firstChar):
             resultLine += line[lastIndex:index]
             resultLine += '\n'
             if(line[index] == '{'):
                 resultLine += ' ' * indentSize
                 indentSize += 4 
             else:
                 indentSize -= 4           
                 resultLine += ' ' * indentSize
             resultLine += line[index]
             resultLine += '\n'
             lastIndex = Utilities.findNextNonWhiteSpaceCharIndex(line, index + 1 )
             if(lastIndex != -1):
                 resultLine += ' ' * indentSize
     if(lastIndex != -1):
         resultLine += line[lastIndex:]
     return resultLine
Example #16
def main():
    robot = (1,1)
    dest = (-1,-1)

    opp = (0,.2)

    perp_point = get_perpendicular_point(robot, dest, opp)

    if perp_point is None:
        print "notevenclose"

    if do_i_need_to_avoid(opp, perp_point):

        # not sure what to do right now
        go_above = above_or_below(robot, dest, opp)
        # eventually do this

        crit_point = point_to_go_through(opp, perp_point)

        angle = Utilities.get_angle_between_points(robot[0], robot[1], crit_point[0], crit_point[1])

        c = Utilities.get_distance_between_points(robot[0], robot[1], dest[0], dest[1])

        a = c*np.cos(angle)
        b = c*np.sin(angle)

        x = robot[0] + a
        y = robot[1] + b

        print (x,y)

    else:
        print "do not need to avoid"
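
The two Utilities helpers used above are standard plane geometry; plausible implementations (sketches, not necessarily the project's own code) are:

import math

def get_angle_between_points(x1, y1, x2, y2):
    # angle of the vector from (x1, y1) to (x2, y2), in radians (sketch)
    return math.atan2(y2 - y1, x2 - x1)

def get_distance_between_points(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2) (sketch)
    return math.hypot(x2 - x1, y2 - y1)
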
def drawColoredLogo(context):
    r = CGRectMake(0, 0, 100, 100)
    CGContextSaveGState(context)
    if 1:
        # Position the center of the rectangle on the left.
        CGContextTranslateCTM(context, 140, 140)
        # Rotate so that the rectangles are rotated 45 degrees
        # about the current coordinate origin.
        CGContextRotateCTM(context, Utilities.DEGREES_TO_RADIANS(45))
        # Translate so that the center of the rect is at the previous origin.
        CGContextTranslateCTM(context, -r.size.width/2, -r.size.height/2)
        # Set the fill color to a purple color.
        CGContextSetFillColorWithColor(context, Utilities.getRGBOpaquePurpleColor())
        # Fill the first rectangle.
        CGContextFillRect(context, r)
        # Position to draw the right-most rectangle.
        CGContextTranslateCTM(context, 60, -60)
        # Set the fill color to a yellow color.
        CGContextSetFillColorWithColor(context, Utilities.getRGBOpaqueYellowColor())
        CGContextFillRect(context, r)

        # Position for the center rectangle.
        CGContextTranslateCTM(context, -30, +30)
        # Set the stroke color to an orange color.
        CGContextSetStrokeColorWithColor(context, Utilities.getRGBOpaqueOrangeColor())
        # Stroke the rectangle with a linewidth of 12.
        CGContextStrokeRectWithWidth(context, r, 12)
    CGContextRestoreGState(context)
Example #18
 def calc_arvo_geo_measures(self, radius):
   """
   Calculates geometrical measures: volume, area
   
   """
   
   _temp_file = Utilities.get_random_name()
   _temp_file = "temp"
   
   # TODO: Need a way to set the surface radius for each atom. Probably add it
   #       as a new column in the atoms.in file.
   
   # prepare a temporary file
   self._writeATS(_temp_file, radius)
   
   command = "%s protein=%s" % ("/Users/Tomas/git/Analysis-Toolkit/thirdparty/arvo_c/arvo_c", _temp_file)
   output, stderr, status = Utilities.run_sub_process(command)
       
   # if the execution of the command was successful:
   if not status:
     
     output_array = output.split()
     
     self.arvo_calc = True
     self.arvo_volume = np.float64(output_array[1])
     self.arvo_area = np.float64(output_array[3])
     self.arvo_spheres = np.int16(output_array[6])
 
   os.unlink(_temp_file)
Example #19
    def __init__(self):
        screen = pygame.display.get_surface()
        self.area = screen.get_rect()

        self.fitness = random.randint(5,15)
        self.awareness = 70 + self.fitness
        self.speed = 50 + self.fitness
        self.radius = self.awareness
        # self.theta = 0
        # self.thetaVelocity = 1

        self.active = True
        self.target = None

        self.med = False
        self.high = False

        self.medFitness = Utilities.load_image('hunter2.png')
        self.highFitness = Utilities.load_image('hunter1.png')

        if(self.fitness >= 30):
            self.image, self.rect = self.highFitness
            self.high = True
        elif(self.fitness >= 5):
            self.image, self.rect = self.medFitness
            self.med = True

        self.tempTarget = (self.area.centerx, self.area.centery)

        self.rect.move(50,50)
        pygame.sprite.Sprite.__init__(self)

        print "CREATED HUNTER WITH FITNESS " + str(self.fitness) + " and speed " + str(self.speed) + " and awareness " + str(self.awareness)
Example #20
def peakDetectionTest(path=None, noise=0.08):
    """Use pre-processing funcs to detect peaks"""
    if path == None:
        path = "testfiles"
    names = Utilities.createRandomStrings(8, 6)
    fname = os.path.join(path, "spectraldata.txt")
    peaks = Utilities.createSimulatedSpectralData(fname, names, noise=noise)
    conf = {
        "format": "databycolumn",
        "saveplots": 1,
        "marker": "-",
        "markers": "-,x",
        "alpha": 0.7,
        "normalise": 1,
        "function1": "smooth",
        "function2": "baselinecorrection",
        "function3": "detectpeaks",
    }
    p = Pipeline()
    p.createConfig("temp.conf", **conf)
    p.openRaw(fname)
    results = p.run()

    # compare predicted peaks
    successrates = []
    res = results[results.keys()[0]]
    for name in peaks:
        # print name, sorted(peaks[name]), results[name][0]
        orig = set(peaks[name])
        pred = set(res[name][0])
        s = float(len(orig.intersection(pred))) / (len(orig))
        successrates.append(s)
    return np.mean(successrates)
 def _getDict(cls, lang, pwl = "", pel = ""):
     """
     Protected classmethod to get a new dictionary.
     
     @param lang the language to be used as the default (string).
         The string should be in language locale format (e.g. en_US, de).
     @keyparam pwl name of the personal/project word list (string)
     @keyparam pel name of the personal/project exclude list (string)
     @return reference to the dictionary (enchant.Dict)
     """
     if not pwl:
         pwl = unicode(Preferences.getEditor("SpellCheckingPersonalWordList"))
         if not pwl:
             pwl = os.path.join(Utilities.getConfigDir(), "spelling", "pwl.dic")
         d = os.path.dirname(pwl)
         if not os.path.exists(d):
             os.makedirs(d)
     
     if not pel:
         pel = unicode(Preferences.getEditor("SpellCheckingPersonalExcludeList"))
         if not pel:
             pel = os.path.join(Utilities.getConfigDir(), "spelling", "pel.dic")
         d = os.path.dirname(pel)
         if not os.path.exists(d):
             os.makedirs(d)
     
     try:
         d = enchant.DictWithPWL(lang, pwl, pel)
     except:
         # Catch all exceptions, because if pyenchant isn't available, you
         # can't catch the enchant.DictNotFound error.
         d = None
     return d
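
The returned object is a regular pyenchant dictionary, so callers use the standard check/suggest API; a minimal usage sketch (the enclosing class name is assumed, it is not shown in the snippet):

d = SpellChecker._getDict("en_US")   # class name assumed; not shown in the snippet
if d is not None:
    print(d.check("speling"))        # False for a misspelled word
    print(d.suggest("speling")[:5])  # a few correction candidates
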
Example #22
    def on_diffButton_clicked(self):
        """
        Private slot to handle the Compare button press.
        """
        filename1 = Utilities.toNativeSeparators(self.file1Edit.text())
        try:
            f1 = open(filename1, "r", encoding="utf-8")
            lines1 = f1.readlines()
            f1.close()
        except IOError:
            E5MessageBox.critical(
                self,
                self.tr("Compare Files"),
                self.tr(
                    """<p>The file <b>{0}</b> could not be read.</p>""")
                .format(filename1))
            return

        filename2 = Utilities.toNativeSeparators(self.file2Edit.text())
        try:
            f2 = open(filename2, "r", encoding="utf-8")
            lines2 = f2.readlines()
            f2.close()
        except IOError:
            E5MessageBox.critical(
                self,
                self.tr("Compare Files"),
                self.tr(
                    """<p>The file <b>{0}</b> could not be read.</p>""")
                .format(filename2))
            return
        
        self.__compare(lines1, lines2)
Example #23
def gather_pb8k(file_path, variances, N):
    class ParseCallback(object):
        def __init__(self, variance, N):
            self.variance = variance
            self.N = N
            self.index = {}
            self.results = {}

        def __call__(self, line_number, comps):
            if line_number==0:
                return
            gene = comps[PB8KFileInfo.TF1.HUGO]
            if "," in gene:
                multiple_genes = gene.split(",")
                for g in multiple_genes:
                    row = row_from_comps(comps, PB8KFileInfo.TF1, self.variance, self.N)
                    self.process_row(g, row)
            else:
                row = row_from_comps(comps, PB8KFileInfo.TF1, self.variance, self.N)
                self.process_row(gene, row)

        def process_row(self, gene, row):
            if row is None:
                return
            snp = row[1]
            self.index[snp] = gene

            if not gene in self.results:
                self.results[gene] = []
            self.results[gene].append(row)

    callback = ParseCallback(variances, N)
    Utilities.parse_file(file_path, callback, expect_throw=True)
    return callback.index, callback.results
Example #24
def myDrawRedBlackCheckerBoardPattern(info, patternCellContext):
    # This pattern proc draws a red and a black rectangle
    # patch representing the minimum cell needed to paint a
    # checkerboard with that pattern.
    #
    # Each 'cell' of the checkerboard is 2 units on a side.
    #
    # This code uses CGColorRefs which are available in Panther
    # and later only. Patterns are available in all versions of
    # Mac OS X but this code uses CGColorRefs for convenience
    # and efficiency.

    # Paint a black checkerboard box.
    CGContextSetFillColorWithColor(patternCellContext,
            Utilities.getRGBOpaqueBlackColor())
    # This is a 1x1 unit rect whose origin is at 0,0 in pattern space.
    CGContextFillRect(patternCellContext, CGRectMake(0.0, 0.0, 1.0, 1.0))
    # This is a 1x1 unit rect whose origin is at 1,1 in pattern space.
    CGContextFillRect(patternCellContext, CGRectMake(1.0, 1.0, 1.0, 1.0))

    # Paint a red checkerboard box.
    CGContextSetFillColorWithColor(patternCellContext,
            Utilities.getRGBOpaqueRedColor())
    # This is a 1x1 unit rect whose origin is at 1,0 in pattern space,
    # that is, immediately to the right of first black checkerboard box.
    CGContextFillRect(patternCellContext, CGRectMake(1.0, 0.0, 1.0, 1.0))
    # This is a 1x1 unit rect whose origin is at 0,1 in pattern space,
    # that is, immediately above the first black checkerboard box.
    CGContextFillRect(patternCellContext, CGRectMake(0.0, 1.0, 1.0, 1.0))
def detectMissingModules(folder, outputFile):
    #print("%s" % (inspect.stack()[0][3]))
    classNamesSet = set()
    includeClassSet = set()
    for abspath, dirs, files in os.walk(folder):
        for file in files:
            #print(file)
            if file.endswith(".pp") and not os.path.islink(os.path.join(abspath, file)):
                #print(file)
                fileObj = SourceModel.SM_File.SM_File(os.path.join(abspath, file))
                classNames, fileIncludes = detectMissingClassesByInclude(fileObj, outputFile)
                #print("Classes: %s" % ','.join(n for n in classNames))
                classNamesSet = classNamesSet.union(classNames)
                #print("Union with %s: %s" % (','.join(n for n in classNames), ','.join(n for n in classNamesSet)))
                includeClassSet = includeClassSet.union(fileIncludes)
    #print("%s: Classes: %s" % (inspect.stack()[0][3], ','.join(n for n in classNamesSet)))
    #print("%s: Class includes: %s" % (inspect.stack()[0][3], ','.join(i for i in includeClassSet)))
    missingDependencySet = includeClassSet.difference(classNamesSet)
    #print(includeClassSet)
    #print(classNamesSet)
    #print(missingDependencySet)
    Utilities.myPrint("Missing dependency set: %s" % ','.join(c for c in missingDependencySet))
    #for md in missingDependencySet:
        #Utilities.reportSmell(outputFile, folder, CONSTS.SMELL_MIS_DEP, CONSTS.FILE_RES)
    with open('missDependencies.puppeteer.txt', 'a+') as f:
        for md in missingDependencySet:
            f.write("%s\n" % md)
            Utilities.reportSmell(outputFile, folder, CONSTS.SMELL_MIS_DEP, CONSTS.FILE_RES)
Example #26
    def build(self, features, labels, lamda, term_fun, thresh, is_batch=True):
        # construct x with the bias column
        x = [[1] + f.tolist() for f in features]
        x = np.array(x)
        y = np.array([[l] for l in labels])

        # initialize the theta and iteration counter
        # theta = np.zeros((len(x[0]), 1))
        theta = np.array([[random.random()] for i in range(len(x[0]))])

        self.iter_count = 0
        acc_count = [0, 0]

        # recursively update theta
        while not term_fun(theta, features, y, thresh, acc_count):
        # while not term_fun(theta, features, y, self.accs, thresh):
            if is_batch:
                hx = np.array(util.logistic_fun_batch(theta, features))
                diffs = y - hx
                for j in range(len(theta)):
                    sum = 0
                    for i in range(len(diffs)):
                        sum += diffs[i][0] * x[i][j]
                    theta[j][0] = theta[j][0] + lamda * sum
            else:
                for i in range(len(x)):
                    hx = util.logistic_fun(theta, x[i])
                    diff = y[i][0] - hx
                    for j in range(len(theta)):
                        theta[j][0] = theta[j][0] + lamda * diff * x[i][j]
            self.iter_count += 1
            self.theta = theta
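
The nested loops in the batch branch implement the gradient-ascent update theta_j += lamda * sum_i (y_i - h_i) * x_ij. With NumPy the same step can be written without explicit loops (a sketch, assuming x is the bias-augmented feature matrix and y, theta are column vectors as above, with the predictions taken as the logistic of x dotted with theta):

import numpy as np

def batch_update(theta, x, y, lamda):
    # vectorized sketch equivalent to the nested-loop batch update above
    hx = 1.0 / (1.0 + np.exp(-x.dot(theta)))   # logistic predictions, shape (n_samples, 1)
    diffs = y - hx
    # theta_j += lamda * sum_i diffs_i * x_ij, for all j at once
    return theta + lamda * x.T.dot(diffs)
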
Example #27
    def drawWithClippingMask(context, theMaskingImageURL, maskwidth, maskheight):
        # An array of CGColor objects.
        colors = (
                Utilities.getRGBOpaqueDarkGreenColor(), 
                Utilities.getRGBOpaqueDarkBlueColor(),  
                Utilities.getRGBOpaqueBlueColor(), 
                Utilities.getRGBOpaqueRedColor())
        maskBitsPerComponent = 8
        bytesPerRow = ( (maskwidth * maskBitsPerComponent) + 7)/8
        shouldInterpolate = True
        maskDataProvider = CGDataProviderCreateWithURL(theMaskingImageURL)
    
        if maskDataProvider is None:
            print >>sys.stderr, "Couldn't create Image Mask provider!"
            return
        mask = CGImageMaskCreate(maskwidth, maskheight, maskBitsPerComponent,
                                        maskBitsPerComponent, maskwidth,
                                        maskDataProvider, None, shouldInterpolate)
        del maskDataProvider

        if mask is None:
            print >>sys.stderr, "Couldn't create Image Mask!"
            return

        maskRect = CGRectMake(0, 0, maskwidth/3, maskheight/3)

        # Position for drawing the mask at the left side of the figure.
        CGContextTranslateCTM(context, 50, 50 )
        # Set the context fill color to a CGColor object that is black.
        CGContextSetFillColorWithColor(context, Utilities.getRGBOpaqueBlackColor())
        # Draw the mask. It is painted with the black fill color.
        CGContextDrawImage(context, maskRect, mask)

        # Position to the right of the mask just painted.
        CGContextTranslateCTM(context, CGRectGetWidth(maskRect) + 25,  0)

        # Clip to the mask.
        CGContextClipToMask(context, maskRect, mask)
        # Release the mask since this code no longer needs it.
        del mask

        # Make a rect that has a width and height 1/3 that of the image mask.
        rect = CGRectMake(0, 0, CGRectGetWidth(maskRect)/3, CGRectGetHeight(maskRect)/3)

        CGContextTranslateCTM(context, 0, 2*CGRectGetHeight(rect))
        
        # Draw a 3 x 3 grid of rectangles, setting the color for each rectangle
        # by cycling through the array of CGColor objects in the 'colors' array.
        for j in range(3):
            CGContextSaveGState(context)
            for i in range(3):
                # Draw a row of rectangles.
                # Set the fill color using one of the CGColor objects in the 
                # colors array.	    
                CGContextSetFillColorWithColor(context, colors[(i+j) % 4])
                CGContextFillRect(context, rect)
                CGContextTranslateCTM(context, CGRectGetWidth(rect), 0)
            CGContextRestoreGState(context)
            # Position to draw the next row.
            CGContextTranslateCTM(context, 0, -CGRectGetHeight(rect))
def printAccessionNumbersFromName(filePath,dbFolderPath,outFolder):
    file = open(filePath,"r")
    o = open(outFolder+"/filter.txt","w")
    org = []
    accessions = []
    for line in file:
        temp = '_'.join(line.split("_")[:2])
        org.append(temp.strip())
    for dir in util.return_recursive_dir_files(dbFolderPath):
        dirSplit = dir.split("/")
        organism = ""
        accession = ""
        for f in util.return_recursive_files(dir):
            #print f
            seq_record = SeqIO.parse(open(f), "genbank").next()
            accession = seq_record.annotations['accessions'][0]
            organism_tmp = seq_record.annotations['organism'].replace(' ', '_')
            organism_tmp_1 = re.sub('[\[\]]', "", organism_tmp)
            organism = '_'.join(organism_tmp_1.split('_')[:2])#+"_"+accession
        try:
            if(org.index(organism) >= 0):
                accessions.append(accession)
        except:
            #print "none"
            pass
    for accesion in accessions:
        o.write(accesion+"\n")            
    o.close()
    file.close()
Example #29
 def __findDuplicates(self, cond, special, showMessage=False,
                      index=QModelIndex()):
     """
     Private method to check if an entry already exists.
     
     @param cond condition to check (string)
     @param special special condition to check (string)
     @param showMessage flag indicating a message should be shown,
         if a duplicate entry is found (boolean)
     @param index index that should not be considered duplicate
         (QModelIndex)
     @return flag indicating a duplicate entry (boolean)
     """
     idx = self.__model.getWatchPointIndex(cond, special)
     duplicate = idx.isValid() and \
         idx.internalPointer() != index.internalPointer()
     if showMessage and duplicate:
         if not special:
             msg = self.tr("""<p>A watch expression '<b>{0}</b>'"""
                           """ already exists.</p>""")\
                 .format(Utilities.html_encode(cond))
         else:
             msg = self.tr(
                 """<p>A watch expression '<b>{0}</b>'"""
                 """ for the variable <b>{1}</b> already exists.</p>""")\
                 .format(special, Utilities.html_encode(cond))
         E5MessageBox.warning(
             self,
             self.tr("Watch expression already exists"),
             msg)
     
     return duplicate
Example #30
def replicatesTest():
    """Tests handling of replicates"""

    p = Pipeline()
    conf = {
        "format": "databycolumn",
        "groupbyname": 1,
        "parsenamesindex": 1,
        "parsemethod": "numeric",
        "replicates": 1,
        "model1": "linear",
        "variable1": "a",
        "model2": "sigmoid",
        "variable2": "tm",
    }
    p.createConfig("temp.conf", **conf)
    reps = ["rep1", "rep2", "rep3"]
    path = "testfiles/replicates"
    Utilities.createDirectory(path)
    names = Utilities.createRandomStrings(3, 6)
    for r in reps:
        rpath = os.path.join(path, r)
        Utilities.createGroupedData(rpath, names=names)
    p.addFolder(path)
    p.run()
    return
    def prepare_image(self, n):

        self.second_image = []

        #read in image n
        path = self.directory + Constants.working_directory + Constants.image_directory + Constants.reduced_prefix
        temp_image = Utilities.get_image_data(path, n)

        set, i = Utilities.n_to_set_and_n(n)

        file = path + Constants.file_name + "_" + str(
            set) + "_" + Utilities.format_index(i) + Constants.fits_extension
        head = getheader(file, ignore_missing_end=True)

        head = getheader(path + Constants.file_name + "_" + str(set) + "_" +
                         Utilities.format_index(i) + Constants.fits_extension,
                         ignore_missing_end=True)

        #read in the image n-1
        first_image = Utilities.get_image_data(path, n - 1)

        #calculate the shift between these two images
        x_shift = self.x_shifts[n - 1] - self.x_shifts[n - 2]
        y_shift = self.y_shifts[n - 1] - self.y_shifts[n - 2]

        #calculate the mean of the preceding image
        mean = np.mean(first_image)

        for i in range(len(temp_image)):
            for j in range(len(temp_image[0])):

                #find the expected position of pixel (j, i) from image n in
                #image n-1. This is done to align the images
                y = int(i - round(y_shift))
                x = int(j - round(x_shift))

                #if the expected position is within the bounds of the image
                if x > 0 and y > 0 and x < len(
                        temp_image[0]) and y < len(temp_image):

                    #divide the count stored in pixel (j, i) by the count stored
                    #in the equivalent pixel in the preceding image divided by the
                    #mean of the preceding image.
                    temp_image[i][j] = temp_image[i][j] / (first_image[y][x] /
                                                           mean)

        #calculate median of image n
        median = np.median(temp_image)

        #loop through each pixel in image n
        for i in range(len(temp_image)):

            #prepare array to store the final version of image n for scanning
            self.second_image.append([])

            for j in range(len(temp_image[0])):

                #if the count in pixel (j, i) is greater than the median
                if temp_image[i][j] > median:

                    #stores the number of pixels surrounding this pixel
                    #that have a count higher than the median
                    count = 0

                    #find number of surrounding pixels which have a count higher
                    #than the median
                    for k in range(-1, 2):
                        for l in range(-1, 2):

                            if not (k == 0 and l == 0):

                                ik = i + k
                                jl = j + l

                                if ik >= 0 and jl >= 0 and ik < len(
                                        temp_image) and jl < len(
                                            temp_image[0]):

                                    if temp_image[ik][jl] > median:
                                        count += 1

                    #if the number of surrounding pixels with a count higher
                    #than the median is less than 3, append the median rather
                    #than the actual pixel value to the final version of the image
                    if count < 3:
                        self.second_image[i].append(median)
                    else:
                        self.second_image[i].append(temp_image[i][j])

                #pixels with a count lower than the median are automatically
                #inserted into the final version
                else:

                    self.second_image[i].append(temp_image[i][j])
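
The pixel loop above divides each pixel of image n by the aligned pixel of image n-1 normalised to that image's mean. The same alignment-and-divide step can be sketched with NumPy indexing (assuming both images are 2-D NumPy arrays and the shifts are computed as above):

import numpy as np

def align_and_divide(image_n, image_prev, x_shift, y_shift):
    # vectorised sketch of the alignment-and-divide loop above
    out = np.array(image_n, dtype=float)
    prev = np.array(image_prev, dtype=float)
    mean_prev = prev.mean()
    dy, dx = int(round(y_shift)), int(round(x_shift))
    h, w = out.shape
    # aligned coordinates in the previous image for every pixel (i, j)
    ii, jj = np.indices((h, w))
    yy, xx = ii - dy, jj - dx
    inside = (yy > 0) & (xx > 0) & (yy < h) & (xx < w)
    out[inside] = out[inside] / (prev[yy[inside], xx[inside]] / mean_prev)
    return out
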
Example #32
 def slow_print_self(self):
     Util.slow_print(str(self))
Example #33
def readmodule_ex(module, path=[]):
    """
    Read a CORBA IDL file and return a dictionary of classes, functions and
    modules.

    @param module name of the CORBA IDL file (string)
    @param path path the file should be searched in (list of strings)
    @return the resulting dictionary
    """
    global _modules

    dict = {}
    dict_counts = {}

    if module in _modules:
        # we've seen this file before...
        return _modules[module]

    # search the path for the file
    f = None
    fullpath = list(path)
    f, file, (suff, mode, type) = ClassBrowsers.find_module(module, fullpath)
    if f:
        f.close()
    if type not in SUPPORTED_TYPES:
        # not CORBA IDL source, can't do anything with this module
        _modules[module] = dict
        return dict

    _modules[module] = dict
    classstack = []  # stack of (class, indent) pairs
    indent = 0
    try:
        src = Utilities.readEncodedFile(file)[0]
    except (UnicodeError, IOError):
        # can't do anything with this module
        _modules[module] = dict
        return dict

    lineno, last_lineno_pos = 1, 0
    lastGlobalEntry = None
    cur_obj = None
    i = 0
    while True:
        m = _getnext(src, i)
        if not m:
            break
        start, i = m.span()

        if m.start("Method") >= 0:
            # found a method definition or function
            thisindent = indent
            meth_name = m.group("MethodName")
            meth_sig = m.group("MethodSignature")
            meth_sig = meth_sig and meth_sig.replace('\\\n', '') or ''
            meth_sig = _commentsub('', meth_sig)
            meth_sig = _normalize(' ', meth_sig)
            lineno = lineno + src.count('\n', last_lineno_pos, start)
            last_lineno_pos = start
            # close all interfaces/modules indented at least as much
            while classstack and \
                    classstack[-1][1] >= thisindent:
                if classstack[-1][0] is not None:
                    # record the end line
                    classstack[-1][0].setEndLine(lineno - 1)
                del classstack[-1]
            if classstack:
                # it's an interface/module method
                cur_class = classstack[-1][0]
                if isinstance(cur_class, Interface) or \
                        isinstance(cur_class, Module):
                    # it's a method
                    f = Function(None, meth_name, file, lineno, meth_sig)
                    cur_class._addmethod(meth_name, f)
                # else it's a nested def
                else:
                    f = None
            else:
                # it's a function
                f = Function(module, meth_name, file, lineno, meth_sig)
                if meth_name in dict_counts:
                    dict_counts[meth_name] += 1
                    meth_name = "{0}_{1:d}".format(meth_name,
                                                   dict_counts[meth_name])
                else:
                    dict_counts[meth_name] = 0
                dict[meth_name] = f
            if not classstack:
                if lastGlobalEntry:
                    lastGlobalEntry.setEndLine(lineno - 1)
                lastGlobalEntry = f
            if cur_obj and isinstance(cur_obj, Function):
                cur_obj.setEndLine(lineno - 1)
            cur_obj = f
            classstack.append((f, thisindent))  # Marker for nested fns

        elif m.start("String") >= 0:
            pass

        elif m.start("Comment") >= 0:
            pass

        elif m.start("Interface") >= 0:
            # we found an interface definition
            thisindent = indent
            indent += 1
            # close all interfaces/modules indented at least as much
            while classstack and \
                    classstack[-1][1] >= thisindent:
                if classstack[-1][0] is not None:
                    # record the end line
                    classstack[-1][0].setEndLine(lineno - 1)
                del classstack[-1]
            lineno = lineno + src.count('\n', last_lineno_pos, start)
            last_lineno_pos = start
            class_name = m.group("InterfaceName")
            inherit = m.group("InterfaceSupers")
            if inherit:
                # the interface inherits from other interfaces
                inherit = inherit[1:].strip()
                inherit = [_commentsub('', inherit)]
            # remember this interface
            cur_class = Interface(module, class_name, inherit, file, lineno)
            if not classstack:
                dict[class_name] = cur_class
            else:
                cls = classstack[-1][0]
                cls._addclass(class_name, cur_class)
            if not classstack:
                if lastGlobalEntry:
                    lastGlobalEntry.setEndLine(lineno - 1)
                lastGlobalEntry = cur_class
            if cur_obj and isinstance(cur_obj, Function):
                cur_obj.setEndLine(lineno - 1)
            cur_obj = cur_class
            classstack.append((cur_class, thisindent))

        elif m.start("Module") >= 0:
            # we found a module definition
            thisindent = indent
            indent += 1
            # close all interfaces/modules indented at least as much
            while classstack and \
                    classstack[-1][1] >= thisindent:
                if classstack[-1][0] is not None:
                    # record the end line
                    classstack[-1][0].setEndLine(lineno - 1)
                del classstack[-1]
            lineno = lineno + src.count('\n', last_lineno_pos, start)
            last_lineno_pos = start
            module_name = m.group("ModuleName")
            # remember this module
            cur_class = Module(module, module_name, file, lineno)
            if not classstack:
                dict[module_name] = cur_class
                if lastGlobalEntry:
                    lastGlobalEntry.setEndLine(lineno - 1)
                lastGlobalEntry = cur_class
            if cur_obj and isinstance(cur_obj, Function):
                cur_obj.setEndLine(lineno - 1)
            cur_obj = cur_class
            classstack.append((cur_class, thisindent))

        elif m.start("Attribute") >= 0:
            lineno = lineno + src.count('\n', last_lineno_pos, start)
            last_lineno_pos = start
            index = -1
            while index >= -len(classstack):
                if classstack[index][0] is not None and \
                   not isinstance(classstack[index][0], Function) and \
                   not classstack[index][1] >= indent:
                    attributes = m.group("AttributeNames").split(',')
                    ro = m.group("AttributeReadonly")
                    for attribute in attributes:
                        attr = Attribute(module, attribute, file, lineno)
                        if ro:
                            attr.setPrivate()
                        classstack[index][0]._addattribute(attr)
                    break
                else:
                    index -= 1
                    if lastGlobalEntry:
                        lastGlobalEntry.setEndLine(lineno - 1)
                    lastGlobalEntry = None

        elif m.start("Begin") >= 0:
            # a begin of a block we are not interested in
            indent += 1

        elif m.start("End") >= 0:
            # an end of a block
            indent -= 1

        else:
            assert 0, "regexp _getnext found something unexpected"

    return dict
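
A caller passes the IDL module name plus a search path and then walks the returned dictionary; a usage sketch (the module name and path are hypothetical):

modules = readmodule_ex("Interfaces", ["/path/to/project/idl"])   # hypothetical name and path
for name, entry in modules.items():
    print("{0}: {1}".format(name, entry.__class__.__name__))
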
 def screen_height(self, height):
     return Utilities.checkDimensions(height)
 def screen_width(self, width):
     return Utilities.checkDimensions(width)
    def output_results(self):

        #get time of observation for each image
        times_file = self.directory + Constants.working_directory + Constants.time_file
        times = [line.rstrip('\n') for line in open(times_file)]

        #make streak folder in results folder
        output_dir = self.directory + Constants.working_directory + Constants.output_directory + Constants.streak_folder
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        #path to which the table of streaks is written
        streak_file = output_dir + Constants.streak_file

        f = open(streak_file, "w")
        f.write(
            "id  time                 xcentroid         ycentroid         ra                dec               RA/s               Dec/s              "
        )

        #stores the number of consecutive images with streaks
        count = 0
        #identifier for streak
        objid = 1

        #ra and dec for an individual streak
        ras = []
        decs = []

        #time each image was taken for a consecutive set of images which contains
        #a streak
        streak_times = []

        #loop through array of streaks found. This array contains an x and y position
        #for the centre of each streak in each image (one element for each image).
        #If no streaks were found in that image, the element is empty.
        #Here we are checking for a consecutive set of images which contain a streak.

        for i in range(len(self.streaks)):

            #if image i contains a streak
            if len(self.streaks[i]) > 0:

                count += 1

                #if the streak is at least 150 pixels away from the edge of the image,
                #record its x and y position and the time at which it was at
                #that position. For use in calculating object velocity
                if Utilities.is_within_boundaries(self.streaks[i][0][0],
                                                  self.streaks[i][0][1],
                                                  Constants.image_width,
                                                  Constants.image_height, 150):

                    time_string = times[i]
                    time_units = time_string.split("T")
                    time_units = time_units[0].split(
                        "-") + time_units[1].split(":")
                    time = datetime.datetime(int(time_units[0]),
                                             int(time_units[1]),
                                             int(time_units[2]),
                                             int(time_units[3]),
                                             int(time_units[4]),
                                             int(time_units[5]))

                    #add half the exposure time to the time of observation.
                    #This is done because the positions stored are at the centre
                    #of the streak. The object is at this position halfway
                    #through the exposure.
                    time = time + datetime.timedelta(0, 17.5)

                    streak_times.append(time)

                    #use world coordinate system from Cataloguer to find
                    #the RA and Dec of the centre of the streak
                    ra, dec = self.cataloguer.wcs.all_pix2world(
                        self.streaks[i][0][0], self.streaks[i][0][1], 0)

                    ras.append(ra)
                    decs.append(dec)

            #if image i does not contain a streak, or the end of the array has been reached
            if len(self.streaks[i]) == 0 or i == len(self.streaks) - 1:

                #if the minimum number of consecutive images with a streak has
                #been reached
                if count > 3:

                    #find a time associated with one of the images and the
                    #ra and dec of the centre of the streak in that image
                    time = streak_times[len(streak_times) - 2]
                    ra, dec = self.cataloguer.wcs.all_pix2world(
                        self.streaks[i - 2][0][0], self.streaks[i - 2][0][1],
                        0)

                    #prepare string for output
                    output_string = "\r\n" + Utilities.format_index(
                        objid
                    ) + " " + time.strftime("%m/%d/%Y, %H:%M:%S") + " " + str(
                        self.streaks[i - 2][0][0]) + " " + str(self.streaks[
                            i - 2][0][1]) + " " + str(ra) + " " + str(dec)

                    avg_ra_velocity = 0
                    avg_dec_velocity = 0

                    #calculate the angular velocity in terms of RA/s and Dec/s
                    #in arcseconds/s by finding the mean change in RA and Dec of
                    #the centre of the streak in consecutive images and dividing
                    #this by the exposure time
                    for j in range(len(ras) - 1):

                        time1 = streak_times[j]
                        time2 = streak_times[j + 1]

                        diff = (time2 - time1).total_seconds()
                        avg_ra_velocity += (ras[j + 1] - ras[j]) / diff
                        avg_dec_velocity += (decs[j + 1] - decs[j]) / diff

                    avg_ra_velocity = avg_ra_velocity / (len(ras) - 1)
                    avg_dec_velocity = avg_dec_velocity / (len(decs) - 1)

                    #add angular velocities to output string (*3600 to convert to arcseconds/s)
                    output_string += " " + str(
                        avg_ra_velocity * 3600) + " " + str(
                            avg_dec_velocity * 3600)

                    #write output string to file
                    f.write(output_string)

                    #create and save thumbnail of the streak in the results/streaks folder
                    im = self.ff.get_thumbnail(i - 2,
                                               self.streaks[i - 3][0][0],
                                               self.streaks[i - 3][0][1], 100,
                                               False)
                    plt.axis('off')
                    plt.imshow(im, origin='lower', cmap=plt.cm.inferno)
                    plt.savefig(output_dir + "streak_" +
                                Utilities.format_index(objid) + ".jpg")

                    #increment object id (so that each streak has a unique ID)
                    objid += 1

                #since the current image has no streak, or the end of the streak
                #array has been reached, the number of consecutive images is
                #reset to zero
                count = 0
                x_pos = []
                y_pos = []
                streak_times = []
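
Utilities.is_within_boundaries, used above to keep streaks away from the image edge, is a simple bounds test; a plausible implementation (a sketch, not the project's own code) is:

def is_within_boundaries(x, y, width, height, margin):
    # sketch: True if (x, y) lies at least `margin` pixels inside a width x height image
    return margin <= x < width - margin and margin <= y < height - margin
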
    def find_streaks(self, n):

        #stores x and y positions of the centres of any streaks found in the
        #image
        streaks = []

        #stores the pixels that make up each streak
        streak_pixels = []

        #refer to image n as 'data' (for simplicity)
        data = self.second_image

        median = np.median(data)

        #stores all pixels with counts greater than the median which have been scanned
        all_pixels = set()

        #stores all pixels which form streaks in the image
        all_streaks = set()

        #loop through each pixel in the image
        for i in range(len(data)):

            for j in range(len(data[0])):

                #form string storing x and y position of pixel, to be stored
                #in the various sets of pixels. Cannot simply use an array
                #as these cannot be inserted into a set.
                string = str(i) + " " + str(j)

                #if the count in the pixel is greater than 1.04 times the median
                #and it has not been scanned already
                if data[i][j] > median * 1.04 and string not in all_pixels:

                    completed = False
                    to_scan = []
                    pixels = []
                    pixels.append(string)
                    to_scan.append(string)
                    all_pixels.add(string)

                    while not completed:
                        arr = to_scan[0].split(" ")
                        y = int(arr[0])
                        x = int(arr[1])

                        found_pixels = self.find_pixels(
                            x, y, data, median, all_pixels)
                        if len(pixels) > 2000:
                            completed = True
                        pixels = pixels + found_pixels
                        to_scan = to_scan + found_pixels

                        to_scan.pop(0)

                        if len(to_scan) == 0:
                            completed = True

                    if len(pixels) > 60:

                        x_centre = 0
                        y_centre = 0
                        max_dist = 0

                        #use a separate loop variable here so that the outer
                        #image-scan index i is not overwritten
                        for p in range(len(pixels)):

                            arr = pixels[p].split(" ")
                            y = int(arr[0])
                            x = int(arr[1])
                            x_centre += x
                            y_centre += y

                            for k in range(p, len(pixels)):
                                if k != p:

                                    arr = pixels[k].split(" ")
                                    y2 = int(arr[0])
                                    x2 = int(arr[1])

                                    dist = ((x2 - x)**2 + (y2 - y)**2)**0.5

                                    if dist > max_dist:
                                        max_dist = dist

                        x_centre = int(round(x_centre / len(pixels)))
                        y_centre = int(round(y_centre / len(pixels)))

                        if not Utilities.is_within_boundaries(
                                x_centre, y_centre, len(data[0]), len(data),
                                30):
                            continue

                        vector = [1, 0]
                        occupancies = []
                        max_occupancy = 0
                        max_vector = 0

                        for angle in range(0, 180):

                            count = 0
                            line_pixels = set()

                            for a in range(-30, 31):
                                for b in range(-30, 31):
                                    y = y_centre + a
                                    x = x_centre + b

                                    if self.distance_to_line(
                                        [x_centre, y_centre], vector,
                                        [x, y]) < 3:
                                        line_pixels.add(str(y) + " " + str(x))

                            for pix in line_pixels:
                                if pix in pixels:
                                    count = count + 1

                            occupancy = count / len(line_pixels)
                            occupancies.append(occupancy)

                            # if occupancy > max_occupancy:
                            #    max_occupancy = occupancy
                            #   max_vector = vector

                            vector = self.rotate(vector, 1)

                        standard_deviation = Utilities.standard_deviation(
                            occupancies)
                        mean = np.mean(occupancies)

                        if not standard_deviation < 0.4 * mean:

                            streak_pixels.append(pixels)

                            #all_streaks = all_streaks|set(pixels)

        if len(streak_pixels) == 0:
            return False

        if len(streak_pixels) > 1:
            completed = False
            i = 0

            while not completed:

                same_object = False

                for pixel1 in streak_pixels[i]:

                    if same_object:

                        streak_pixels[i + 1] = streak_pixels[i + 1] + streak_pixels[i]
                        streak_pixels.pop(i)
                        i -= 1

                        break

                    for pixel2 in streak_pixels[i + 1]:

                        arr = pixel1.split(" ")
                        y1 = int(arr[0])
                        x1 = int(arr[1])

                        arr = pixel2.split(" ")
                        y2 = int(arr[0])
                        x2 = int(arr[1])

                        dist = ((x2 - x1)**2 + (y2 - y1)**2)**0.5

                        if dist < 100:
                            same_object = True
                            break
                i += 1

                if i == len(streak_pixels) - 1:
                    completed = True

        for pixels in streak_pixels:

            x_centre = 0
            y_centre = 0

            for i in range(len(pixels)):

                arr = pixels[i].split(" ")
                y = int(arr[0])
                x = int(arr[1])
                x_centre += x
                y_centre += y

            x_centre = x_centre / len(pixels)
            y_centre = y_centre / len(pixels)
            streaks.append([x_centre, y_centre])

        self.streaks[n - 1] = streaks

        # =============================================================================
        #         for string in all_streaks:
        #
        #             arr = string.split(" ")
        #             y = int(arr[0])
        #             x = int(arr[1])
        #
        #             self.second_image[y][x] = 30000
        #
        #         path = self.directory + Constants.working_directory + Constants.image_directory + Constants.reduced_prefix
        #
        #         image = Utilities.get_image_data(path, 189)
        #
        #         set_n, i = Utilities.n_to_set_and_n(189)
        #
        #         file = path + Constants.file_name + "_" + str(set_n) + "_" + Utilities.format_index(i) + Constants.fits_extension
        #         head=getheader(file ,ignore_missing_end=True)
        #
        #         hdu = PrimaryHDU(self.second_image, head)
        #         hdul = HDUList([hdu], None)
        #         hdul.writeto(self.directory + Constants.working_directory + "testimage.fits", overwrite=True)
        #
        # =============================================================================
        if not len(streaks) == 0:
            return True

        return False
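
The elongation test above depends on two helpers that are not part of this snippet (self.distance_to_line and self.rotate). The following is a minimal, standalone sketch of the same idea with hypothetical stand-ins: a direction vector is rotated through 180 one-degree steps and, for each angle, the fraction of candidate pixels lying within a few pixels of the line through the blob centre is recorded. An elongated streak gives a strongly peaked occupancy curve (standard deviation large relative to the mean), while a roughly circular blob gives a flat one, which is the criterion applied in find_streaks.

import numpy as np

def distance_to_line(origin, direction, point):
    #hypothetical stand-in: perpendicular distance from point to the line
    #through origin along the given direction vector
    d = np.asarray(direction, dtype=float)
    d = d / np.linalg.norm(d)
    v = np.asarray(point, dtype=float) - np.asarray(origin, dtype=float)
    return abs(v[0] * d[1] - v[1] * d[0])

def rotate(vector, degrees):
    #hypothetical stand-in: rotate a 2D vector counter-clockwise
    theta = np.radians(degrees)
    c, s = np.cos(theta), np.sin(theta)
    return [c * vector[0] - s * vector[1], s * vector[0] + c * vector[1]]

def occupancy_curve(pixels, x_centre, y_centre, radius=30, tolerance=3):
    #pixels is a collection of "y x" strings, as in find_streaks above
    vector = [1, 0]
    occupancies = []
    for _ in range(180):
        line_pixels = set()
        for a in range(-radius, radius + 1):
            for b in range(-radius, radius + 1):
                y, x = y_centre + a, x_centre + b
                if distance_to_line([x_centre, y_centre], vector, [x, y]) < tolerance:
                    line_pixels.add(str(y) + " " + str(x))
        hits = sum(1 for pix in line_pixels if pix in pixels)
        occupancies.append(hits / len(line_pixels))
        vector = rotate(vector, 1)
    return occupancies

#a thin horizontal run of pixels is clearly elongated, so its occupancy curve
#is strongly peaked near 0 and 180 degrees
streak = set("50 " + str(x) for x in range(30, 71))
occ = occupancy_curve(streak, x_centre=50, y_centre=50)
print(np.mean(occ), np.std(occ))
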
Beispiel #38
0
def dispense_samples(args, labware_dict, sample_data_dict, sample_parameters,
                     left_pipette, right_pipette, water_aspirated):
    """
    Dilute and dispense samples
    @param args:
    @param labware_dict:
    @param sample_data_dict:
    @param sample_parameters:
    @param left_pipette:
    @param right_pipette:
    @param water_aspirated:
    @return: updated running total of water aspirated from the reservoir
    """

    bottom_offset = float(args.BottomOffset)
    dilution_labware = labware_dict[args.DilutionPlateSlot]
    sample_destination_labware = labware_dict[args.PCR_PlateSlot]
    reagent_labware = labware_dict[args.ReagentSlot]
    water_res_well_dia = reagent_labware[args.WaterWell].diameter
    cone_vol = Utilities.labware_cone_volume(args, reagent_labware)
    water_tip_height = Utilities.res_tip_height(
        float(args.WaterResVol) - water_aspirated, water_res_well_dia,
        cone_vol, bottom_offset)
    dilution_plate_layout = Utilities.plate_layout()
    dilution_well_index = 0

    for sample_key in sample_parameters:
        sample_source_labware = labware_dict[sample_parameters[sample_key][0]]
        sample_source_well = sample_parameters[sample_key][1]
        sample_dest_wells = sample_data_dict[sample_key][3]
        sample_vol = sample_data_dict[sample_key][0]
        diluent_vol = sample_data_dict[sample_key][1]
        diluted_sample_vol = sample_data_dict[sample_key][2]

        # If no dilution is necessary, dispense sample and continue
        if diluted_sample_vol == 0:
            sample_pipette, sample_loop, sample_vol = \
                Utilities.pipette_selection(left_pipette, right_pipette, sample_vol)
            for well in sample_dest_wells:
                Utilities.dispensing_loop(
                    args,
                    sample_loop,
                    sample_pipette,
                    sample_source_labware[sample_source_well],
                    sample_destination_labware[well],
                    sample_vol,
                    NewTip=True,
                    MixReaction=True,
                    touch=True)
            continue

        # Adjust volume of diluted sample to make sure there is enough
        diluted_template_needed = round(
            diluted_sample_vol * (len(sample_dest_wells) + 1.5), 2)
        diluted_template_factor = round(
            diluted_template_needed / (sample_vol + diluent_vol), 2)
        '''
        diluted_template_on_hand = sample_vol+diluent_vol
        diluted_template_factor = 1.0
        if diluted_template_needed <= diluted_template_on_hand:
            diluted_template_factor = diluted_template_needed/diluted_template_on_hand
            if diluted_template_factor <= 1.5 and (sample_vol * diluted_template_factor) < 10:
                diluted_template_factor = 2.0
        '''
        adjusted_sample_vol = round((sample_vol * diluted_template_factor), 1)
        diluent_vol = round((diluent_vol * diluted_template_factor), 1)

        # Reset the pipettes for the new volumes
        diluent_pipette, diluent_loop, diluent_vol = \
            Utilities.pipette_selection(left_pipette, right_pipette, diluent_vol)
        sample_pipette, sample_loop, sample_vol = \
            Utilities.pipette_selection(left_pipette, right_pipette, adjusted_sample_vol)

        # Make dilution, diluent first
        dilution_well = dilution_plate_layout[dilution_well_index]
        Utilities.dispensing_loop(
            args,
            diluent_loop,
            diluent_pipette,
            reagent_labware[args.WaterWell].bottom(water_tip_height),
            dilution_labware[dilution_well],
            diluent_vol,
            NewTip=True,
            MixReaction=False,
            touch=True)

        Utilities.dispensing_loop(args,
                                  sample_loop,
                                  sample_pipette,
                                  sample_source_labware[sample_source_well],
                                  dilution_labware[dilution_well],
                                  sample_vol,
                                  NewTip=True,
                                  MixReaction=True)
        water_aspirated += diluent_vol
        dilution_well_index += 1
        water_tip_height = \
            Utilities.res_tip_height(float(args.WaterResVol)-water_aspirated, water_res_well_dia, cone_vol,
                                     bottom_offset)

        # Add diluted sample to PCR plate
        for well in sample_dest_wells:
            sample_pipette, sample_loop, diluted_sample_vol = \
                Utilities.pipette_selection(left_pipette, right_pipette, diluted_sample_vol)

            Utilities.dispensing_loop(
                args,
                sample_loop,
                sample_pipette,
                dilution_labware[dilution_well].bottom(bottom_offset),
                sample_destination_labware[well],
                diluted_sample_vol,
                NewTip=True,
                MixReaction=True)

        if sample_pipette.has_tip:
            sample_pipette.drop_tip()

    return water_aspirated
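
As a worked numeric sketch of the volume adjustment above (the numbers are invented and the units are whatever the protocol uses, typically microlitres): with 2 units of diluted sample needed in each of three wells and an original recipe of 2 units sample plus 18 units diluent, the recipe is scaled so that slightly more diluted template is prepared than is dispensed.

diluted_sample_vol = 2.0                  # dispensed into each destination well
sample_dest_wells = ["A1", "A2", "A3"]    # hypothetical destination wells
sample_vol, diluent_vol = 2.0, 18.0       # original 1:10 dilution recipe

diluted_template_needed = round(diluted_sample_vol * (len(sample_dest_wells) + 1.5), 2)   # 9.0
diluted_template_factor = round(diluted_template_needed / (sample_vol + diluent_vol), 2)  # 0.45

adjusted_sample_vol = round(sample_vol * diluted_template_factor, 1)    # 0.9
adjusted_diluent_vol = round(diluent_vol * diluted_template_factor, 1)  # 8.1
print(adjusted_sample_vol + adjusted_diluent_vol)  # 9.0 -> exactly the volume needed
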
Beispiel #39
0
def run(ctx: protocol_api.ProtocolContext):

    ctx.comment("Begin {}".format(metadata['protocolName']))

    # Turn on rail lights and pause program so user can load robot deck.
    # ctx.set_rail_lights(True)
    # ctx.pause("Load Labware onto robot deck and click resume when ready to continue")
    # ctx.home()
    ctx.set_rail_lights(False)

    # TSV file location on OT-2
    tsv_file_path = "{0}var{0}lib{0}jupyter{0}notebooks{0}ProcedureFile.tsv".format(
        os.sep)

    # If not on the OT-2, get temp TSV file location on Win10 Computers for simulation
    if not os.path.isfile(tsv_file_path):
        tsv_file_path = "C:{0}Users{0}{1}{0}Documents{0}TempTSV.tsv".format(
            os.sep, os.getlogin())

    sample_parameters, args = Utilities.parse_sample_template(tsv_file_path)
    labware_dict, left_tiprack_list, right_tiprack_list = Utilities.labware_parsing(
        args, ctx)

    # Pipettes
    left_pipette = ctx.load_instrument(args.LeftPipette,
                                       'left',
                                       tip_racks=left_tiprack_list)
    right_pipette = ctx.load_instrument(args.RightPipette,
                                        'right',
                                        tip_racks=right_tiprack_list)

    # Set the location of the first tip in box.
    with suppress(IndexError):
        left_pipette.starting_tip = left_tiprack_list[0].wells_by_name()[
            args.LeftPipetteFirstTip]
    with suppress(IndexError):
        right_pipette.starting_tip = right_tiprack_list[0].wells_by_name()[
            args.RightPipetteFirstTip]

    sample_data_dict, water_well_dict, target_well_dict, used_wells, layout_data, max_template_vol = \
        sample_processing(args, sample_parameters)

    # This will output a plate layout file.  Only does it during the simulation from our GUI
    if ctx.is_simulating() and platform.system() == "Windows":
        run_date = datetime.datetime.today().strftime("%a %b %d %H:%M %Y")
        plate_layout_string = \
            "## ddPCR Setup\n## Setup Date:\t{}\n## Template User:\t{}\n" \
            "# Format:\tTemplate | Target | Template Dilution | Template Volume in Reaction\n\n\t"\
            .format(run_date, args.User)

        for i in range(12):
            plate_layout_string += "{}\t".format(i + 1)

        # I have to import this here because I have been unable to get natsort on the robot.
        import natsort

        for well in natsort.natsorted(layout_data):
            well_string = "\t".join(layout_data[well])
            plate_layout_string += "\n{}\t{}\t".format(well, well_string)
        plate_layout_file = \
            open("C:{0}Users{0}{1}{0}Documents{0}ddPCR_PlateLayout.tsv".format(os.sep, os.getlogin()), 'w')
        plate_layout_file.write(plate_layout_string)
        plate_layout_file.close()

    # Now do the actual dispensing.
    water_aspirated = dispense_water(args, labware_dict, water_well_dict,
                                     left_pipette, right_pipette)

    dispense_reagent_mix(args, labware_dict, target_well_dict, left_pipette,
                         right_pipette)

    water_aspirated = dispense_samples(args, labware_dict, sample_data_dict,
                                       sample_parameters, left_pipette,
                                       right_pipette, water_aspirated)

    fill_empty_wells(args, used_wells, water_aspirated, labware_dict,
                     left_pipette, right_pipette)

    ctx.comment("\nProgram Complete")

    if not ctx.is_simulating():
        os.remove(tsv_file_path)
def gpu_ifs_transform(transformation=constants.ifs_fractals["fern"],
                      width=600,
                      height=600,
                      num_points=100000,
                      block_size=64,
                      output_file="gpuOut.png"):
    """
    This function will perform the Iterated Function System (IFS) fractal
    algorithm via CUDA.
    :param transformation: A transformation matrix with 7 columns representing
        [a, b, c, d, e, f, prob] for the IFS function x_(n+1) = ax_n + by_n + e
        and y_(n+1) = cx_n + dy_n + f
    :param width: Width of the image in pixels
    :param height: Height of the image in pixels
    :param num_points: Number of points in fractal
    :param block_size: GPU block size
    :param output_file: File to save the image to
    :return: algorithm runtime in seconds, number of points
    """
    start = timer()
    # Generate Hammersley sequence
    block = (block_size, 1, 1)

    gpu_x = gpuarray.to_gpu(np.zeros(num_points, np.float32))
    gpu_y = gpuarray.to_gpu(np.zeros(num_points, np.float32))
    data_generation = SourceModule(KernelCode.gpu_hammersley_kernel_code)
    hammersley_func = data_generation.get_function("hammersley")
    hammersley_func(np.int32(num_points), gpu_x, gpu_y, block=block)

    transformation = np.array(transformation, np.float32)
    gpu_transform = gpuarray.to_gpu(transformation)
    rows, cols = transformation.shape

    grid = (num_points, 1, 1)
    mod = SourceModule(KernelCode.ifs_transform_kernel_code, no_extern_c=True)
    ifs_func = mod.get_function("phase1Transform")
    ifs_func(gpu_x,
             gpu_y,
             gpu_transform,
             np.int32(num_points),
             np.int32(rows),
             block=block,
             grid=grid,
             shared=sys.getsizeof(gpu_transform))

    curr_iter = 0
    while curr_iter < 15:
        ifs_func(gpu_x,
                 gpu_y,
                 gpu_transform,
                 np.int32(num_points),
                 np.int32(rows),
                 block=block,
                 grid=grid,
                 shared=sys.getsizeof(gpu_transform))
        curr_iter += 1

    x = gpu_x.get()
    y = gpu_y.get()
    points = list(zip(x, y))
    run_time = timer() - start
    Utilities.draw_image(points, width, height, output_file)
    return run_time, len(points)
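
For comparison with the kernel above, the same affine update can be written on the CPU in a few lines of NumPy. This is only a minimal reference sketch (the two-map system below is invented; the real transformations come from constants.ifs_fractals): each point picks one row [a, b, c, d, e, f, prob] according to the prob column and applies x' = a*x + b*y + e, y' = c*x + d*y + f.

import numpy as np

def cpu_ifs_step(x, y, transformation, rng):
    # transformation rows: [a, b, c, d, e, f, prob]
    t = np.asarray(transformation, dtype=np.float32)
    probs = t[:, 6] / t[:, 6].sum()
    rows = rng.choice(len(t), size=x.shape[0], p=probs)
    a, b, c, d, e, f = (t[rows, k] for k in range(6))
    return a * x + b * y + e, c * x + d * y + f

# tiny usage example with an invented two-map system
rng = np.random.default_rng(0)
x = np.zeros(1000, np.float32)
y = np.zeros(1000, np.float32)
maps = [[0.5, 0.0, 0.0, 0.5, 0.0, 0.0, 0.5],
        [0.5, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]]
for _ in range(15):            # same number of refinement passes as the GPU loop above
    x, y = cpu_ifs_step(x, y, maps, rng)
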
Beispiel #41
0
def MakeImageDocument(url, imageType, exportInfo):
    # First make a bitmap context for a US Letter size
    # raster at the requested resolution.
    dpi = exportInfo.dpi
    width = int(8.5 * dpi)
    height = int(11 * dpi)

    # For JPEG output type the bitmap should not be transparent. If other types are added that
    # do not support transparency, this code should be updated to check for those types as well.
    needTransparentBitmap = imageType.lower() != LaunchServices.kUTTypeJPEG.lower()

    # Create an RGB Bitmap context using the generic calibrated RGB color space
    # instead of the display color space.
    useDisplayColorSpace = False
    c = createRGBBitmapContext(width, height, useDisplayColorSpace,
                               needTransparentBitmap)

    if c is None:
        print("Couldn't make destination bitmap context")
        return -1

    # Scale the coordinate system based on the resolution in dots per inch.
    Quartz.CGContextScaleCTM(c, dpi / 72, dpi / 72)

    # Set the font smoothing parameter to false since it's better to
    # draw any text without special LCD text rendering when creating
    # rendered data for export.
    if hasattr(Quartz, "CGContextSetShouldSmoothFonts"):
        Quartz.CGContextSetShouldSmoothFonts(c, False)

    # Set the scaling factor for shadows. This is a hack so that
    # drawing code that needs to know the scaling factor can
    # obtain it. Better would be that DispatchDrawing and the code
    # it calls would take this scaling factor as a parameter.
    Utilities.setScalingFactor(dpi / 72)

    # Draw into that raster...
    AppDrawing.DispatchDrawing(c, exportInfo.command)

    # Set the scaling factor back to 1.0.
    Utilities.setScalingFactor(1.0)

    # Create an image from the raster data. Calling
    # createImageFromBitmapContext gives up ownership
    # of the raster data used by the context.
    image = createImageFromBitmapContext(c)

    # Release the context now that the image is created.
    del c

    if image is None:
        # Users of this code should update this to be an error code they find useful.
        return -1

    # Now export the image.
    if exportInfo.useQTForExport:
        exportCGImageToFileWithQT(image, url, imageType, exportInfo.dpi)
    else:
        exportCGImageToFileWithDestination(image, url, imageType,
                                           exportInfo.dpi)
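
As a quick numeric illustration of the sizing and scaling above (the 300 dpi value is invented): the raster covers a US Letter page, and scaling the CTM by dpi / 72 lets drawing code that works in points (72 per inch) fill the raster at the requested resolution.

dpi = 300
width = int(8.5 * dpi)    # 2550 pixels across a US Letter page at 300 dpi
height = int(11 * dpi)    # 3300 pixels down
scale = dpi / 72          # ~4.17: one point maps to ~4.17 pixels
print(width, height, scale)
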
Beispiel #42
0
def sample_processing(args, sample_parameters):
    sample_data_dict = defaultdict(list)
    target_well_dict = defaultdict(list)
    water_well_dict = defaultdict(float)
    layout_data = defaultdict(list)

    # Builds the data frame for printing the plate layout file
    # (one row per plate row letter, 12 empty column entries each)
    for k in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']:
        layout_data[k] = [''] * 12

    plate_layout_by_column = Utilities.plate_layout()
    used_wells = []
    dest_well_count = 0
    target_list = []

    for sample_key in sample_parameters:
        sample_name = sample_parameters[sample_key][2]
        sample_string = sample_name
        sample_concentration = float(sample_parameters[sample_key][3])
        targets = sample_parameters[sample_key][4].split(",")
        replicates = int(sample_parameters[sample_key][5])

        sample_vol, diluent_vol, diluted_sample_vol, reaction_water_vol, max_template_vol = \
            Utilities.calculate_volumes(args, sample_concentration)

        sample_wells = []
        for target in targets:
            target_list.append(target)
            target_name = getattr(args, "Target_{}".format(target))[1]
            for i in range(replicates):
                well = plate_layout_by_column[dest_well_count]
                row = well[0]
                column = int(well[1:]) - 1
                s_volume = diluted_sample_vol
                if diluent_vol == 0:
                    dilution = "Neat"
                    s_volume = sample_vol
                else:
                    dilution = "1:{}".format(
                        int((sample_vol + diluent_vol) / sample_vol))

                layout_data[row][column] = "{}|{}|{}|{}"\
                    .format(sample_string, target_name, dilution, s_volume)

                water_well_dict[well] = reaction_water_vol
                target_well_dict[target].append(well)
                sample_wells.append(well)
                used_wells.append(well)
                dest_well_count += 1

        sample_data_dict[sample_key] = \
            [round(sample_vol, 1), round(diluent_vol, 1), round(diluted_sample_vol, 1), sample_wells]

    # Define our no template control wells for the targets.
    for target in target_well_dict:
        target_list.append(target)
    target_list = list(set(target_list))

    for target in target_list:
        control_name = "Water"
        target_data = getattr(args, "Target_{}".format(target))
        target_name = target_data[1]

        well = plate_layout_by_column[dest_well_count]
        used_wells.append(well)
        row = well[0]
        column = int(well[1:]) - 1
        layout_data[row][column] = "{}|{}|NA|{}".format(
            control_name, target_name, max_template_vol)
        water_well_dict[well] = max_template_vol
        dest_well_count += 1

        target_well_dict[target].append(well)

    return sample_data_dict, water_well_dict, target_well_dict, used_wells, layout_data, max_template_vol
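
Utilities.plate_layout is not shown in this listing; judging from how dest_well_count walks through it, it is assumed to hand out wells in column order (A1, B1, ..., H1, A2, ...). A hypothetical stand-in, together with the dilution label written into the layout file, might look like this:

def plate_layout():
    # hypothetical stand-in: 96-well plate listed column by column (A1..H1, A2..H2, ...)
    rows = "ABCDEFGH"
    return ["{}{}".format(r, c) for c in range(1, 13) for r in rows]

# the dilution label used above, e.g. 2 parts sample + 18 parts diluent -> "1:10"
sample_vol, diluent_vol = 2.0, 18.0
dilution = "1:{}".format(int((sample_vol + diluent_vol) / sample_vol))
print(plate_layout()[:10], dilution)
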
Beispiel #43
0
# how many samples per batch to load
batch_size = 20

# show image sample before training
showImages = True

# create loss logger file names
validationLossFile = architecture+'/validationLoss.csv'
trainingLossFile = architecture+'/trainingLoss.csv'

# =============================================================================
# Setup and user feedback
# =============================================================================

# try to import the model architecture definition module    
cnn = util.importModelArchitecture(package, architecture)

# check if CUDA is available on this computer
train_on_gpu = torch.cuda.is_available()
print('Cuda available?: ' + ('Yes' if train_on_gpu else 'No'))

# =============================================================================
# Obtaining and preprocessing data
# =============================================================================

''' 
Data Augmentation

Defines a basic transformation pipeline for data augmentation. Transformations
may include random (horizontal) flips of pictures or small rotations.
Beispiel #44
0
                    default="../../All_Input/input_xml/norm_factor_EFT_1D.txt")
parser.add_argument('--syst',
                    default="../../All_Input/input_xml/systematics_fake.txt")
parser.add_argument('--jetptcut', default="35")
parser.add_argument('--discriminant', default="BDT_1bin")
parser.add_argument('--outpath',
                    '-o',
                    default="../../All_Input/workspace_xml/")
parser.add_argument('--Asimov', action="store_true")
parser.add_argument('--dtd',
                    default="../../All_Input/input_xml/HistFactorySchema.dtd")
parser.add_argument('--wspath', default="../../All_Input/workspace/")
parser.add_argument('--mode', default="vv")

args = parser.parse_args()

ROOT.gROOT.SetBatch(ROOT.kTRUE)

gSystem.Load("../macros/writeXML/writeXML_cpp.so")

Utilities.try_makedir(join(args.outpath, args.discriminant))
Utilities.try_makedir(join(args.wspath, args.discriminant))
#ROOT.H4l_XML.writeXML(ErrorType = "THEO SYS EXP", sample_in = XML_PATH + SAMPLE_IN, category_in = XML_PATH + CATEGORY_IN, norm_in = XML_PATH + NORM_IN, syst_in = XML_PATH + SYST_IN, jetptcut = "35", discriminant = "BDT", IsAsimov = true)
ROOT.H4l_XML.writeXML(args.err, args.sample, args.category, args.norm,
                      args.syst, args.jetptcut, args.discriminant,
                      args.outpath, args.Asimov, args.mode)
ROOT.H4l_XML.writeDriver(args.dtd, args.wspath, args.category, args.outpath,
                         args.discriminant)
os.system("hist2workspace -standard_form {0}".format(
    join(args.outpath, args.discriminant, "driver.xml")))
Beispiel #45
0
import genetic_algorithm as GA
import Utilities as TOOLS
import Networks

import tensorflow as tf
from keras.datasets import cifar10
from keras.utils.np_utils import to_categorical

import matplotlib.pyplot as plt
from datetime import date
from operator import itemgetter
import os

#Global Variables
GENE_SET = '10'  #Binary values for model chromosome. Constant, DO NOT CHANGE
MAX_NUM_NODES = int(TOOLS.max_bi(len(TOOLS.int_to_bit(50))),
                    2)  #Max number of nodes in each layer
MAX_NUM_HIDDEN_LAYERS = int(TOOLS.max_bi(len(TOOLS.int_to_bit(10))),
                            2)  #Max number of hidden layers
TOOLS.global_variables(
    ['sigmoid', 'tanh', 'relu', 'elu'],  #Activation Function 
    [.1, .01, .001, .0001, .00001, .000001, .0000001, .00000001
     ],  #Learning Rate
    [0, .1, .2, .3, .4, .5, .6, .7])  #Dropout Rate
#Change for increased performance
POPULATION_SIZE = 10  #Zero-indexed
MUTATION_CHANCE = 0.2  #Chance of randomly tweaking the model's chromosome
CROSSOVER_CHANCE = 0.45  #Chance for uniform crossover between two parents to occur
KEEP = 0.4  #Keep the top X percent of the population
maxGen = 10  #Number of generations
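
TOOLS.int_to_bit and TOOLS.max_bi are not included in this listing. A plausible reading, sketched here with hypothetical stand-ins, is that int_to_bit returns the binary string of an integer and max_bi returns the all-ones string of a given length, so the two globals become the largest values encodable in as many bits as 50 and 10 require:

def int_to_bit(n):
    # hypothetical stand-in: integer -> binary string without the '0b' prefix
    return format(n, 'b')

def max_bi(n_bits):
    # hypothetical stand-in: the all-ones bit string of the given length
    return '1' * n_bits

MAX_NUM_NODES = int(max_bi(len(int_to_bit(50))), 2)           # 50 -> '110010' (6 bits) -> 63
MAX_NUM_HIDDEN_LAYERS = int(max_bi(len(int_to_bit(10))), 2)   # 10 -> '1010' (4 bits) -> 15
print(MAX_NUM_NODES, MAX_NUM_HIDDEN_LAYERS)
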
Beispiel #46
0
def main():
    """
    Main entry point into the application.
    """
    import getopt

    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:ehio:Rrt:Vx:", [
            "exclude=",
            "extension=",
            "help",
            "noindex",
            "noempty",
            "outdir=",
            "recursive",
            "style-sheet=",
            "version",
            "exclude-file=",
            "eol=",
            "body-color=",
            "body-background-color=",
            "l1header-color=",
            "l1header-background-color=",
            "l2header-color=",
            "l2header-background-color=",
            "cfheader-color=",
            "cfheader-background-color=",
            "link-color=",
            "create-qhp",
            "qhp-outdir=",
            "qhp-namespace=",
            "qhp-virtualfolder=",
            "qhp-filtername=",
            "qhp-filterattribs=",
            "qhp-title=",
            "create-qhc",
        ])
    except getopt.error:
        usage()

    excludeDirs = [
        "CVS", ".svn", "_svn", ".ropeproject", "_ropeproject", ".eric6project",
        "_eric6project", "dist", "build", "doc", "docs"
    ]
    excludePatterns = []
    outputDir = "doc"
    recursive = False
    doIndex = True
    noempty = False
    newline = None

    stylesheetFile = ""
    colors = eric6docDefaultColors.copy()

    qtHelpCreation = False
    qtHelpOutputDir = "help"
    qtHelpNamespace = ""
    qtHelpFolder = "source"
    qtHelpFilterName = "unknown"
    qtHelpFilterAttribs = ""
    qtHelpTitle = ""
    qtHelpCreateCollection = False

    for k, v in opts:
        if k in ["-o", "--outdir"]:
            outputDir = v
        elif k in ["-R", "-r", "--recursive"]:
            recursive = True
        elif k in ["-x", "--exclude"]:
            excludeDirs.append(v)
        elif k == "--exclude-file":
            excludePatterns.append(v)
        elif k in ["-i", "--noindex"]:
            doIndex = False
        elif k in ["-e", "--noempty"]:
            noempty = True
        elif k in ["-h", "--help"]:
            usage()
        elif k in ["-V", "--version"]:
            version()
        elif k in ["-c", "--style-sheet"]:
            stylesheetFile = v
        elif k in ["-t", "--extension"]:
            if not v.startswith("."):
                v = ".{0}".format(v)
            supportedExtensions.append(v)
        elif k == "--eol":
            if v.lower() == "cr":
                newline = '\r'
            elif v.lower() == "lf":
                newline = '\n'
            elif v.lower() == "crlf":
                newline = '\r\n'

        elif k == "--body-color":
            colors['BodyColor'] = v
        elif k == "--body-background-color":
            colors['BodyBgColor'] = v
        elif k == "--l1header-color":
            colors['Level1HeaderColor'] = v
        elif k == "--l1header-background-color":
            colors['Level1HeaderBgColor'] = v
        elif k == "--l2header-color":
            colors['Level2HeaderColor'] = v
        elif k == "--l2header-background-color":
            colors['Level2HeaderBgColor'] = v
        elif k == "--cfheader-color":
            colors['CFColor'] = v
        elif k == "--cfheader-background-color":
            colors['CFBgColor'] = v
        elif k == "--link-color":
            colors['LinkColor'] = v

        elif k == "--create-qhp":
            qtHelpCreation = True
        elif k == "--qhp-outdir":
            qtHelpOutputDir = v
        elif k == "--qhp-namespace":
            qtHelpNamespace = v
        elif k == "--qhp-virtualfolder":
            qtHelpFolder = v
        elif k == "--qhp-filtername":
            qtHelpFilterName = v
        elif k == "--qhp-filterattribs":
            qtHelpFilterAttribs = v
        elif k == "--qhp-title":
            qtHelpTitle = v
        elif k == "--create-qhc":
            qtHelpCreateCollection = True

    if not args:
        usage()

    if qtHelpCreation and \
       (qtHelpNamespace == "" or
        qtHelpFolder == "" or '/' in qtHelpFolder or
            qtHelpTitle == ""):
        usage()

    if qtHelpCreation:
        from PyQt5.QtCore import QCoreApplication
        app = QCoreApplication(sys.argv)  # __IGNORE_WARNING__

    input = output = 0
    basename = ""

    if outputDir:
        if not os.path.isdir(outputDir):
            try:
                os.makedirs(outputDir)
            except EnvironmentError:
                sys.stderr.write(
                    "Could not create output directory {0}.".format(outputDir))
                sys.exit(2)
    else:
        outputDir = os.getcwd()
    outputDir = os.path.abspath(outputDir)

    if stylesheetFile:
        try:
            sf = open(stylesheetFile, "r", encoding="utf-8")
            stylesheet = sf.read()
            sf.close()
        except IOError:
            sys.stderr.write(
                "The CSS stylesheet '{0}' does not exist\n".format(
                    stylesheetFile))
            sys.stderr.write("Disabling CSS usage.\n")
            stylesheet = None
    else:
        stylesheet = None

    indexGenerator = IndexGenerator(outputDir, colors, stylesheet)

    if qtHelpCreation:
        if qtHelpOutputDir:
            if not os.path.isdir(qtHelpOutputDir):
                try:
                    os.makedirs(qtHelpOutputDir)
                except EnvironmentError:
                    sys.stderr.write(
                        "Could not create QtHelp output directory {0}.".format(
                            qtHelpOutputDir))
                    sys.exit(2)
        else:
            qtHelpOutputDir = os.getcwd()
        qtHelpOutputDir = os.path.abspath(qtHelpOutputDir)

        qtHelpGenerator = QtHelpGenerator(outputDir, qtHelpOutputDir,
                                          qtHelpNamespace, qtHelpFolder,
                                          qtHelpFilterName,
                                          qtHelpFilterAttribs, qtHelpTitle,
                                          qtHelpCreateCollection)

    for arg in args:
        if os.path.isdir(arg):
            if os.path.exists(
                    os.path.join(arg, Utilities.joinext("__init__", ".py"))):
                basename = os.path.dirname(arg)
                if arg == '.':
                    sys.stderr.write("The directory '.' is a package.\n")
                    sys.stderr.write(
                        "Please repeat the call giving its real name.\n")
                    sys.stderr.write("Ignoring the directory.\n")
                    continue
            else:
                basename = arg
            if basename:
                basename = "{0}{1}".format(basename, os.sep)

            if recursive and not os.path.islink(arg):
                names = [arg] + Utilities.getDirs(arg, excludeDirs)
            else:
                names = [arg]
        else:
            basename = ""
            names = [arg]

        for filename in names:
            inpackage = False
            if os.path.isdir(filename):
                files = []
                for ext in supportedExtensions:
                    files.extend(
                        glob.glob(
                            os.path.join(filename, Utilities.joinext("*",
                                                                     ext))))
                    initFile = os.path.join(filename,
                                            Utilities.joinext("__init__", ext))
                    if initFile in files:
                        inpackage = True
                        files.remove(initFile)
                        files.insert(0, initFile)
            else:
                if Utilities.isWindowsPlatform() and glob.has_magic(filename):
                    files = glob.glob(filename)
                else:
                    files = [filename]

            for file in files:
                skipIt = False
                for pattern in excludePatterns:
                    if fnmatch.fnmatch(os.path.basename(file), pattern):
                        skipIt = True
                        break
                if skipIt:
                    continue

                try:
                    module = Utilities.ModuleParser.readModule(
                        file,
                        basename=basename,
                        inpackage=inpackage,
                        extensions=supportedExtensions)
                    moduleDocument = ModuleDocument(module, colors, stylesheet)
                    doc = moduleDocument.genDocument()
                except IOError as v:
                    sys.stderr.write("{0} error: {1}\n".format(file, v))
                    continue
                except ImportError as v:
                    sys.stderr.write("{0} error: {1}\n".format(file, v))
                    continue

                input = input + 1

                f = Utilities.joinext(
                    os.path.join(outputDir, moduleDocument.name()), ".html")

                # remember for index file generation
                indexGenerator.remember(file, moduleDocument, basename)

                # remember for QtHelp generation
                if qtHelpCreation:
                    qtHelpGenerator.remember(file, moduleDocument, basename)

                if (noempty or file.endswith('__init__.py')) \
                   and moduleDocument.isEmpty():
                    continue

                # generate output
                try:
                    out = open(f, "w", encoding="utf-8", newline=newline)
                    out.write(doc)
                    out.close()
                except IOError as v:
                    sys.stderr.write("{0} error: {1}\n".format(file, v))
                else:
                    sys.stdout.write("{0} ok\n".format(f))

                output = output + 1
    sys.stdout.flush()
    sys.stderr.flush()

    # write index files
    if doIndex:
        indexGenerator.writeIndices(basename, newline=newline)

    # generate the QtHelp files
    if qtHelpCreation:
        qtHelpGenerator.generateFiles(newline=newline)

    sys.exit(0)
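
For reference, the --exclude-file patterns collected above are matched against file basenames with fnmatch, so shell-style wildcards apply; a minimal sketch:

import fnmatch
import os

excludePatterns = ["test_*.py", "*_rc.py"]    # e.g. from repeated --exclude-file options
file = "pkg/test_widgets.py"
skipIt = any(fnmatch.fnmatch(os.path.basename(file), pattern)
             for pattern in excludePatterns)
print(skipIt)   # True -> this file would not be documented
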
import os
import google.cloud
import sys
import pandas as pd
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import Utilities as utils
import Constants as c
from google.cloud import bigquery
from concurrent.futures import ThreadPoolExecutor
logger = utils.get_logger()


def create_directory(name):
    try:
        os.mkdir(name)
    except OSError as error:
        print(error)


project_name = "angular/angular"
dir_name = project_name.split('/')[1]
outputDirectoryPath = "scripts/exports/{directory}".format(directory=dir_name)


def get_PRs(project_name, task_name, version):
    header = [
        c.PROJECT, c.VERSION, c.DATE, c.TASK, c.T_LINE, c.T_MODULE, c.NT,
        c.T_COMMITS, c.MODULE, c.LINE, c.CONTRIBUTIONS, c.CONTRIBUTORS,
        c.COMMENTS, c.COMMENTERS
    ]
Beispiel #48
0
def train(bit_model, input_shape, classes, x_train, x_test, y_train, y_test,
          batch_size):
    """
    Train each model within the generation 

    return 
    [bit_model, acc, model]
    bit_model       String of binary 1/0 for offspring generation 
    Acc             Accuracy of the model for fitness calculation 
    Model           Keras model for saving the json file 
    """
    optimizer = 'adam'  #Can be optimized with GA
    cost = 'mse'
    epochs = 100

    #Find out how to use the learning rate
    num_hidden_layers, num_nodes_per_layer, learning_rate, activation_function_per_layer, dropout_rate, used = TOOLS.ANN_bit_to_model(
        bit_model, MAX_NUM_HIDDEN_LAYERS, MAX_NUM_NODES, classes)

    network = Networks.Model()
    model = network.create_architecture(num_hidden_layers, num_nodes_per_layer,
                                        activation_function_per_layer,
                                        dropout_rate, input_shape, optimizer,
                                        cost)
    acc = network.train(model, x_train, x_test, y_train, y_test, batch_size,
                        epochs)
    #print(model)
    #model.save_weights("model.h5")
    #print("Saved model to disk")
    #exit()
    return [bit_model, acc, model]
Beispiel #49
0
    def __init__(self, args, poisonIndex, transform=None):
        self.fineTuneFile = open(args.dataSplitDirectory + "fineTuneFile.txt")
        self.poisonFile = open(args.dataSplitDirectory + args.architecture + "_Poison.txt")
        self.poisonIndex = poisonIndex
        self.transform = transform
        self.classBalance = copy.deepcopy(args.classBalance)
        self.examples = copy.deepcopy(args.classBalance)
        self.K = args.K
        self.replicateImbalance = args.replicateImbalance
        self.extractedFeatures = torch.load(args.featureDirectory + args.architecture + "_CIFAR10_Features.pth")

        self.imageFiles = {}
        self.balancedImages = []
        self.filteredImages = []

        self.convexPolytopePoison = []

        self.addIndex = set()

        for line in self.poisonFile:
            imgLocation, ID = line.split()
            index = imgLocation.split("/")[-1]
            ID = int(ID.strip("\n"))

            if ID not in self.imageFiles.keys():
                self.imageFiles[ID] = []

            if args.poisonImageDirectory + str(self.poisonIndex) in imgLocation:
                self.addIndex.add(index)
                self.imageFiles[ID].append((imgLocation, ID))
                self.classBalance[ID] = self.classBalance[ID] - 1
                self.convexPolytopePoison.append(imgLocation)

        for line in self.fineTuneFile:
            imgLocation, ID = line.split()
            index = imgLocation.split("/")[-1]
            ID = int(ID.strip("\n"))

            if ID not in self.imageFiles.keys():
                self.imageFiles[ID] = []

            if index not in self.addIndex and self.classBalance[ID] > 0:
                self.addIndex.add(index)
                self.imageFiles[ID].append((imgLocation, ID))
                self.classBalance[ID] = self.classBalance[ID] - 1

        if self.replicateImbalance:
            maxClass = max(self.examples)
            classWeight = []

            for i in self.examples:
                classWeight.append(math.ceil(maxClass / i))

            for key in self.imageFiles:
                self.balancedImages = self.balancedImages + (classWeight[key] * self.imageFiles[key])[0:maxClass]
        else:
            for key in self.imageFiles:
                self.balancedImages = self.balancedImages + self.imageFiles[key]

        KNN = KNeighborsClassifier(algorithm='brute', n_neighbors=self.K)

        trainFeatures = []
        trainLabels = []

        for data in self.balancedImages:
            imgLocation, ID = data
            FV = self.extractedFeatures[imgLocation].cpu().numpy()
            trainFeatures.append(FV)
            trainLabels.append(ID)

        KNN.fit(trainFeatures, trainLabels)
        KNNLabels = KNN.predict(trainFeatures)

        cleanImages = np.equal(KNNLabels, trainLabels)

        TP, FP, TN, FN = 0, 0, 0, 0

        for data, valid in zip(self.balancedImages, cleanImages):
            imgLocation, ID = data
            if valid:
                self.filteredImages.append((imgLocation, ID))
                if imgLocation not in self.convexPolytopePoison:
                    TP = TP + 1
                else:
                    FP = FP + 1
            else:
                if imgLocation in self.convexPolytopePoison:
                    TN = TN + 1
                else:
                    FN = FN + 1

        try:
            MCC = ((TP * TN) - (FP * FN)) / np.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
        except Exception:
            MCC = None

        print("True Positive: " + str(TP) + " | " + "True Negative: " + str(TN) + " | " + "False Positive: " + str(FP) + " | " + "False Negative: " + str(FN))
        print("Matthews Correlation Coefficient: " + str(MCC))

        Utilities.writeLog(args.logFileLocation, "True Positive: " + str(TP) + " | " + "True Negative: " + str(TN) + " | " + "False Positive: " + str(FP) + " | " + "False Negative: " + str(FN))
        Utilities.writeLog(args.logFileLocation, "Matthews Correlation Coefficient: " + str(MCC))
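
The Matthews correlation coefficient computed by hand above can be cross-checked with scikit-learn, which is already imported here for KNeighborsClassifier. This is only a sketch with invented labels, using 1 for images kept as clean and 0 for images flagged as poison:

import numpy as np
from sklearn.metrics import matthews_corrcoef

keep_truth = np.array([1, 1, 1, 0, 0, 1, 0, 1])       # hypothetical ground truth
keep_prediction = np.array([1, 0, 1, 0, 1, 1, 0, 1])  # hypothetical filter output

print(matthews_corrcoef(keep_truth, keep_prediction))
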
Beispiel #50
0
        model = SENet18()

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    CheckPoint = torch.load(checkPointDirectory + "/" + modelName + ".pth")
    model.load_state_dict(CheckPoint["net"])
    model.to(device)

    files = ["./CIFAR10/DataSplit/trainFile.txt", "./CIFAR10/DataSplit/testFile.txt",
             "./CIFAR10/DataSplit/fineTuneFile.txt", "./CIFAR10/DataSplit/" + modelName + "_Poison.txt"]

    extractedFeatures = {}
    for testFile in files:
        CIFAR = dataLoader.ConvexPolytopeFeatureExtraction_DataLoader(testFile, validationAugmentation)
        testData = DataLoader(CIFAR, batch_size=BATCHSIZE, shuffle=False, num_workers=WORKERS)

        featureVector, ID, imgLocation = Utilities.featureExtraction(model, device, testData)

        for data in zip(featureVector, imgLocation):
            FV, IL = data
            extractedFeatures[IL] = FV


    torch.save(extractedFeatures, "./CIFAR10/Features/" + modelName + "_CIFAR10_Features.pth")





Beispiel #51
0
    global UPConfig
    UPConfig = UPConfig.ReadUPConfigFromGDB("..//testing","calaveras_complete.gdb")
    TimeStep = UPConfig['TimeSteps'][0]
    
#     Logger("Testing Making Allocation Table")
#     allocTable = MakeAllocTables(UPConfig)
#     Logger("Testing Saving Allocation Table")
#     try:
#         Utilities.SavePdDataFrameToTable(allocTable, "..//testing/Calaveras.gdb", 'z_testTable')
#     except Exception, e:
#         Logger("Saving Allocation Table Failed: {err}".format(err=str(e)))
         
    
    Logger("Testing DevSpace Calculations")
    devSpace = MakeDevSpace(UPConfig, TimeStep)
    Utilities.SavePdDataFrameToTable(devSpace, "..//testing/Calaveras.gdb", "z_devspace")
    Logger("DevSpace Calculation Complete")
     
    Logger("Testing Is Higher Priority")
     
    print(IsHigherPriority(UPConfig,'rm',['rh','rm','rl'],[])) # should return True
    print(IsHigherPriority(UPConfig,'rm',['rh','rm','rl'],['rl'])) # should return True
    print(IsHigherPriority(UPConfig,'rm',['rh','rm','rl'],['rh'])) # should return False
    print(IsHigherPriority(UPConfig,'rh',['rh','rm','rl'],['rh'])) # should return True
     
    Logger("Testing Is Higher Priority Complete")
    
    
#     Logger("Test Add LU Field")
#     AddLUField(UPConfig,"upo_cum_alloc_ts1")
    
Beispiel #52
0
def main(N,
         timesteps,
         agent,
         params,
         dt,
         det=False,
         r_0=np.zeros(2),
         der_0=np.zeros(2),
         sc_0=np.identity(2),
         desc_0=0 * np.identity(2),
         bench=-0.1,
         bench_noise=0.3):

    a, dea, d, b, e = Tools.Matrices_Calculator(
        'Fisher', params)  #calculation of 2x2 matrices
    A, deA, D, B, E = matrix_parallel_Fisher(
        N, a, dea, d, b, e)  #calculation of 10x2x2 matrices

    SY = symplectic(N)  #10x2x2 symplectic form
    R, deR, SC, deSC = initial_conditions(N, r_0, der_0, sc_0,
                                          desc_0)  #fix initial conditions

    R2, deR2, SC2, deSC2 = R, deR, SC, deSC  #fix initial conditions for dumb action
    dW = np.random.randn(N, 2) * (
        dt**0.5
    )  #extraction of wiener increments (the same for both network and dumb feedback)
    dedW = (2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                             (deR)) * dt  #dedw
    CURR = -(2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                              (R)) * dt + dW  #current calculation

    dedW2 = (2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                              (deR2)) * dt  #dedw for dumb action
    CURR2 = -(2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                               (R2)) * dt + dW  #current for dumb action

    Fisher_Cl = 0  #initialize fisher for agent
    Fisher_Cl2 = 0  #initialize fisher for dumb action

    INFOS = []  #empty list for dicts of infos on agent
    INFOS2 = []  #empty list for dicts of infos on dumb action

    obs = obs_prep(
        R, deR, CURR, SC, deSC,
        N)  #prepare observation to match observation shape of the model

    #evolution and information collection for all of trajectories at the same time
    for i in range(0, timesteps):

        if i % int(timesteps * 1e-1) == 0:
            percentage = (i / (timesteps * 1e-2))
            print('{}%'.format(percentage))

        action, _tmp = agent.predict(obs, deterministic=det)
        action = np.array(action[:, 0])

        J = action[:, np.newaxis, np.newaxis] * SY
        R, SC, deR, deSC = fishsystem_nocost_RK4(R, deR, SC, deSC, A, deA, D,
                                                 B, E, dt, dW, dedW, J)
        dFisher_Cl = dClassical_Fisher(deR, B, dt)
        Fisher_Cl = Fisher_Cl + dFisher_Cl
        Fisher_Quantum = Quantum_Fisher(N, SC, deSC, deR)

        action2 = benchmark(N, bench, bench_noise)
        J2 = action2[:, np.newaxis, np.newaxis] * SY
        R2, SC2, deR2, deSC2 = fishsystem_nocost_RK4(R2, deR2, SC2, deSC2, A,
                                                     deA, D, B, E, dt, dW,
                                                     dedW2, J2)
        dFisher_Cl2 = dClassical_Fisher(deR2, B, dt)
        Fisher_Cl2 = Fisher_Cl2 + dFisher_Cl2
        Fisher_Quantum2 = Quantum_Fisher(N, SC2, deSC2, deR2)

        #data collection
        R_mean = np.mean(R, axis=0)
        deR_mean = np.mean(deR, axis=0)
        SC_mean = np.mean(SC, axis=0)
        deSC_mean = np.mean(deSC, axis=0)
        Fisher_Cl_mean = np.mean(Fisher_Cl, axis=0)
        Fisher_Quantum_mean = np.mean(Fisher_Quantum, axis=0)
        action_mean = np.mean(action, axis=0)

        R_std = np.std(R, axis=0)
        deR_std = np.std(deR, axis=0)
        SC_std = np.std(SC, axis=0)
        deSC_std = np.std(deSC, axis=0)
        Fisher_Cl_std = np.std(Fisher_Cl, axis=0)
        Fisher_Quantum_std = np.std(Fisher_Quantum, axis=0)
        action_std = np.std(action, axis=0)

        R2_mean = np.mean(R2, axis=0)
        deR2_mean = np.mean(deR2, axis=0)
        SC2_mean = np.mean(SC2, axis=0)
        deSC2_mean = np.mean(deSC2, axis=0)
        Fisher_Cl2_mean = np.mean(Fisher_Cl2, axis=0)
        Fisher_Quantum2_mean = np.mean(Fisher_Quantum2, axis=0)
        action2_mean = np.mean(action2, axis=0)

        R2_std = np.std(R2, axis=0)
        deR2_std = np.std(deR2, axis=0)
        SC2_std = np.std(SC2, axis=0)
        deSC2_std = np.std(deSC2, axis=0)
        Fisher_Cl2_std = np.std(Fisher_Cl2, axis=0)
        Fisher_Quantum2_std = np.std(Fisher_Quantum2, axis=0)
        action2_std = np.std(action2, axis=0)

        infos = {}  #empty dict for infos on agent
        infos2 = {}  #empty dict for infos on dumb action

        infos['r_0 mean'] = R_mean[0]
        infos['r_1 mean'] = R_mean[1]
        infos['der_0 mean'] = deR_mean[0]
        infos['der_1 mean'] = deR_mean[1]
        infos['sc_00 mean'] = SC_mean[0, 0]
        infos['sc_01 mean'] = SC_mean[0, 1]
        infos['sc_10 mean'] = SC_mean[1, 0]
        infos['sc_11 mean'] = SC_mean[1, 1]
        infos['desc_00 mean'] = deSC_mean[0, 0]
        infos['desc_01 mean'] = deSC_mean[0, 1]
        infos['desc_10 mean'] = deSC_mean[1, 0]
        infos['desc_11 mean'] = deSC_mean[1, 1]
        infos['Fisher_Cl mean'] = Fisher_Cl_mean
        infos['Fisher_Quantum mean'] = Fisher_Quantum_mean
        infos['action mean'] = action_mean

        infos['r_0 std'] = R_std[0]
        infos['r_1 std'] = R_std[1]
        infos['der_0 std'] = deR_std[0]
        infos['der_1 std'] = deR_std[1]
        infos['sc_00 std'] = SC_std[0, 0]
        infos['sc_01 std'] = SC_std[0, 1]
        infos['sc_10 std'] = SC_std[1, 0]
        infos['sc_11 std'] = SC_std[1, 1]
        infos['desc_00 std'] = deSC_std[0, 0]
        infos['desc_01 std'] = deSC_std[0, 1]
        infos['desc_10 std'] = deSC_std[1, 0]
        infos['desc_11 std'] = deSC_std[1, 1]
        infos['Fisher_Cl std'] = Fisher_Cl_std
        infos['Fisher_Quantum std'] = Fisher_Quantum_std
        infos['action std'] = action_std

        INFOS.append(infos)

        infos2['r_0 mean'] = R2_mean[0]
        infos2['r_1 mean'] = R2_mean[1]
        infos2['der_0 mean'] = deR2_mean[0]
        infos2['der_1 mean'] = deR2_mean[1]
        infos2['sc_00 mean'] = SC2_mean[0, 0]
        infos2['sc_01 mean'] = SC2_mean[0, 1]
        infos2['sc_10 mean'] = SC2_mean[1, 0]
        infos2['sc_11 mean'] = SC2_mean[1, 1]
        infos2['desc_00 mean'] = deSC2_mean[0, 0]
        infos2['desc_01 mean'] = deSC2_mean[0, 1]
        infos2['desc_10 mean'] = deSC2_mean[1, 0]
        infos2['desc_11 mean'] = deSC2_mean[1, 1]
        infos2['Fisher_Cl mean'] = Fisher_Cl2_mean
        infos2['Fisher_Quantum mean'] = Fisher_Quantum2_mean
        infos2['action mean'] = action2_mean

        infos2['r_0 std'] = R2_std[0]
        infos2['r_1 std'] = R2_std[1]
        infos2['der_0 std'] = deR2_std[0]
        infos2['der_1 std'] = deR2_std[1]
        infos2['sc_00 std'] = SC2_std[0, 0]
        infos2['sc_01 std'] = SC2_std[0, 1]
        infos2['sc_10 std'] = SC2_std[1, 0]
        infos2['sc_11 std'] = SC2_std[1, 1]
        infos2['desc_00 std'] = deSC2_std[0, 0]
        infos2['desc_01 std'] = deSC2_std[0, 1]
        infos2['desc_10 std'] = deSC2_std[1, 0]
        infos2['desc_11 std'] = deSC2_std[1, 1]
        infos2['Fisher_Cl std'] = Fisher_Cl2_std
        infos2['Fisher_Quantum std'] = Fisher_Quantum2_std
        infos2['action std'] = action2_std

        INFOS2.append(infos2)

        dW = np.random.randn(N, 2) * (dt**0.5)
        dedW = (2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))), (deR)) * dt
        CURR = -(2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                                  (R)) * dt + dW
        dedW2 = (2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                                  (deR2)) * dt
        CURR2 = -(2**0.5) * prod_v((np.transpose(B, axes=(0, 2, 1))),
                                   (R2)) * dt + dW

        obs = obs_prep(R, deR, CURR, SC, deSC, N)

    #save in dataframes
    DATA = pd.DataFrame(INFOS)
    DATA2 = pd.DataFrame(INFOS2)

    return DATA, DATA2
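
The two infos/infos2 blocks above differ only in the variable suffix; a helper along these lines (names are my own, not from the original script) would build the same mean/std dictionary for either set of trajectories:

import numpy as np

def collect_stats(R, deR, SC, deSC, fisher_cl, fisher_quantum, action):
    # build the per-timestep summary used above: ensemble means and
    # standard deviations over the N trajectories
    stats = {}
    vectors = {'r': R, 'der': deR}
    matrices = {'sc': SC, 'desc': deSC}
    scalars = {'Fisher_Cl': fisher_cl, 'Fisher_Quantum': fisher_quantum,
               'action': action}
    for stat, func in [('mean', np.mean), ('std', np.std)]:
        for name, arr in vectors.items():
            reduced = func(arr, axis=0)
            stats['{}_0 {}'.format(name, stat)] = reduced[0]
            stats['{}_1 {}'.format(name, stat)] = reduced[1]
        for name, arr in matrices.items():
            reduced = func(arr, axis=0)
            for a in range(2):
                for b in range(2):
                    stats['{}_{}{} {}'.format(name, a, b, stat)] = reduced[a, b]
        for name, arr in scalars.items():
            stats['{} {}'.format(name, stat)] = func(arr, axis=0)
    return stats

# tiny demo with random data shaped like the ensembles above (N trajectories)
N = 4
demo = collect_stats(np.random.randn(N, 2), np.random.randn(N, 2),
                     np.random.randn(N, 2, 2), np.random.randn(N, 2, 2),
                     np.random.randn(N), np.random.randn(N), np.random.randn(N))
print(sorted(demo)[:4])
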
Beispiel #53
0
    plt.clf()

    plt.plot(epochs, val_loss, 'r', label='Validation Loss')
    plt.plot(epochs, train_loss, 'b', label='Train Loss')
    plt.title('Loss / Mean Squared Error')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('model_phase01_loss.png')

if __name__ == "__main__":
    script_start = datetime.datetime.now()

    #minMaxValues = Utilities.readMinMaxFromCSV(minMaxCSVpath)
    [images, categories, filenames] = Utilities.loadImagesAndCategories(images, imgsDir, categories, normalizedDataPath, phase = 1, inputWidth = inputWidth, inputHeight = inputHeight)

    [testImages, testLabels] = trainTestDatasetSplit(images, categories)

    model_name = "model_phase01.h5"

    model = cnn.create_model(inputWidth, inputHeight, 1, outputNo)

    # change to cnn input format
    df_im = np.asarray(images)
    df_im = df_im.reshape(df_im.shape[0], inputWidth, inputHeight, 1)
    df_cat = np.asarray(categories)
    df_cat = df_cat.reshape(df_cat.shape[0], outputNo)
    tr_im, val_im, tr_cat, val_cat = train_test_split(df_im, df_cat, test_size=0.2)

    tensorboard = TensorBoard(log_dir=imgsDir + "logs_img1" + "\{}".format(time()))
Beispiel #54
0
def MakeDevSpace(UPConfig,TimeStep):
    """
    Make the devSpaceTable to contain the list of unconstrained space for each land use in each polygon.
    This accounts for the effects of all constraints. It also contains the general plan class for each polygon.
    The last step fills in all No Data values with 0 (assuming that if there is no data, there is also no available space). 
    
    Called By:
    Allocation.PriAlloc
    
    Calls:
    MakePolyList
    MakeGPList
    Utilities.MergeDataFrames
    
    Arguments:
    UPConfig
    TimeStep (as list) - ['ts1','timestep1']
    
    Returns:
    devSpaceTable
    """

    # Build DevSpaceTable for this time step
    Logger("Preparing Developable Space Table")
    devSpaceTable = MakePolyList(UPConfig)
    
    
    for lu in UPConfig['LUPriority']: 

        # get unconstrained space
        dswhereclause = """ TimeStep = '{ts}' and lu = '{lu}' """.format(ts= TimeStep[0],lu=lu)
        dsarray = arcpy.da.TableToNumPyArray(os.path.join(UPConfig['paths']['dbpath'],UPConfig['paths']['dbname'],'up_const'),[UPConfig['BaseGeom_id'],'developable_acres'],dswhereclause) # TODO: rename this to unconstrained_acres
        dsdf = pd.DataFrame(dsarray)
        dsdf.columns = [[UPConfig['BaseGeom_id'],'uncon_ac_{ts}_{lu}'.format(ts=TimeStep[0],lu=lu)]]
        dsarray = None
        # get gp permissablity (boolean)
        gparray = arcpy.da.TableToNumPyArray(os.path.join(UPConfig['paths']['dbpath'],UPConfig['paths']['dbname'],'up_bg_gp_avail_{ts}'.format(ts=TimeStep[0])),[UPConfig['BaseGeom_id'],'gp_{lu}'.format(lu=lu)])
        gpdf = pd.DataFrame(gparray)
        gpdf.columns = [[UPConfig['BaseGeom_id'],'gp_{ts}_{lu}'.format(ts=TimeStep[0],lu=lu)]]
        gparray = None
        # get net weights
        wtwhereclause = """ timestep = '{ts}' and lu = '{lu}' """.format(ts= TimeStep[0],lu=lu)
        wtarray = arcpy.da.TableToNumPyArray(os.path.join(UPConfig['paths']['dbpath'],UPConfig['paths']['dbname'],'up_net_weights'),[UPConfig['BaseGeom_id'],'weight'],wtwhereclause)
        wtdf = pd.DataFrame(wtarray)
        wtdf.columns = [[UPConfig['BaseGeom_id'],'wt_{ts}_{lu}'.format(ts=TimeStep[0],lu=lu)]]
        wtarray = None
        
#         #for debug
#         if lu == 'RH':
#             df1 = Utilities.ConvertPdDataFrameToNumpyArray(devSpaceTable)
#             df2 = Utilities.ConvertPdDataFrameToNumpyArray(gpdf)
#             df3 = Utilities.ConvertPdDataFrameToNumpyArray(dsdf)
#             df4 = Utilities.ConvertPdDataFrameToNumpyArray(wtdf)
            
#             arcpy.da.NumPyArrayToTable(devSpaceTable,r"G:\Public\UPLAN\Calaveras\Debug\devSpaceTable.csv")
#             arcpy.da.NumPyArrayToTable(gpdf,r"G:\Public\UPLAN\Calaveras\Debug\gpdf.csv")
#             arcpy.da.NumPyArrayToTable(dsdf,r"G:\Public\UPLAN\Calaveras\Debug\dsdf.csv")
#             arcpy.da.NumPyArrayToTable(wtdf,r"G:\Public\UPLAN\Calaveras\Debug\wtdf.csv")
            
#             Utilities.SavePdDataFrameToTable(df1, r"G:\Public\UPLAN\Calaveras\Debug", 'devSpaceTable.csv')
#             Utilities.SavePdDataFrameToTable(df2, r"G:\Public\UPLAN\Calaveras\Debug", 'gpdf.csv')
#             Utilities.SavePdDataFrameToTable(df3, r"G:\Public\UPLAN\Calaveras\Debug", 'dsdf.csv')
#             Utilities.SavePdDataFrameToTable(df4, r"G:\Public\UPLAN\Calaveras\Debug", 'wtdf.csv')
            
        # create table with developable space, gp availability (boolean), and net weight for each parcel
        devSpaceTable = Utilities.MergeDataFrames([devSpaceTable, gpdf,dsdf,wtdf], str(UPConfig['BaseGeom_id']))
#         devSpaceTable.set_index([UPConfig['BaseGeom_id']])
#         devSpaceTable[UPConfig['BaseGeom_id']] = devSpaceTable.index #- this causes a change in parcelIDs
        
        #for debug
#         Utilities.SavePdDataFrameToTable(devSpaceTable, r"G:\Public\UPLAN\Calaveras\Debug", 'devSpaceTable.csv')
#         Utilities.SavePdDataFrameToTable(gpdf, r"G:\Public\UPLAN\Calaveras\Debug", 'gpdf.csv')
#         Utilities.SavePdDataFrameToTable(dsdf, r"G:\Public\UPLAN\Calaveras\Debug", 'dsdf.csv')
#         Utilities.SavePdDataFrameToTable(wtdf, r"G:\Public\UPLAN\Calaveras\Debug", 'wtdf.csv')
           
    # get General Plans
    gplans = MakeGPList(UPConfig,TimeStep)
     
    # Redev Table. 
    if UPConfig['Redev'] is not None:
        redevTable = os.path.join(UPConfig['paths']['dbpath'],UPConfig['paths']['dbname'],UPConfig['Redev'])
        flds = [UPConfig['BaseGeom_id'],UPConfig['Redev_pop'],UPConfig['Redev_emp']]
        redevarray = arcpy.da.TableToNumPyArray(redevTable,flds,skip_nulls=True)
        reDevDF = pd.DataFrame(redevarray) 
        devSpaceTable = Utilities.MergeDataFrames([devSpaceTable, gplans,reDevDF], UPConfig['BaseGeom_id'])
    else:
        devSpaceTable = Utilities.MergeDataFrames([devSpaceTable, gplans], str(UPConfig['BaseGeom_id']))
         
    # fix null values 
    devSpaceTable = devSpaceTable.fillna(0)
    
    return(devSpaceTable)
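# --- Illustrative usage sketch, not part of the original module ---
# MakeDevSpace is normally called from Allocation.PriAlloc; the stand-alone call
# below only illustrates the argument shapes documented above.  It assumes
# UPConfig has already been loaded by the surrounding UPlan tooling (the loader
# itself is not shown here and is an assumption).
def make_dev_space_example(UPConfig):
    TimeStep = ['ts1', 'timestep1']           # [timestep code, timestep name]
    devSpaceTable = MakeDevSpace(UPConfig, TimeStep)
    Logger("devSpaceTable columns: {}".format(list(devSpaceTable.columns)))
    return devSpaceTable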
def train(args, arguments):
    batch_time = Utilities.AverageMeter()
    losses = Utilities.AverageMeter()

    # switch to train mode
    arguments['detr'].train()
    end = time.time()

    train_loader_len = int(math.ceil(arguments['TADL'].shard_size / args.batch_size))
    i = 0
    arguments['TADL'].reset_avail_winds(arguments['epoch'])

    ########################################################
    #_, inputs, _, targets, _ = arguments['TADL'].get_batch()
    #targets = transform_targets(targets)
    #lr_scheduler = torch.optim.lr_scheduler.StepLR(arguments['optimizer'], args.lrsp,
    #                                                          args.lrm)
    ########################################################
    ########################################################
    #while True:
    ########################################################
    while i * arguments['TADL'].batch_size < arguments['TADL'].shard_size:
        # get the noisy inputs and the labels
        _, inputs, _, targets, _ = arguments['TADL'].get_batch()

        mean = torch.mean(inputs, 1, True)
        inputs = inputs-mean
            
        # zero the parameter gradients
        arguments['optimizer'].zero_grad()

        # forward + backward + optimize
        inputs = inputs.unsqueeze(1)
        outputs = arguments['detr'](inputs)

        ########################################################
        #inputs = inputs.squeeze(1)
        ########################################################

        # Compute the loss
        targets = transform_targets(targets)
        loss_dict = arguments['criterion'].forward(outputs=outputs, targets=targets)
        weight_dict = arguments['criterion'].weight_dict
        loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)

        # compute gradient and do optimizer step
        loss.backward()
        torch.nn.utils.clip_grad_norm_(arguments['detr'].parameters(), 0.1)
        arguments['optimizer'].step()

        #if args.test:
            #if i > 10:
                #break

        if i%args.print_freq == 0:
        #if i%args.print_freq == 0 and i != 0:
            # Every print_freq iterations, check the loss and speed.
            # For best performance, it doesn't make sense to print these metrics every
            # iteration, since they incur an allreduce and some host<->device syncs.

            # Average loss across processes for logging
            if args.distributed:
                reduced_loss = Utilities.reduce_tensor(loss.data, args.world_size)
            else:
                reduced_loss = loss.data

            # to_python_float incurs a host<->device sync
            losses.update(Utilities.to_python_float(reduced_loss), args.batch_size)

            if not args.cpu:
                torch.cuda.synchronize()

            batch_time.update((time.time() - end)/args.print_freq, args.print_freq)
            end = time.time()

            if args.local_rank == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Speed {3:.3f} ({4:.3f})\t'
                      'Loss {loss.val:.10f} ({loss.avg:.4f})'.format(
                      arguments['epoch'], i, train_loader_len,
                      args.world_size*args.batch_size/batch_time.val,
                      args.world_size*args.batch_size/batch_time.avg,
                      batch_time=batch_time,
                      loss=losses))

        i += 1
        ########################################################
        #lr_scheduler.step()
        ########################################################

    arguments['loss_history'].append(losses.avg)

    return batch_time.sum, batch_time.avg
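# --- Illustrative sketch, not part of the original module ---
# The logging block in train() averages the loss across processes before it is
# recorded.  A helper such as Utilities.reduce_tensor typically performs a
# distributed all-reduce followed by division by the world size; the function
# below is an assumption about that behaviour, not the project's actual code.
import torch.distributed as dist

def reduce_tensor_sketch(tensor, world_size):
    # sum the tensor across all ranks, then average it
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    reduced /= world_size
    return reduced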
    def __init__(self, parent=None, doLoadPlugins=True, develPlugin=None):
        """
        Constructor
        
        The Plugin Manager deals with three different plugin directories.
        The first is the one that is part of eric6 (eric6/Plugins). The
        second is the global plugin directory called 'eric6plugins',
        which is located inside the site-packages directory. The last one
        is the user plugin directory located inside the .eric6 directory
        of the user's home directory.
        
        @param parent reference to the parent object (QObject)
        @keyparam doLoadPlugins flag indicating that plugins should
            be loaded (boolean)
        @keyparam develPlugin filename of a plugin to be loaded for
            development (string)
        @exception PluginPathError raised to indicate an invalid plug-in path
        """
        super(PluginManager, self).__init__(parent)

        self.__ui = parent
        self.__develPluginFile = develPlugin
        self.__develPluginName = None

        self.__inactivePluginsKey = "PluginManager/InactivePlugins"

        self.pluginDirs = {
            "eric6":
            os.path.join(getConfig('ericDir'), "Plugins"),
            "global":
            os.path.join(Utilities.getPythonModulesDirectory(),
                         "eric6plugins"),
            "user":
            os.path.join(Utilities.getConfigDir(), "eric6plugins"),
        }
        self.__priorityOrder = ["eric6", "global", "user"]

        self.__defaultDownloadDir = os.path.join(Utilities.getConfigDir(),
                                                 "Downloads")

        self.__activePlugins = {}
        self.__inactivePlugins = {}
        self.__onDemandActivePlugins = {}
        self.__onDemandInactivePlugins = {}
        self.__activeModules = {}
        self.__inactiveModules = {}
        self.__onDemandActiveModules = {}
        self.__onDemandInactiveModules = {}
        self.__failedModules = {}

        self.__foundCoreModules = []
        self.__foundGlobalModules = []
        self.__foundUserModules = []

        self.__modulesCount = 0

        pdirsExist, msg = self.__pluginDirectoriesExist()
        if not pdirsExist:
            raise PluginPathError(msg)

        if doLoadPlugins:
            if not self.__pluginModulesExist():
                raise PluginModulesError

            self.__insertPluginsPaths()

            self.__loadPlugins()

        self.__checkPluginsDownloadDirectory()

        self.pluginRepositoryFile = \
            os.path.join(Utilities.getConfigDir(), "PluginRepository")

        # attributes for the network objects
        self.__networkManager = QNetworkAccessManager(self)
        self.__networkManager.proxyAuthenticationRequired.connect(
            proxyAuthenticationRequired)
        if SSL_AVAILABLE:
            self.__sslErrorHandler = E5SslErrorHandler(self)
            self.__networkManager.sslErrors.connect(self.__sslErrors)
        self.__replies = []
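# --- Illustrative usage sketch, not part of eric6 itself ---
# The constructor above raises PluginPathError when one of the three plugin
# directories is invalid and PluginModulesError when no plugin modules are
# found, so callers usually guard the instantiation.  "mainWindow" is a
# hypothetical parent QObject used only for illustration.
def create_plugin_manager(mainWindow):
    try:
        return PluginManager(mainWindow, doLoadPlugins=True)
    except (PluginPathError, PluginModulesError) as err:
        print("Plugin manager initialisation failed: {0}".format(err))
        return None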
Beispiel #57
0
        #AvgLArQ   = chainReader.ReadBranch("jet_%s_AverageLArQF"%JET_TYPE)[iJet]
        #LArQmean  = AvgLArQ/65535.

        jet_pt.push_back(jetpt)
        jet_e.push_back(jete)
        jet_eta.push_back(jeteta)
        jet_raw_eta.push_back(
            chainReader.ReadBranch("jet_%s_%s_eta" % (JET_TYPE, SCALE))[iJet])
        jet_phi.push_back(jetphi)
        jet_y.push_back(jety)
        jet_jvtxf.push_back(jetjvtxf)
        jet_origin.push_back(jetorigin)
        jetugly = jetugly + Utilities.TileHotSpotCleaning(
            chainReader.ReadBranch("RunNumber"),
            jeteta,
            jetphi,
            fmax,
            smax,
            verbose=False)
        jetugly = jetugly + Utilities.ChfCleaning(
            jetpt, jeteta, chf, emf, verbose=False)
        jet_isUgly.push_back(jetugly)

        # === Jet cleaning
        #isBadLooser,isBadLooserReason = Utilities.JetID("LooserBad", larq, negE, emf, hecf, jet_time, fmax, em_eta, chf, hecq, LArQmean)
        #isBadLoose,isBadLooseReason   = Utilities.JetID("LooseBad",  larq, negE, emf, hecf, jet_time, fmax, em_eta, chf, hecq, LArQmean)
        #isBadMedium,isBadMediumReason = Utilities.JetID("MediumBad", larq, negE, emf, hecf, jet_time, fmax, em_eta, chf, hecq, LArQmean)
        #isBadTight,isBadTightReason   = Utilities.JetID("TightBad",  larq, negE, emf, hecf, jet_time, fmax, em_eta, chf, hecq, LArQmean)

        isBadLooser = chainReader.ReadBranch("jet_%s_isBadLooseMinus" %
                                             JET_TYPE)[iJet]
def validate(args, arguments):
    average_precision = Utilities.AverageMeter()

    # switch to evaluate mode
    arguments['detr'].eval()

    end = time.time()

    val_loader_len = int(math.ceil(arguments['VADL'].shard_size / args.batch_size))
    i = 0
    arguments['VADL'].reset_avail_winds(arguments['epoch'])
    pred_segments = []
    true_segments = []
    while i * arguments['VADL'].batch_size < arguments['VADL'].shard_size:
        # get the noisy inputs and the labels
        _, inputs, _, targets, labels = arguments['VADL'].get_batch()  # draw the batch from the validation loader

        mean = torch.mean(inputs, 1, True)
        inputs = inputs-mean
            
        with torch.no_grad():
            # forward
            inputs = inputs.unsqueeze(1)
            outputs = arguments['detr'](inputs)

        for j in range(arguments['VADL'].batch_size):
            train_idx = int(j + i * arguments['VADL'].batch_size)

            probabilities = F.softmax(outputs['pred_logits'][j], dim=1)
            aux_pred_segments = outputs['pred_segments'][j]

            for probability, pred_segment in zip(probabilities.to('cpu'), aux_pred_segments.to('cpu')):
                #if probability[-1] < 0.9:
                if torch.argmax(probability) != args.num_classes:
                    segment = [train_idx, np.argmax(probability[:-1]).item(), 1.0 - probability[-1].item(), pred_segment[0].item(), pred_segment[1].item()]
                    pred_segments.append(segment)


            num_pulses = labels[j, 0]

            starts = targets[j, 0]
            widths = targets[j, 1]
            categories = targets[j, 3]
            
            for k in range(int(num_pulses.item())):
                segment = [train_idx, categories[k].item(), 1.0, starts[k].item(), widths[k].item()]
                true_segments.append(segment)


        i += 1


    for threshold in np.arange(0.5, 0.95, 0.05):
        detection_precision=mean_average_precision(device=arguments['device'],
                                                   pred_segments=pred_segments,
                                                   true_segments=true_segments,
                                                   iou_threshold=threshold,
                                                   seg_format="mix",
                                                   num_classes=1)

        if args.distributed:
            reduced_detection_precision = Utilities.reduce_tensor(detection_precision.data, args.world_size)
        else:
            reduced_detection_precision = detection_precision.data


        average_precision.update(Utilities.to_python_float(reduced_detection_precision))

    if not args.evaluate:
        arguments['precision_history'].append(average_precision.avg)

    return average_precision.avg
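# --- Illustrative sketch, not part of the original module ---
# validate() averages the detection precision over IoU thresholds from 0.50 to
# 0.90 in steps of 0.05 (COCO style).  The 1-D IoU below assumes segments are
# given as (start, width); the project's "mix" seg_format may encode
# predictions differently, so this helper is an assumption for illustration.
def segment_iou_1d(seg_a, seg_b):
    a_start, a_end = seg_a[0], seg_a[0] + seg_a[1]
    b_start, b_end = seg_b[0], seg_b[0] + seg_b[1]
    intersection = max(0.0, min(a_end, b_end) - max(a_start, b_start))
    union = (a_end - a_start) + (b_end - b_start) - intersection
    return intersection / union if union > 0 else 0.0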
Beispiel #59
0
def messageHandler(msgType, *args):
    """
    Module function handling messages.
    
    @param msgType type of the message (integer, QtMsgType)
    @param args message handler arguments, for PyQt4 message to be shown
        (bytes), for PyQt5 context information (QMessageLogContext) and
        message to be shown (bytes)
    """
    if len(args) == 2:
        context = args[0]
        message = args[1]
    else:
        message = args[0]
    if __msgHandlerDialog:
        try:
            if msgType == QtDebugMsg:
                messageType = QCoreApplication.translate(
                    "E5ErrorMessage", "Debug Message:")
            elif msgType == QtWarningMsg:
                messageType = QCoreApplication.translate(
                    "E5ErrorMessage", "Warning:")
            elif msgType == QtCriticalMsg:
                messageType = QCoreApplication.translate(
                    "E5ErrorMessage", "Critical:")
            elif msgType == QtFatalMsg:
                messageType = QCoreApplication.translate(
                    "E5ErrorMessage", "Fatal Error:")
            if isinstance(message, bytes):
                message = Utilities.decodeBytes(message)
            message = message.replace("\r\n", "<br/>")\
                             .replace("\n", "<br/>")\
                             .replace("\r", "<br/>")
            if len(args) == 2:
                msg = "<p><b>{0}</b></p><p>{1}</p><p>File: {2}</p>" \
                    "<p>Line: {3}</p><p>Function: {4}</p>".format(
                        messageType, Utilities.html_uencode(message),
                        context.file, context.line, context.function)
            else:
                msg = "<p><b>{0}</b></p><p>{1}</p>".format(
                    messageType, Utilities.html_uencode(message))
            if QThread.currentThread() == qApp.thread():
                __msgHandlerDialog.showMessage(msg)
            else:
                QMetaObject.invokeMethod(
                    __msgHandlerDialog,
                    "showMessage",
                    Qt.QueuedConnection,
                    Q_ARG(str, msg))
            return
        except RuntimeError:
            pass
    elif __origMsgHandler:
        __origMsgHandler(msgType, message)
        return
    
    if msgType == QtDebugMsg:
        messageType = QCoreApplication.translate(
            "E5ErrorMessage", "Debug Message")
    elif msgType == QtWarningMsg:
        messageType = QCoreApplication.translate(
            "E5ErrorMessage", "Warning")
    elif msgType == QtCriticalMsg:
        messageType = QCoreApplication.translate(
            "E5ErrorMessage", "Critical")
    elif msgType == QtFatalMsg:
        messageType = QCoreApplication.translate(
            "E5ErrorMessage", "Fatal Error")
    if isinstance(message, bytes):
        message = message.decode()
    if len(args) == 2:
        print("{0}: {1} in {2} at line {3} ({4})".format(
            messageType, message, context.file, context.line,
            context.function))
    else:
        print("{0}: {1}".format(messageType, message))
def main():
    global best_precision, args
    best_precision = 0
    args = parse()


    if not len(args.data):
        raise Exception("error: No data set provided")


    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1

    args.gpu = 0
    args.world_size = 1

    if args.distributed:
        args.gpu = args.local_rank

        if not args.cpu:
            torch.cuda.set_device(args.gpu)

        torch.distributed.init_process_group(backend='gloo',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    args.total_batch_size = args.world_size * args.batch_size

    # Set the device
    device = torch.device('cpu' if args.cpu else 'cuda:' + str(args.gpu))





    #######################################################################
    #   Start DETR construction
    #######################################################################

    # create DETR backbone

    # create backbone pulse counter
    if args.test:
        args.pulse_counter_arch = 'ResNet10'

    if args.local_rank==0 and args.verbose:
        print("=> creating backbone pulse counter '{}'".format(args.pulse_counter_arch))

    if args.pulse_counter_arch == 'ResNet18':
        backbone_pulse_counter = rn.ResNet18_Counter()
    elif args.pulse_counter_arch == 'ResNet34':
        backbone_pulse_counter = rn.ResNet34_Counter()
    elif args.pulse_counter_arch == 'ResNet50':
        backbone_pulse_counter = rn.ResNet50_Counter()
    elif args.pulse_counter_arch == 'ResNet101':
        backbone_pulse_counter = rn.ResNet101_Counter()
    elif args.pulse_counter_arch == 'ResNet152':
        backbone_pulse_counter = rn.ResNet152_Counter()
    elif args.pulse_counter_arch == 'ResNet10':
        backbone_pulse_counter = rn.ResNet10_Counter()
    else:
        print("Unrecognized {} architecture for the backbone pulse counter" .format(args.pulse_counter_arch))


    backbone_pulse_counter = backbone_pulse_counter.to(device)

    # create backbone feature predictor
    if args.test:
        args.feature_predictor_arch = 'ResNet10'

    if args.local_rank==0 and args.verbose:
        print("=> creating backbone feature predictor '{}'".format(args.feature_predictor_arch))

    if args.feature_predictor_arch == 'ResNet18':
        backbone_feature_predictor = rn.ResNet18_Custom()
    elif args.feature_predictor_arch == 'ResNet34':
        backbone_feature_predictor = rn.ResNet34_Custom()
    elif args.feature_predictor_arch == 'ResNet50':
        backbone_feature_predictor = rn.ResNet50_Custom()
    elif args.feature_predictor_arch == 'ResNet101':
        backbone_feature_predictor = rn.ResNet101_Custom()
    elif args.feature_predictor_arch == 'ResNet152':
        backbone_feature_predictor = rn.ResNet152_Custom()
    elif args.feature_predictor_arch == 'ResNet10':
        backbone_feature_predictor = rn.ResNet10_Custom()
    else:
        print("Unrecognized {} architecture for the backbone feature predictor" .format(args.feature_predictor_arch))


    backbone_feature_predictor = backbone_feature_predictor.to(device)



    # For distributed training, wrap the model with torch.nn.parallel.DistributedDataParallel.
    if args.distributed:
        if args.cpu:
            backbone_pulse_counter = DDP(backbone_pulse_counter)
            backbone_feature_predictor = DDP(backbone_feature_predictor)
        else:
            backbone_pulse_counter = DDP(backbone_pulse_counter, device_ids=[args.gpu], output_device=args.gpu)
            backbone_feature_predictor = DDP(backbone_feature_predictor, device_ids=[args.gpu], output_device=args.gpu)

        if args.verbose:
            print('Since we are in a distributed setting the backbone components are replicated here on local rank {}'
                                    .format(args.local_rank))



    # bring counter from a checkpoint
    if args.counter:
        # Use a local scope to avoid dangling references
        def bring_counter():
            if os.path.isfile(args.counter):
                print("=> loading backbone pulse counter '{}'" .format(args.counter))
                if args.cpu:
                    checkpoint = torch.load(args.counter, map_location='cpu')
                else:
                    checkpoint = torch.load(args.counter, map_location = lambda storage, loc: storage.cuda(args.gpu))

                loss_history_1 = checkpoint['loss_history']
                counter_error_history = checkpoint['Counter_error_history']
                best_error_1 = checkpoint['best_error']
                backbone_pulse_counter.load_state_dict(checkpoint['state_dict'])
                total_time_1 = checkpoint['total_time']
                print("=> loaded counter '{}' (epoch {})"
                                .format(args.counter, checkpoint['epoch']))
                print("Counter best precision saved was {}" .format(best_error_1))
                return best_error_1, backbone_pulse_counter, loss_history_1, counter_error_history, total_time_1
            else:
                print("=> no counter found at '{}'" .format(args.counter))
    
        best_error_1, backbone_pulse_counter, loss_history_1, counter_error_history, total_time_1 = bring_counter()
    else:
        raise Exception("error: No counter path provided")




    # bring predictor from a checkpoint
    if args.predictor:
        # Use a local scope to avoid dangling references
        def bring_predictor():
            if os.path.isfile(args.predictor):
                print("=> loading backbone feature predictor '{}'" .format(args.predictor))
                if args.cpu:
                    checkpoint = torch.load(args.predictor, map_location='cpu')
                else:
                    checkpoint = torch.load(args.predictor, map_location = lambda storage, loc: storage.cuda(args.gpu))

                loss_history_2 = checkpoint['loss_history']
                duration_error_history = checkpoint['duration_error_history']
                amplitude_error_history = checkpoint['amplitude_error_history']
                best_error_2 = checkpoint['best_error']
                backbone_feature_predictor.load_state_dict(checkpoint['state_dict'])
                total_time_2 = checkpoint['total_time']
                print("=> loaded predictor '{}' (epoch {})"
                                .format(args.predictor, checkpoint['epoch']))
                print("Predictor best precision saved was {}" .format(best_error_2))
                return best_error_2, backbone_feature_predictor, loss_history_2, duration_error_history, amplitude_error_history, total_time_2 
            else:
                print("=> no predictor found at '{}'" .format(args.predictor))

        best_error_2, backbone_feature_predictor, loss_history_2, duration_error_history, amplitude_error_history, total_time_2 = bring_predictor()
    else:
        raise Exception("error: No predictor path provided")



    # create backbone
    if args.local_rank==0 and args.verbose:
        print("=> creating backbone")

    if args.feature_predictor_arch == 'ResNet18':
        backbone=build_backbone(pulse_counter=backbone_pulse_counter,
                                feature_predictor=backbone_feature_predictor,
                                num_channels=512)
    elif args.feature_predictor_arch == 'ResNet34':
        backbone=build_backbone(pulse_counter=backbone_pulse_counter,
                                feature_predictor=backbone_feature_predictor,
                                num_channels=512)
    elif args.feature_predictor_arch == 'ResNet50':
        backbone=build_backbone(pulse_counter=backbone_pulse_counter,
                                feature_predictor=backbone_feature_predictor,
                                num_channels=2048)
    elif args.feature_predictor_arch == 'ResNet101':
        backbone=build_backbone(pulse_counter=backbone_pulse_counter,
                                feature_predictor=backbone_feature_predictor,
                                num_channels=2048)
    elif args.feature_predictor_arch == 'ResNet152':
        backbone=build_backbone(pulse_counter=backbone_pulse_counter,
                                feature_predictor=backbone_feature_predictor,
                                num_channels=2048)
    elif args.feature_predictor_arch == 'ResNet10':
        backbone=build_backbone(pulse_counter=backbone_pulse_counter,
                                feature_predictor=backbone_feature_predictor,
                                num_channels=512)
    else:
        print("Unrecognized {} architecture for the backbone feature predictor" .format(args.feature_predictor_arch))


    backbone = backbone.to(device)











    # create DETR transformer
    if args.local_rank==0 and args.verbose:
        print("=> creating transformer")

    if args.test:
        args.transformer_hidden_dim = 64
        args.transformer_num_heads = 2
        args.transformer_dim_feedforward = 256
        args.transformer_num_enc_layers = 2
        args.transformer_num_dec_layers = 2

    args.transformer_pre_norm = True
    transformer = build_transformer(hidden_dim=args.transformer_hidden_dim,
                                    dropout=args.transformer_dropout,
                                    nheads=args.transformer_num_heads,
                                    dim_feedforward=args.transformer_dim_feedforward,
                                    enc_layers=args.transformer_num_enc_layers,
                                    dec_layers=args.transformer_num_dec_layers,
                                    pre_norm=args.transformer_pre_norm)






    # create DETR in itself
    if args.local_rank==0 and args.verbose:
        print("=> creating DETR")

    detr = DT.DETR(backbone=backbone,
                   transformer=transformer,
                   num_classes=args.num_classes,
                   num_queries=args.num_queries)

    detr = detr.to(device)

    # For distributed training, wrap the model with torch.nn.parallel.DistributedDataParallel.
    if args.distributed:
        if args.cpu:
            detr = DDP(detr)
        else:
            detr = DDP(detr, device_ids=[args.gpu], output_device=args.gpu)

        if args.verbose:
            print('Since we are in a distributed setting the DETR model is replicated here on local rank {}'
                                    .format(args.local_rank))



    # Set matcher
    if args.local_rank==0 and args.verbose:
        print("=> set Hungarian Matcher")

    matcher = mtchr.HungarianMatcher(cost_class=args.cost_class,
                                     cost_bsegment=args.cost_bsegment,
                                     cost_giou=args.cost_giou)





    # Set criterion
    if args.local_rank==0 and args.verbose:
        print("=> set criterion for the loss")

    weight_dict = {'loss_ce': args.loss_ce,
                   'loss_bsegment': args.loss_bsegment,
                   'loss_giou': args.loss_giou}

    losses = ['labels', 'segments', 'cardinality']

    criterion = DT.SetCriterion(num_classes=args.num_classes,
                                matcher=matcher,
                                weight_dict=weight_dict,
                                eos_coef=args.eos_coef,
                                losses=losses)

    criterion = criterion.to(device)



    # Set optimizer
    optimizer = Model_Util.get_DETR_optimizer(detr, args)
    if args.local_rank==0 and args.verbose:
        print('Optimizer used for this run is {}'.format(args.optimizer))


    # Set learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lrsp,
                                                              args.lrm)



    total_time = Utilities.AverageMeter()
    loss_history = []
    precision_history = []
    # Optionally resume from a checkpoint
    if args.resume:
        # Use a local scope to avoid dangling references
        def resume():
            if os.path.isfile(args.resume):
                print("=> loading checkpoint '{}'" .format(args.resume))
                if args.cpu:
                    checkpoint = torch.load(args.resume, map_location='cpu')
                else:
                    checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))

                loss_history = checkpoint['loss_history']
                precision_history = checkpoint['precision_history']
                start_epoch = checkpoint['epoch']
                best_precision = checkpoint['best_precision']
                detr.load_state_dict(checkpoint['state_dict'])
                criterion.load_state_dict(checkpoint['criterion'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
                total_time = checkpoint['total_time']
                print("=> loaded checkpoint '{}' (epoch {})"
                                .format(args.resume, checkpoint['epoch']))
                return start_epoch, detr, criterion, optimizer, lr_scheduler, loss_history, precision_history, total_time, best_precision 
            else:
                print("=> no checkpoint found at '{}'" .format(args.resume))
    
        args.start_epoch, detr, criterion, optimizer, lr_scheduler, loss_history, precision_history, total_time, best_precision = resume()







    # Data loading code
    if len(args.data) == 1:
        traindir = os.path.join(args.data[0], 'train')
        valdir = os.path.join(args.data[0], 'val')
    else:
        traindir = args.data[0]
        valdir = args.data[1]

    if args.test:
        training_f = h5py.File(traindir + '/train_toy.h5', 'r')
        validation_f = h5py.File(valdir + '/validation_toy.h5', 'r')
    else:
        training_f = h5py.File(traindir + '/train.h5', 'r')
        validation_f = h5py.File(valdir + '/validation.h5', 'r')


    # this is the dataset for training
    sampling_rate = 10000                   # This is the number of samples per second of the signals in the dataset
    if args.test:
        number_of_concentrations = 2        # This is the number of different concentrations in the dataset
        number_of_durations = 2             # This is the number of different translocation durations per concentration in the dataset
        number_of_diameters = 4             # This is the number of different translocation diameters per concentration in the dataset
        window = 0.5                        # This is the time window in seconds
        length = 20                         # This is the time of a complete signal for certain concentration and duration
    else:
        number_of_concentrations = 20       # This is the number of different concentrations in the dataset
        number_of_durations = 5             # This is the number of different translocation durations per concentration in the dataset
        number_of_diameters = 15            # This is the number of different translocation diameters per concentration in the dataset
        window = 0.5                        # This is the time window in seconds
        length = 20                         # This is the time of a complete signal for certain concentration and duration

    # Training Artificial Data Loader
    TADL = Artificial_DataLoader(args.world_size, args.local_rank, device, training_f, sampling_rate,
                                 number_of_concentrations, number_of_durations, number_of_diameters,
                                 window, length, args.batch_size)

    # this is the dataset for validating
    if args.test:
        number_of_concentrations = 2        # This is the number of different concentrations in the dataset
        number_of_durations = 2             # This is the number of different translocation durations per concentration in the dataset
        number_of_diameters = 4             # This is the number of different translocation diameters per concentration in the dataset
        window = 0.5                        # This is the time window in seconds
        length = 10                         # This is the time of a complete signal for certain concentration and duration
    else:
        number_of_concentrations = 20       # This is the number of different concentrations in the dataset
        number_of_durations = 5             # This is the number of different translocation durations per concentration in the dataset
        number_of_diameters = 15            # This is the number of different translocation diameters per concentration in the dataset
        window = 0.5                        # This is the time window in seconds
        length = 10                         # This is the time of a complete signal for certain concentration and duration

    # Validating Artificial Data Loader
    VADL = Artificial_DataLoader(args.world_size, args.local_rank, device, validation_f, sampling_rate,
                                 number_of_concentrations, number_of_durations, number_of_diameters,
                                 window, length, args.batch_size)

    if args.verbose:
        print('From rank {} training shard size is {}'. format(args.local_rank, TADL.get_number_of_avail_windows()))
        print('From rank {} validation shard size is {}'. format(args.local_rank, VADL.get_number_of_avail_windows()))




    if args.run:
        arguments = {'model': detr,
                     'device': device,
                     'epoch': 0,
                     'VADL': VADL}

        if args.local_rank == 0:
            run_model(args, arguments)

        return

    #if args.statistics:
        #arguments = {'model': model,
                     #'device': device,
                     #'epoch': 0,
                     #'VADL': VADL}

        #[duration_errors, amplitude_errors] = compute_error_stats(args, arguments)
        #if args.local_rank == 0:
            #plot_stats(VADL, duration_errors, amplitude_errors)

        #return


    #if args.evaluate:
        #arguments = {'model': model,
                     #'device': device,
                     #'epoch': 0,
                     #'VADL': VADL}

        #[duration_error, amplitude_error] = validate(args, arguments)
        #print('##Duration error {0}\n'
              #'##Amplitude error {1}'.format(
              #duration_error,
              #amplitude_error))

        #return

    if args.plot_training_history and args.local_rank == 0:
        Model_Util.plot_detector_stats(loss_history, precision_history)
        hours = int(total_time.sum / 3600)
        minutes = int((total_time.sum % 3600) / 60)
        seconds = int((total_time.sum % 3600) % 60)
        print('The total training time was {} hours {} minutes and {} seconds' .format(hours, minutes, seconds))
        hours = int(total_time.avg / 3600)
        minutes = int((total_time.avg % 3600) / 60)
        seconds = int((total_time.avg % 3600) % 60)
        print('while the average time during one epoch of training was {} hours {} minutes and {} seconds' .format(hours, minutes, seconds))
        return


    for epoch in range(args.start_epoch, args.epochs):
        
        arguments = {'detr': detr,
                     'criterion': criterion,
                     'optimizer': optimizer,
                     'device': device,
                     'epoch': epoch,
                     'TADL': TADL,
                     'VADL': VADL,
                     'loss_history': loss_history,
                     'precision_history': precision_history}

        # train for one epoch
        epoch_time, avg_batch_time = train(args, arguments)
        total_time.update(epoch_time)

        # validate every val_freq epochs
        if epoch%args.val_freq == 0 and epoch != 0:
            # evaluate on validation set
            print("\nValidating ...\nComputing mean average precision (mAP) for epoch {}" .format(epoch))
            precision = validate(args, arguments)
        else:
            precision = best_precision

        #if args.test:
            #break

        lr_scheduler.step()
        # remember the best detr and save checkpoint
        if args.local_rank == 0:
            if epoch%args.val_freq == 0:
                print('Validation precision is {} while the best precision so far is {}'.format(precision, best_precision))

            is_best = precision > best_precision
            best_precision = max(precision, best_precision)
            Model_Util.save_checkpoint({
                    'arch': 'DETR_' + args.feature_predictor_arch,
                    'epoch': epoch + 1,
                    'best_precision': best_precision,
                    'state_dict': detr.state_dict(),
                    'criterion': criterion.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'loss_history': loss_history,
                    'precision_history': precision_history,
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'total_time': total_time
            }, is_best)

            print('##Detector precision {0}\n'
                  '##Perf {1}'.format(
                  precision,
                  args.total_batch_size / avg_batch_time))