Example #1
0
    def get_ground_truth(self,dataset):
        print "our dataset is {0}".format(dataset)
        data = dataset.replace("new_","")
        if os._exists("./crawling/{0}/site.gold/{1}/{1}.gold".format(self.date,data)):
            print "./crawling/{0}/site.gold/{1}/{1}.gold".format(self.date,data)
            gold_file = open("./crawling/{0}/site.gold/{1}/{1}.gold".format(self.date,data)).readlines()
        elif os._exists("./{0}/site.gold/{1}/{1}.gold".format(self.date,data)):
            gold_file = open("./{0}/site.gold/{1}/{1}.gold".format(self.date,data)).readlines()
            print "./{0}/site.gold/{1}/{1}.gold".format(self.date,data)
        else:
            print "annotation starts"
            a = annotator(dataset)
            self.ground_truth = a.get_ground_truth(self.path_list)
            return None

        gold_dict = self.build_gold(gold_file)
        #print self.folder_path
        print gold_dict.keys()
        print "length is ", len(gold_dict.keys())
        for i in range(len(self.pages)):
            # here {}/sample instead of {}_samples
            #path = self.pages[i].path.replace("../Crawler/{0}/samples/{1}/".format(self.date,data),"")
            path = self.pages[i].path.replace("../../Crawler/{0}/samples/{1}/".format(self.date,data),"")
            #print path.strip()
            id = int(gold_dict[path.strip().replace(" ","")])
            self.ground_truth.append(id)
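Note on the pattern above (it recurs in most examples on this page): os._exists is a private helper inside os.py that only checks whether a name is defined in the os module's namespace, so os._exists("./some/path") never touches the filesystem and is effectively always False. The public check is os.path.exists. A minimal sketch of what the gold-file lookup above presumably intends, with the path layout treated as a placeholder:

import os

def read_gold_file(date, data):
    # os.path.exists actually checks the filesystem, which the os._exists calls above appear to intend
    for base in ("./crawling/{0}/site.gold".format(date), "./{0}/site.gold".format(date)):
        gold_path = "{0}/{1}/{1}.gold".format(base, data)
        if os.path.exists(gold_path):
            with open(gold_path) as f:
                return f.readlines()
    return None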
Example #2
0
def graphgethttp():
    datas = db.readtaskid(config.cronhttp)
    for i in range(0, len(datas)):
        data = ""+config.httphost+"-"+str(datas[i])+"/taskId="+str(datas[i])+",type=http,url="+config.cronhttp+""
        task = db.readhttpgetid(data)
        for p in range(0, len(task)):
            req = urllib2.Request(config.graph_ips + str(task[p]) + "&start=-3600&cf=")
            print config.graph_ips + str(task[p]) + "&start=-3600&cf="
            count = 0
            try:
                response = urllib2.urlopen(req).readline()
                ss = json.loads(response)
                if len(ss["series"]) < 1:
                    print "graph nodata"
                    os._exists(0)
                for j in range(0,11):
                    value = ss["series"][0]["data"][j]
                    if value[1] == "null":
                        count = count + 1
                if count < 11 and ss["title"].index(data):
                    print "graph response suss"
                else:
                    print "graph response fail"
            except Exception, e:
                print e
                print "graph response fail"
Example #3
0
File: IDEA3.py  Project: jiessie/alfred
def main(wf):
    try:
        import xml.etree.cElementTree as ET
    except ImportError:
        import xml.etree.ElementTree as ET
    args = wf.args
    query = ""
    if len(wf.args)==1: 
        query= wf.args[0]
    workspaces = []
    IDEAIndex =[]
    ideaFolder = []
    if query==r'/rebuild':
        os.remove('IDEA.index')
        wf.add_item("Rebuild Search Index","Please don't open on this item", arg='', autocomplete=None, uid = -1)
        wf.send_feedback()

    # prepare for Index
    try :
        if os._exists('IDEA.index'):
            for ind in open('IDEA.index'):
                IDEAIndex.append(ind.strip('\n'))
        else:
            #read workspaces from workspace.conf
            for line in open('workspaces.conf'):
                workspaces.append(line.strip('\n') )
            indexFile = open('IDEA.index','w')
            for rootdir in workspaces:
                rootdir_levels = rootdir.split('/')
                for root,subFolders,files in os.walk(rootdir):
                    nested_levels = root.split('/')
                    if '.idea' in subFolders:
                        ideaFolder.append(root)
                        indexFile.write(root+"\n")
                    if(len(nested_levels)-len(rootdir_levels)>2):
                        del subFolders[:]
            indexFile.close()
    except IOError:
        if os._exists('workspaces.conf')==False:
            wf.add_item("Workspaces.conf not found","Please open workflow folder and configure your workspaces.conf", arg='', autocomplete=None, uid = -1)
        else:
            wf.add_item("IOError","Please Check Configuration", arg='', autocomplete=None, uid = -1)
    
    if len(ideaFolder) > 0 :
        index = 0
        for item in ideaFolder:
            title = os.path.split(item)[1]
            if query!="" and title.find(query)==-1 :
                continue
            subtitle = item
            wf.add_item(title,subtitle, arg=item, autocomplete=None, uid = index)
            index += 1
    wf.send_feedback()
Example #4
0
def heralick_from_image(input, outdir, params,xyoff,nbands=8, debug = False):
    """
    :param input:
    :param outdir:
    :param params:
    :param xyoff:
    :param nbands:
    :param debug:
    :return:
    """

    if not os._exists(outdir):
        os.mkdir(outdir)

    for i in range(1, nbands + 1):
        params[4] = str(i)  # set the channel

        # get image min and max
        min, max = utility.get_minmax(inputimage, i)
        print(min, max)

        params[8] = str(min)
        params[10] = str(max)

        # update parameters with the offset angle
        for x, y in xyoff:
            newparams = params + ['-parameters.xoff', str(x), '-parameters.yoff', str(y)]
            newparams[12] = outdir + '/HaralickChannel' + newparams[4] + newparams[6] + 'xoff' + newparams[
                14] + 'yoff' + newparams[16] + '.tif'  # set output
            print("computing haralick for " + newparams[2] + "\n" + "channel " + newparams[4] + " wait for a few hours :D ")
            msg, err = compute_haralick(newparams, debug)
            if err:
                if not (err == "\r\n" or err == "\r" or err == "\n"):  # windows, oldmac, unix
                    print(" some heraick from the image could not be created , script is stopping")
                    sys.exit(1)
Example #5
0
def tf_idf(seg_files):
    seg_path = './segfile/'
    corpus = []
    for file in seg_files:
        fname = seg_path + file
        f = open(fname, 'r+')
        content = f.read()
        f.close()
        corpus.append(content)

    vectorizer = CountVectorizer()
    transformer = TfidfTransformer()
    tfdif = transformer.fit_transform(vectorizer.fit_transform(corpus))
    word = vectorizer.get_feature_names()
    weight = tfdif.toarray()

    save_path = './tfidffile'
    if not os._exists(save_path):
        os.mkdir(save_path)

    for i in range(len(weight)):
        print('--------Writing all the tf-idf in the', i, u' file into ', save_path + '/' + string.zfill(i, 5) + '.txt',
              '--------')
        f = open(save_path + '/' + string.zfill(i, 5) + '.txt', 'w+')
        for j in range(len(word)):
            f.write(word[j] + ' ' + str(weight[i][j]) + '\r\n')
        f.close()
Example #6
0
 def init_connection(self):
     db_filename = self.db_name + ".sqlite3"
     self.conn_db = sqlite3.connect(db_filename)
     self.curs_db = self.conn_db.cursor()
     if not os._exists( db_filename):
         self.init_db()
         self.refill_db_from_wg_api()
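Example #6 has an ordering problem on top of the os._exists call: sqlite3.connect creates the database file, so any existence check made after connecting will always find the file. A minimal sketch that checks with os.path.exists before connecting (names here are hypothetical):

import os
import sqlite3

def init_connection(db_name):
    db_filename = db_name + ".sqlite3"
    fresh = not os.path.exists(db_filename)  # check before connect() creates the file
    conn = sqlite3.connect(db_filename)
    curs = conn.cursor()
    if fresh:
        pass  # create the schema and seed it here (init_db / refill_db_from_wg_api in the original)
    return conn, curs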
Example #7
0
    def destroy(self):
        if self.molecule._state.created:
            self._vagrant.destroy()

        if os._exists(self.molecule.config.config['molecule'][
                'vagrantfile_file']):
            os.remove(self.molecule.config.config['molecule'][
                'vagrantfile_file'])
Example #8
0
def loadpairemoisan(paire,mois,an):
    URL = "http://www.histdata.com/download-free-forex-historical-data/?/ninjatrader/tick-bid-quotes/"+paire+"/"+an+"/"+mois+"/"
    FileName = "HISTDATA_COM_NT_"+paire+"_T_BID_"+an+mois+".zip"
    RealFileName = "HISTDATA_COM_NT_"+paire+"_T_BID"+an+mois+".zip"

    driver.get(URL)

    toclic = driver.find_element_by_link_text(FileName) # the file name is the clickable element
    notseen = True
    while (notseen):
        notseen =False
        try:
           toclic.click()
        except selenium.common.exceptions.ElementNotVisibleException :
            notseen = True
        except selenium.common.exceptions.WebDriverException :
            notseen = True


    pathdownload = "c:/tmp/"+RealFileName # note: the zip file name differs from the clicked link name

    # here we wait for the download to finish
    notfound = True
    while (notfound):
        if os.path.exists(pathdownload):
            notfound = False

    print("found")
    # look for a csv file inside the zip
    # extract it into tmp
    # then remove the zip
    notok = True

    while notok:
        try:
            with zipfile.ZipFile(pathdownload, 'r') as zf:
                print (zf.filelist)
                for i in zf.filelist:
                    if i.filename.find(".csv") != -1 :
                        print("extract dans c:\\tmp : ",i.filename)
                        newpath = zf.extract(i,path="c:/tmp")
                        print('new path ',newpath, 'rename to',"c:\\tmp\\"+paire+".csv")
                        os.rename(newpath,"c:\\tmp\\"+paire+".csv")
                        notok = False

                zf.close()

        except PermissionError:
            #zf.close()
            if os._exists("c:\\tmp\\" + paire + ".csv"):
                os.remove("c:\\tmp\\" + paire + ".csv")
            print('redo')


    os.remove(pathdownload)

    return newpath,"c:\\tmp\\"+paire+".csv"
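The while (notfound) loop in example #8 spins at full speed while waiting for the browser download to appear and never gives up. A small sketch of the same wait with a sleep and a timeout (path and durations are placeholders):

import os
import time

def wait_for_file(path, timeout=300.0, poll=1.0):
    # Poll for the file instead of busy-waiting; returns False on timeout.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if os.path.exists(path):
            return True
        time.sleep(poll)
    return False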
Example #9
0
def scrub_dir():
    for title in os.listdir("/Users/patrickeells/PycharmProjects/1q84/html_parsed/"):
        os.remove('html_parsed/{0}'.format(title))
    for title in os.listdir("/Users/patrickeells/PycharmProjects/1q84/txt_files/"):
        os.remove('txt_files/{0}'.format(title))
    for title in os.listdir("/Users/patrickeells/PycharmProjects/1q84/search_match/"):
        os.remove('search_match/{0}'.format(title))
    if os._exists('compiled.txt')==True:
        os.remove('compiled.txt')
    return
Example #10
0
File: docker.py  Project: digcat/grua
def wait_for_up(container, config):
    upwhen = config['upwhen']
    timeout = 30
    if upwhen.has_key('timeout'):
        timeout = get_value(upwhen, 'timeout')

    if upwhen.has_key('logmsg'):
        logmsg = get_value(upwhen, 'logmsg')

        if upwhen.has_key('logfile'):
            logfile = mem.VolumePath + "/" + mem.Project + "/" + container + "/" + get_value(upwhen, 'logfile')
            mention("Waiting up to " + str(
                timeout) + " seconds for '" + logmsg + "' in '" + logfile + "' to indicate that " + container + " is stacked")

        else:
            mention("Waiting up to " + str(
                timeout) + " seconds for '" + logmsg + "' to indicate that " + container + " is stacked")

        waited = 0
        ok = False

        while waited <= timeout:
            if not 'logfile' in locals():
                command = ["docker", "logs", get_container(container)]
            else:
                if logfile.startswith('/'):
                    command = ["tail", logfile]

                else:
                    # there's a chance we try to tail it before it exists... just ignore that time
                    if os._exists(logfile):
                        command = ["tail", logfile]

            # command may not have been set yet if the file didn't exist
            if 'command' in locals():
                try:
                    output = subprocess.check_output(command, stderr=subprocess.STDOUT)
                except:
                    pass
            # print output

            if 'output' in locals() and output.find(logmsg) > -1:
                ok = True
                break
            else:
                time.sleep(1)
                waited = waited + 1

        if not ok:
            raise Exception("Timed out waiting for " + container + " to start")

    if upwhen.has_key('sleep'):
        mention("Sleeping " + str(upwhen['sleep']) + " extra seconds as configured")
        time.sleep(int(upwhen['sleep']))
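Example #10 leans on two Python 2-only or fragile idioms: dict.has_key() (removed in Python 3) and probing locals() to see whether logfile/command/output were ever assigned. A small sketch of the configuration-reading step using dict.get and explicit defaults (the function name is hypothetical):

def read_upwhen(upwhen):
    # dict.get replaces has_key(); initialising values up front avoids the locals() checks
    timeout = upwhen.get('timeout', 30)
    logmsg = upwhen.get('logmsg')
    logfile = upwhen.get('logfile')  # None when no log file is configured
    return timeout, logmsg, logfile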
Example #11
0
def getDirectory(remotePath,localPath):


    destPath=localPath+"/TabletLogs/"



    if os._exists(destPath):  # if path is available do not create anymore
        os.mkdir(destPath)

    srv.get_d(remotePath,destPath,preserve_mtime=True)
Example #12
0
File: tests.py  Project: ratilicus/gallery
    def get_app(self):
        self.db = motor.MotorClient(host='db').test_gallery
        self.sync_db = pymongo.MongoClient(host='db').test_gallery
        self.UPLOAD_PATH = '/data/test_uploads/'
        if os._exists(self.UPLOAD_PATH):
            print 'UPLOAD PATH {} exists.. removing'.format(self.UPLOAD_PATH)
            shutil.rmtree(self.UPLOAD_PATH)

        self.settings = web.SETTINGS
        self.settings.update(autoreload=False, UPLOAD_PATH=self.UPLOAD_PATH, db=self.db)
        self.app = web.make_app(self.settings)
        return self.app
Example #13
0
 def checkCookieValid(self, hostname):
     if not self.__init:
         self.__initMetadata()
     try:
         now = datetime.datetime.utcnow()
         diff = now - self.__metadata[hostname]['last_login']
         import os
         if not os._exists(self.httpRequest.cookiePath):
             print("there is no cookie for authentication at ", self.httpRequest.cookiePath)
             return False
         return not diff.total_seconds() // 60 > self.__metadata[hostname]['expire_time']
     except Exception:
         return False
Example #14
    def test_writeFarfieldData(self):
        # Open file and get variables
        filename = "testWriteFile.dat"
        theta_grid = np.array([[0, 1, 2], [0, 1, 2]])
        phi_grid = np.array([[0, 0, 0], [1, 1, 1]])
        gain_grid = np.array([[0, 1, 2], [3, 4, 5]])

        if os._exists(filename):
            os.remove(filename)

        rFF.write_farfield_gain_datafile(filename, theta_grid, phi_grid, gain_grid)

        file_handle = open(filename)
        reader = csv.reader(file_handle, delimiter=' ')

        theta = []
        phi = []
        data = []

        # Read header and body of file
        line_no = 0
        for row in reader:
            string = '    '.join(row)
            elements = string.split()
            if line_no == 0:
                width = float(elements[0])
                height = float(elements[1])
            else:
                theta.append(float(elements[0]))
                phi.append(float(elements[1]))
                data.append(float(elements[2]))
            line_no += 1

        # Close file after reading
        file_handle.close()

        # Convert arrays to numpy array types
        theta = np.array(theta)
        phi = np.array(phi)
        data = np.array(data)

        test_theta_grid = np.reshape(theta, (width, height))
        test_phi_grid = np.reshape(phi, (width, height))
        test_gain_grid = np.reshape(data, (width, height))

        for y in np.arange(len(theta_grid)):
            for x in np.arange(len(theta_grid[0])):
                self.assertEqual(theta_grid[y][x], test_theta_grid[y][x])
                self.assertEqual(phi_grid[y][x], test_phi_grid[y][x])
                self.assertEqual(gain_grid[y][x], test_gain_grid[y][x])
Example #15
def load_cars(split=0.8):
    # Vehicle images are courtesy of German Aerospace Center (DLR)
    # Remote Sensing Technology Institute, Photogrammetry and Image Analysis
    # http://www.dlr.de/eoc/en/desktopdefault.aspx/tabid-5431/9230_read-42467/
    if not os._exists('./data/cars.pkl'):
        print('Extracting cars dataset')
        with zipfile.ZipFile('./data/cars.pkl.zip', "r") as z:
            z.extractall("./data/")

    with open('./data/cars.pkl', 'rb') as ff:
        (X_data, y_data) = pickle.load(ff)
    X_data = X_data.reshape(X_data.shape[0], 3, 32, 32)
    l = int(split * X_data.shape[0])
    X_train = X_data[:l]
    X_test = X_data[l:]

    return X_train, X_test
Example #16
def main(argv):
    directory = parse_parameters(argv)

    # sort files
    sorted_files = os.listdir(directory)
    sorted_files.sort(key=alphanum_key)

    file_count = len(sorted_files)

    #define backup directory
    backup_directory = os.path.join(directory, 'original')
    if not os._exists(backup_directory):
        os.mkdir(backup_directory)

    counter = 0

    # define progress bar
    bar = progressbar.ProgressBar(maxval=file_count, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()

    for item in sorted_files:
        full_file_name = os.path.join(directory, item)
        if os.path.isfile(full_file_name):
            path, file_name = os.path.split(full_file_name)
            extension = os.path.splitext(file_name)[1].lower()
            counter += 1
            bar.update(counter)

            #backup
            shutil.copy(full_file_name, backup_directory)

            #rename file
            new_file_name = str(counter).zfill(3) + extension
            new_full_file_name = os.path.join(path, new_file_name)

            os.rename(full_file_name, new_full_file_name)

            #resize file
            subprocess.call('convert {:s} -resize 500x500 "{:s}";'.format(new_full_file_name, new_full_file_name), shell=True)

            #create thumbnail
            command='convert {:s} -resize 75x75 -background white -gravity center -extent 75x75 -quality 75' \
                    ' "{:s}/s{:s}";'.format(new_full_file_name, path, new_file_name)
            subprocess.call(command, shell=True)

    bar.finish()
Example #17
    def test_readFarfieldData(self):
        # Open file and get variables
        filename = "testWriteFile.dat"
        theta_grid = np.array([[0, 1, 2], [0, 1, 2]])
        phi_grid = np.array([[0, 0, 0], [1, 1, 1]])
        gain_grid = np.array([[0, 1, 2], [3, 4, 5]])

        if os._exists(filename):
            os.remove(filename)

        rFF.write_farfield_gain_datafile(filename, theta_grid, phi_grid, gain_grid)
        test_theta_grid, test_phi_grid, test_gain_grid = rFF.read_farfield_gain_datafile(filename)

        for y in np.arange(len(theta_grid)):
            for x in np.arange(len(theta_grid[0])):
                self.assertEqual(theta_grid[y][x], test_theta_grid[y][x])
                self.assertEqual(phi_grid[y][x], test_phi_grid[y][x])
                self.assertEqual(gain_grid[y][x], test_gain_grid[y][x])
Example #18
0
def analyse_dir(output_dir, X, masker):
    output_files = os.listdir(output_dir)
    records = []
    objectives = []
    l1l2s = []
    analysis = {}
    if os._exists(join(output_dir, 'analysis.json')):
        return
    try:
        with open(join(output_dir, 'results.json'), 'r') as f:
            results = json.load(f)
    except IOError:
        return

    reduction = int(results['reduction'])
    filenames = sorted(fnmatch.filter(output_files,
                                      'record_*.nii.gz'),
                       key=lambda t: int(t[7:-7]))
    timings = []
    for filename in filenames[::reduction]:
        record = int(filename[7:-7])
        timing = results['timings'][record]
        print('Record %i' % record)
        objective, density = compute_objective_l1l2(X, masker,
                                                    join(output_dir, filename),
                                                    alpha=results['alpha'])
        timings.append(timing)
        records.append(record)
        objectives.append(objective)
        l1l2s.append(density)

    order = np.argsort(np.array(records))
    objectives = np.array(objectives)[order].tolist()
    l1l2s = np.array(l1l2s)[order].tolist()
    records = np.array(records)[order].tolist()
    timings = np.array(timings)[order].tolist()
    analysis['records'] = records
    analysis['objectives'] = objectives
    analysis['densities'] = l1l2s
    analysis['timings'] = timings
    with open(join(output_dir, 'analysis.json'), 'w+') as f:
        json.dump(analysis, f)
Example #19
0
def get_level_image():
    try:
        level_id = get_arg("id")
        size = (int(get_arg("x")), int(get_arg("y")))

        if level_id is None:
            raise MissingInformation("id")
        try:
            level_id = int(level_id)
        except ValueError:
            raise InvalidInformation("id", "Not an integer")

        conn = engine.connect()
        query = sql.select([Level.name, Level.creator, Level.timestamp])\
            .where(Level.id == level_id).limit(1)
        res = conn.execute(query)
        rows = res.fetchall()
        if len(rows) != 1:
            raise InvalidInformation("id", "Not a level")

        for row in rows:
            imagepath = "levels/%s/%s-%s.png" % (str(row["creator"]), str(row["name"]), str(row["timestamp"]))
            if not _exists(imagepath):
                imagepath = "static/images/logo.png"
            if any(x is None for x in size):
                cropped = open(imagepath).read()
            else:
                cropped = get_and_crop(imagepath, size)
                cropped = surf_to_string(cropped)

            return make_response(
                cropped,
                200,
                {"Content-type": "image/png"}
            )

    except InvalidInformation as e:
        return make_error(e.message)
    except MissingInformation as e:
        return make_error(e.message)
Example #20
0
    def saveRelatioGraph(matrix, name):
        """

        :param matrix:
        :param name:
        :return:
        """
        graph1 = nx.from_numpy_matrix(matrix, create_using=nx.MultiDiGraph())
        pos = {}
        labels = {}
        nodeColors = []
        for i in range(0, np.shape(matrix)[1]):
            pos[i] = (i * 10, np.shape(matrix)[1] * 10 - (sum(matrix[:, i]) * 10))
            labels[i] = i + 1
            if matrix.item(i, i) == 1:
                nodeColors.append('g')
            else:
                nodeColors.append('r')

        import matplotlib.pyplot as plt

        nx.draw(graph1,
                with_labels=True, pos=pos, labels=labels,
                node_color=nodeColors,
                label='Something',
                facecolor='red')

        path = 'Temporal Images/'
        extension = '.png'
        imageName = path + name + extension

        if os._exists(imageName):
            os.remove(imageName)
            print "File " + imageName + " deleted successfully"

        plt.savefig(imageName, facecolor='lightgrey')
        print "File " + imageName + " created successfully"
        plt.close()
        return imageName
Example #21
0
def cleanup(configuration, directory):
    '''
    Removes any files that are not part of the results and were produced
    in any of the previous steps.
    :param configuration:
    :param directory:
    :return:
    '''
    print "Cleaning temporary files for %s" % directory
    os.chdir(directory)
    if utils.getValueForKeyPath(configuration, 'postprocessing.tracks.meanie3D-trackstats.vtk_tracks'):
        for filename in glob.glob('*.vtk'):
            os.remove(filename)

    if utils.getValueForKeyPath(configuration, 'postprocessing.tracks.meanie3D-trackstats.gnuplot'):
        for filename in glob.glob('*.gp'):
            os.remove(filename)

    if (os._exists("visitlog.py")):
        os.remove("visitlog.py")

    os.chdir("..")
    return
Example #22
0
print('./build/tools/alignment_tools run_test_on_wflw \
--input_file_1='+testFile+' '\
'--input_file_2=./meanpose/meanpose_71pt.txt \
--input_folder=./datasets/WFLW/WFLW_images/ \
--model_path=./models/WFLW/WFLW_final/ \
--output_file_1=./datasets/ourVideo/pred_98pt_largepose.txt \
--label_num=196 --thread_num=4')
os.system('./build/tools/alignment_tools run_test_on_wflw \
--input_file_1='+testFile+' '\
'--input_file_2=./meanpose/meanpose_71pt.txt \
--input_folder=./datasets/WFLW/WFLW_images/ \
--model_path=./models/WFLW/WFLW_final/ \
--output_file_1=./datasets/ourVideo/pred_98pt_largepose.txt \
--label_num=196 --thread_num=4' )

if not os._exists(pre_image_path):
    os.mkdir(pre_image_path)
txtR = open(testFile)
for fileName in fileNameList:
    pre_landmark = txtR.readline()
    pre_landmark = pre_landmark.split(' ')
    landmark = pre_landmark[0:-2]
    if fileName == pre_landmark[-1]:
        img = Image.open(fileName)
        for i in range(len(landmark)/2):
            rr,cc = draw.circle(landmark[i],landmark[i+1],5)
            draw.set_color(img,[rr,cc],[0,255,0])
        img.save(pre_image_path+fileName)

print('ffmpeg -i '+pre_image_path+'\%04d.png'+' -vcodec mpeg4 '+pre_video_path+'output.mp4')
os.system('ffmpeg -i '+pre_image_path+'%04d.png'+' -vcodec mpeg4 '+pre_video_path+'output.mp4')
Example #23
0
 def remove_inventory_file(self):
     if os._exists(self.config.config['ansible']['inventory_file']):
         os.remove(self.config.config['ansible']['inventory_file'])
Example #24
    # kappa is from previous trajectory_point
    last_idx = len(new_localization.trajectory_point) - 1
    last_trajectory_point.path_point.kappa = \
        new_localization.trajectory_point[last_idx].path_point.kappa
    last_trajectory_point.path_point.s = sum_s
    trajectory_point.relative_time = relative_time


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Generate future trajectory based on localization")
    parser.add_argument('path', type=str, help='rosbag file or directory')
    parser.add_argument('period',
                        type=float,
                        default=3.0,
                        help='duration for future trajectory')
    args = parser.parse_args()
    path = args.path
    period = args.period
    if not os.path.exists(path):
        logging.error("Fail to find path: {}".format(path))
        os._exists(-1)
    if os.path.isdir(path):
        pass
    if os.path.isfile(path):
        bag_name = os.path.splitext(os.path.basename(path))[0]
        path_out = os.path.dirname(path) + '/' + bag_name + \
                  '_with_future_trajectory.bag'
        future_pose_list = generate_future_pose_list(path, period)
        generate_future_traj(path, path_out, future_pose_list)
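In the __main__ block above, os._exists(-1) after the error log reads like a slip for sys.exit(-1); as written the script keeps running with a missing path. A minimal sketch of the presumably intended guard:

import logging
import os
import sys

def require_path(path):
    # Hypothetical guard: abort when the input rosbag path is missing.
    if not os.path.exists(path):
        logging.error("Fail to find path: {}".format(path))
        sys.exit(-1)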
Example #25
0
             corrupt=0.3)
    sdae.save_model(pretrain_path)
else:
    print('pretrained model exists')

t0 = time()
vade = VaDE(input_dim=args.gene_select,
            z_dim=10,
            n_centroids=n_centroids,
            binary=False,
            encodeLayer=[300, 100, 30],
            decodeLayer=[30, 100, 300],
            activation="relu",
            dropout=0,
            is_bn=False)
if os._exists(pretrain_path):
    print("Loading model from %s..." % pretrain_path)
    vade.load_model(pretrain_path)
print("Initializing through GMM..")
vade.initialize_gmm(train_loader)
print("basline of GMM and kmeans")
vade.gmm_kmeans_cluster(train_loader)
vade.fit(train_loader,
         model_name=args.model_name,
         save_inter=args.save_inter,
         lr=args.lr,
         batch_size=args.batch_size,
         num_epochs=args.epochs,
         anneal=True)
print("clustering time: ", (time() - t0))
save_path = 'model/' + args.model_name + '.pt'
Example #26
0
 def remove_score_file_from_last_run(self):
     if os._exists(params.score_file):
         os.remove(params.score_file)
Example #27
0
import os
import argparse
import pickle
import numpy as np
from sklearn.linear_model import LogisticRegression

parser = argparse.ArgumentParser()
parser.add_argument('data_dir')
parser.add_argument('output_dir')
args = parser.parse_args()
output_dir = args.output_dir

train_data_path = os.path.join(args.data_dir, 'train_features.pkl')
train_labels_path = os.path.join(args.data_dir, 'train_labels.pkl')
X_train = pickle.load(open(train_data_path, 'rb'))
y_train = pickle.load(open(train_labels_path, 'rb'))

logistic_regression = LogisticRegression(random_state=101, solver='liblinear')
logistic_regression.fit(X_train, y_train.values.ravel())

if not os._exists(output_dir):
    os.mkdir(output_dir)
model_path = os.path.join(output_dir, 'log_regression.pkl')
pickle.dump(logistic_regression, open(model_path, 'wb'))
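For the check-then-mkdir pattern used above (and in several other examples), os.makedirs with exist_ok=True both performs a real filesystem check and avoids the race between checking and creating:

import os

def ensure_dir(path):
    # Creates the directory (and any parents) if missing; no error if it already exists.
    os.makedirs(path, exist_ok=True)

For the script above that would be ensure_dir(output_dir) before dumping the pickle.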
Example #28
0
File: dl177.py  Project: KiritoStudio/177DL
def main(): # main module
    recode = ''
    if os.path.exists(recodeFileName) == False:
        print('First run: creating the page-progress record')
        os._exists(recodeFileName)
        # os.popen('touch recode')    # check whether this is the first run of the script
        with open (recodeFileName,'w') as f:
            recode = '/html/category/tt/page/1'
            f.write(recode)
    else:
        print('Reading the page where the last run stopped')
        with open(recodeFileName,'r') as f:
            trecode = f.readline().replace('\n','')  # read the record
            recode = trecode.split('/')
            print('Last run stopped at {0}'.format(recode))
    url = 'http://' + sourceHost +'/html/category/tt'
    total_page = getSourcePageNumber()
    url_list = []
    for i in range(int(recode[-1]), total_page + 1):    # choose the starting page from the record
        url_list.append(url+'/page/'+str(i))
    # tmp = os.popen('ls').readlines()
    tmp = os.listdir(rootPath)
    allcomic = []
    for i in tmp:
        allcomic.append(i) # keep the directory listing so we can tell whether a comic has already been downloaded
    del tmp
    for y in url_list:
        print('Downloading: ',y)
        with open(recodeFileName,'w') as f:
            wrotePart = ""
            yParts = y.split('/')
            for i in range(len(yParts)):
                if i == 0 or i == 1:
                    continue;
                else:
                    wrotePart += "/" + yParts[i]
            f.write(wrotePart)
        comic = getSource(y)
        while(len(comic) <= 0):
            print ("comic list should not be 0, retry")
            comic = getSource(y)
        print('Download list:',comic)

        for x in comic:
            comic[x] = cleanName(comic[x])
            if ((comic[x]+'.cbr') in allcomic) == True:
                print(comic[x],'.cbr already exists.')
            else:
                if (comic[x] in allcomic) == True: # check whether the downloaded image count matches the count in the name; if equal, no need to re-download
                    countList = imageCurrentCount(comic[x])
                    print("count in directory:" + str(countList[0]) + "/" + str(countList[1])) #有一些漫画实际数量比标称数量要少的,需要在做mark,防止下次再download
                    if(int(countList[0]) == int(countList[1])):
                        print(comic[x] + "无需重复下载")
                        # if (os.name != 'nt'):
                        #     command = 'rar a -r -s -m5\'' + comic[x] + '.cbr\' \'' + comic[x] + '\''
                        #     os.system(command)
                        continue
                    else:
                        if(os.path.exists(exclusionFileName)):
                            print('check exclusion')
                            with open(exclusionFileName, mode='r',encoding="utf-8") as f:
                                listAllExclusion = []
                                for line in f:
                                    line = line.replace("\n","")
                                    listAllExclusion.append(line)
                                print(comic[x] in listAllExclusion)
                                if (comic[x] in listAllExclusion):
                                    print(comic[x] + "in exclusion list, no need to download again")
                                    # if (os.name != 'nt'):
                                    #     command = 'rar a -r -s -m5\'' + comic[x] + '.cbr\' \'' + comic[x] + '\''
                                    #     os.system(command)
                                    continue
                print('Downloading: ',comic[x])
                if (os.path.exists(comic[x])) == True:
                    print('Directory already exists.')
                    os.chdir(comic[x])
                    downloadComic(x)
                    # if (os.name != 'nt'):
                    #     command = 'rar a -r -s -m5\''+comic[x]+'.cbr\' \''+comic[x]+'\'' # -df deleted because we need remain the folder
                    #     os.system(command)
                    # os.system('clear')
                else:
                    os.mkdir(comic[x])
                    os.chdir(comic[x])
                    downloadComic(x)
                    # if(os.name != 'nt'):
                    #     command = 'rar a -r -s -m5\''+comic[x]+'.cbr\' \''+comic[x]+'\''  # -df deleted because we need remain the folder
                    #     os.system(command)
                    # os.system('clear')
                #finished download check image lack
                imageCountList = imageCurrentCount(comic[x])
                if(int(imageCountList[0]) < int(imageCountList[1])):
                    #mark current to the exclusion file
                    with open(exclusionFileName, mode='a+',encoding="utf-8") as f:
                        listAllExclusion = []
                        for line in f:
                            listAllExclusion.append(line)
                        if(comic[x] in listAllExclusion):
                            print (comic[x] + " has in exclusion")
                        else:
                            print ("write " + comic[x] + " to exclusion")
                            print(comic[x], file=f) #f.write(comic[x])
Example #29
0
File: IDEA.py  Project: jiessie/alfred
sys.setdefaultencoding('UTF-8')
query = ""
if len(sys.argv)==2: 
    query= sys.argv[1]
results = []
workspaces = []
IDEAIndex =[]
ideaFolder = []

if query==r'/rebuild':
    os.remove('IDEA.index')
    aitem = alfred.Item({'uid': -1, 'arg' : ""},"Rebuild Search Index", "please don't click on this item")
    results.append(aitem)
# prepare for Index
try :
    if os._exists('IDEA.index'):
        for ind in open('IDEA.index'):
            IDEAIndex.append(ind.strip('\n'))
    else:
        #read workspaces from workspace.conf
        for line in open('workspaces.conf'):
            workspaces.append(line.strip('\n') )

        indexFile = open('IDEA.index','w')
        for rootdir in workspaces:
            rootdir_levels = rootdir.split('/')
            for root,subFolders,files in os.walk(rootdir):
                nested_levels = root.split('/')
                if '.idea' in subFolders:
                    ideaFolder.append(root)
                    indexFile.write(root+"\n")
Example #30
# Gym program

# IMPORT THE GRAPHICAL INTERFACE AND FILE MODULES
from tkinter import *
import os

janela_principal = Tk()
janela_principal.title("SOFT GYM EVOLULION X")


# CHECK WHETHER THE ICON IS IN THE FOLDER AND SHOW IT IF TRUE
if(not os._exists('treino128x128.ico')):
    janela_principal.iconbitmap('treino128x128.ico')


# DEFAULT USER DATA
nome = "JOAO".upper()

Senha = str(123456)
idade = 32
sexo = "MASCULINO"
altura = 1.75
peso = 71
biceps_direito = 28.25
biceps_esquerdo = 28.25
coxa_direita = 50.25
coxa_esquerda = 50.25
antibraco_direito = 25.50
antibraco_esquerdo = 25.50
panturrilha_direita = 32
panturrilha_esquerda = 33.50
Example #31
0
 def worker_associate_and_upload_to_miner(self, upload):
     self.find_miners_within_the_same_epoch()
     candidate_miners = self._same_epoch_miner_nodes
     if self._is_miner:
         print("Worker does not accept other workers' updates directly")
     else:
         # not necessary to call self.find_miners_within_the_same_epoch() here again: if no same-epoch miner is found on the first try there will not be any later, since a worker is never faster than a miner. A slow miner will also catch up to the latest chain via pow_consensus, which in turn lets this worker catch up. Otherwise, most probably there is no working miner left in this network.
         while candidate_miners:
             miner_address = random.sample(candidate_miners, 1)[0]
             print(
                 f"This workder {self.get_ip_and_port()}({self.get_idx()}) picks {miner_address} as its associated miner and attempt to upload its updates..."
             )
             candidate_miners.remove(miner_address)
             # print(f"{PROMPT} This workder {self.get_ip_and_port()}({self.get_idx()}) now assigned to miner with address {miner_address}.\n")
             checked = False
             # check again if this node is still a miner
             response = requests.get(f'{miner_address}/get_role')
             if response.status_code == 200:
                 if response.text == 'Miner':
                     # check again if worker and miner are in the same epoch
                     response_epoch = requests.get(
                         f'{miner_address}/get_miner_epoch')
                     if response_epoch.status_code == 200:
                         miner_epoch = int(response_epoch.text)
                         if miner_epoch == self.get_current_epoch():
                             # check if miner is within the wait time of accepting updates
                             response_miner_accepting = requests.get(
                                 f'{miner_address}/within_miner_wait_time')
                             if response_miner_accepting.text == "True":
                                 checked = True
             if not checked:
                 print(
                     f"The picked miner {miner_address} is unavailable. Try resyncing chain first..."
                 )
                 # first try resync chain
                 if self.pow_consensus():
                     # TODO a worker should now do global updates to the point
                     print(
                         "A longer chain has found. Go to the next epoch.")
                     return
                 else:
                     if candidate_miners:
                         print(
                             "Not a longer chain found. Re-pick another miner and continue..."
                         )
                         continue
                     else:
                         print(
                             "Most likely there is no miner in the network any more. Please restart this node and try again."
                         )
                         os._exists(0)
             else:
                 # record this worker's address to let miner request this worker to download the block later
                 upload['this_worker_address'] = self._ip_and_port
                 # upload
                 response_miner_has_accepted = requests.post(
                     f"{miner_address}/new_transaction",
                     data=json.dumps(upload),
                     headers={'Content-type': 'application/json'})
                 retry_connection_times = RETRY_CONNECTION_TIMES
                 while True:
                     if response_miner_has_accepted.text == "True":
                         print(
                             f"Upload to miner {miner_address} succeeded!")
                         return
                     else:
                         if retry_connection_times:
                             print(
                                 f"Upload to miner error. {retry_connection_times} re-attempts left..."
                             )
                             retry_connection_times -= 1
                             # re-upload
                             response_miner_has_accepted = requests.post(
                                 f"{miner_address}/new_transaction",
                                 data=json.dumps(upload),
                                 headers={
                                     'Content-type': 'application/json'
                                 })
                         else:
                             candidate_miners.remove(miner_address)
                             if candidate_miners:
                                 print(
                                     f"Upload to miner error after {RETRY_CONNECTION_TIMES} attempts. Re-pick another miner and continue..."
                                 )
                                 break
                             else:
                                 print(
                                     "Most likely there is no miner in the network any more. Please restart this node and try again."
                                 )
                                 os._exists(0)
Example #32
0
File: core.py  Project: dbryant4/molecule
 def _remove_inventory_file(self):
     if os._exists(self._config.config['molecule']['inventory_file']):
         os.remove(self._config.config['molecule']['inventory_file'])
Example #33
0
File: load-model.py  Project: atroudi/Rheem
def main():

    #inputFile = loadtxt("planVectorsSGD2-kmeans-simword-opportuneWordcount.txt", comments="#", delimiter=" ", unpack=False)

    currentDirPath = os.path.dirname(os.path.realpath(__file__))

    dirPath = str(Path.home())

    model = "nn"
    if (len(sys.argv) >= 2):
        model = sys.argv[1]

    if (len(sys.argv) >= 3):
        inputFile = loadtxt(sys.argv[2],
                            comments="#",
                            delimiter=" ",
                            unpack=False)
    else:
        inputFile = loadtxt(os.path.join(dirPath, ".rheem",
                                         "mlModelVectors.txt"),
                            comments="#",
                            delimiter=" ",
                            unpack=False)

    #size = 146;
    #start = 13;
    #size = 213
    size = 251
    start = 0
    dimInputFile = inputFile.ndim

    if (dimInputFile == 1):
        inputFile = numpy.reshape(inputFile, (-1, inputFile.size))
    x_test = inputFile[:, 0:size]
    y_test = inputFile[start:, size]

    # x_train = inputFile[:,0:size]
    # y_train = inputFile[:,size]
    #
    # x_test = inputFile[:,0:size]
    # y_test = inputFile[:,size]

    # load the model from disk
    if (model == "forest"):
        # load the model from disk
        filename = os.path.join(currentDirPath, "model-forest.sav")
        print("Loading model: " + filename)
        model = pickle.load(open(filename, 'rb'))
    elif (model == "nn"):
        filename = os.path.join(currentDirPath, 'nn.pkl')
        print("Loading model: " + filename)
        # Load the pipeline first:
        model = joblib.load(filename)

        # Then, load the Keras model:
        model.named_steps['mlp'].model = load_model(
            os.path.join(currentDirPath, 'keras_model.h5'))

    # fix random seed for reproducibility
    seed = 7
    numpy.random.seed(seed)
    #kfold = KFold(n_splits=10, random_state=seed)
    #results = cross_val_score(regr, x_train, y_train, cv=kfold)
    #accuracy_score(prediction,y_train)
    #print("Results: %.2f (%.2f) MSE" % (results.mean(), results.std()))
    prediction = model.predict(x_test)

    # for num in range(1,min([34,len(x_test)])):
    #     if num % 2 == 0:
    #         print("estimated time for " + str(x_test[num][size-2]) + "-" + str(x_test[num][size-1]) + " in java : " + str(
    #             prediction[num]) + "(real " + str(y_test[num]) + ")")
    #     else:
    #         print("estimated time for " + str(x_test[num][size-2]) + "-" + str(x_test[num][size-1]) + " in spark : " + str(
    #             prediction[num]) + "(real " + str(y_test[num]) + ")")

    # print results to text
    if (len(sys.argv) >= 4):
        saveLocation = loadtxt(sys.argv[3],
                               comments="#",
                               delimiter=" ",
                               unpack=False)
    else:
        saveLocation = os.path.join(dirPath, ".rheem", "estimates.txt")

    # delete first
    if (os._exists(saveLocation)):
        os.remove(saveLocation)
    text_file = open(saveLocation, "w")
    # print estimates
    dimResults = prediction.ndim
    if (dimResults == 0):
        text_file.write("%d" % prediction)
        text_file.write("\n")
    else:
        for num in range(0, prediction.size):
            t = prediction[num]
            text_file.write("%d" % prediction[num])
            text_file.write("\n")
    text_file.close()
    print("estimation done!")
Example #34
0
import os
from dotenv import load_dotenv

dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os._exists(dotenv_path):
    load_dotenv(dotenv_path)

from bluelog import create_app

app = create_app('production')
Example #35
0
def bunch_comment_spider():
    if os._exists(kybook_path):
        os.remove(kybook_path)
    for i in range(20):
        comment_spider(i)
        time.sleep(random.random() * 5)
Example #36
0
# python 2.7
import os
import ctypes
import tkFileDialog
import re
import ICIfunctions as ICI

# name of text file to write pixel-level contents
oFile = "data/processed/metaFromImage/metaFromImage.txt"
# x Pixel (origin = 0 at "top left" of image)
xPix = ctypes.c_int(333)
# y pixel (origin = 0 at "top left" of image)
yPix = ctypes.c_int(461)
if os._exists(oFile):
    os.remove(oFile)

# interactive image load
imgFileNames = tkFileDialog.askopenfilenames()

with open(oFile, 'w') as f:
    for imgFileName in imgFileNames:

        print(imgFileName)
        if not re.search(".jpg", imgFileName):
            continue

        creationTime = os.path.getctime(imgFileName)
        print(creationTime)
        # get the "ihandle" image handle
        ihandle = ICI.ImgLoad(imgFileName)
Example #37
0
 def remove_inventory_file(self):
     if os._exists(self.config.config['ansible']['inventory_file']):
         os.remove(self.config.config['ansible']['inventory_file'])
Example #38
0
 def test_empty_trash_async(self):
   with self.temporary_workdir() as work_dir:
     trash_dir = os.path.join(work_dir, "trash")
     subprocess.call(["touch", trash_dir + "foo.txt"])
     self.assert_success(self.run_pants_with_workdir(["clean-all", "--async"], work_dir))
     self.assertFalse(os._exists(trash_dir))
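The assertion in example #38 always passes, because os._exists(trash_dir) returns False whether or not the trash directory was removed; note also that the touch writes work_dir/trashfoo.txt rather than a file inside the trash directory, since the path is built by plain string concatenation. What the test presumably means to verify:

import os

def trash_is_gone(trash_dir):
    # Hypothetical check for the assertion above: the directory really is absent.
    return not os.path.exists(trash_dir)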
Example #39
0
def AnalyzePhase(AtPct=None, WtPct=None, OxWtPct=None):

    #Normalize our AtPct vector.
    AtPct = AtPct/sum(AtPct)*100

    # A dictionary of the AtPct values would be useful so we can look up by element name.
    E = dict(zip(pb.ElementalSymbols, AtPct))


    ### We output an output string which contains Mg, Si and Fe ratioed values.
    OutStr = '--- Simple At% ratios ---\n\n'

    OutStr += "Abundances ratioed to:\n"
    OutStr += "Element to   Mg       Si       Fe\n"
    OutStr += '-'*41 + '\n'
    for Zminus, E in enumerate(AtPct):
        if E != 0:
            EtoMg = E / AtPct[pb.Mg-1]
            EtoSi = E / AtPct[pb.Si-1]
            EtoFe = E / AtPct[pb.Fe-1]
            OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple([pb.ElementalSymbols[Zminus+1]]) + tuple([EtoMg, EtoSi, EtoFe]))



    ### We output an output string which contains ratios to chondritic (protosolar).
    OutStr += '--- Chondritic Analysis ---\n\n'

    # Load the protosolar abundances.  This is recorded from the Lodders ref with logarithmic values.
    ProtosolarAbundancesFileName = 'ProtosolarAbundances.csv'
    if not os._exists(ProtosolarAbundancesFileName):
        ProtosolarAbundancesFileName = os.path.join('ConfigData', ProtosolarAbundancesFileName)
    Protosolar = genfromtxt(ProtosolarAbundancesFileName, delimiter=',', skip_header=1, dtype=None)
    ProtosolarDict = dict(Protosolar)   # This dictionary could be handy...
    Protosolar = array(zip(*Protosolar)[1]) # But we really need just a numpy array with the numbers.

    # Convert to vectors which are normalized to Mg, Si, and Fe.
    ProtosolarToMg = power(10, Protosolar)  # Get out of log space into linear space.  Now the numbers relate to AtPct.
    ProtosolarToMg /= ProtosolarToMg[pb.Mg-1]
    ProtosolarToSi = power(10, Protosolar)
    ProtosolarToSi /= ProtosolarToSi[pb.Si-1]
    ProtosolarToFe = power(10, Protosolar)
    ProtosolarToFe /= ProtosolarToFe[pb.Fe-1]

    # Print out the abundances normalized to protosolar.
    Ratios = list() # Keep track of the ratios, so at the end we can compute standard deviations.
    OutStr += "Abundances ratioed to protosolar and normalized to:\n"
    OutStr += "Element to   Mg       Si       Fe\n"
    OutStr += '-'*41 + '\n'
    for Zminus, E in enumerate(AtPct):
        if E != 0:
            EtoMg = E / AtPct[pb.Mg-1]
            EtoSi = E / AtPct[pb.Si-1]
            EtoFe = E / AtPct[pb.Fe-1]
            Ratios.append([EtoMg/ProtosolarToMg[Zminus], EtoSi/ProtosolarToSi[Zminus], EtoFe/ProtosolarToFe[Zminus]])
            OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple([pb.ElementalSymbols[Zminus+1]]) + tuple(Ratios[-1]))
    Ratios = array(Ratios)
    Means = mean(Ratios, axis=0)
    Stdevs = std(Ratios, axis=0)
    OutStr += '-'*41 + '\n'
    OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple(['Mean']) + tuple(Means))
    OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple(['Standard dev']) + tuple(Stdevs))
    OutStr += '-'*41 + '\n'

    OutStr += '\nRefs:\n    Lodders, K. (2003). Solar System Abundances and Condensation Temperatures of the Elements. The Astrophysical ' \
              'Journal, 591(2), 1220-1247. http://doi.org/10.1086/375492\n' \
              '    Ishii, H. A., et al. (2008). Comparison of Comet 81P/Wild 2 Dust with Interplanetary Dust from Comets. Science, ' \
              '319(5), 447. http://doi.org/10.1126/science.1150683'

    ### Draw a plot comparing this spectrum normalized to CI and plotted against GEMS compositions.

    # First we have mean and standard deviation values for GEMS compositions.
    # Ishii 2008 GEMS mean (left) and std (right) values.
    IshiiAtPct = zeros(pb.U-1)
    IshiiAtPctSD = zeros(pb.U-1)
    IshiiAtPct[pb.O-1]  =  66.71;   IshiiAtPctSD[pb.O-1]  = 4.43
    IshiiAtPct[pb.Mg-1] =  9.37;    IshiiAtPctSD[pb.Mg-1] = 4.42
    IshiiAtPct[pb.Al-1] =  1.62;    IshiiAtPctSD[pb.Al-1] = 1.09
    IshiiAtPct[pb.Si-1] =  14.40;   IshiiAtPctSD[pb.Si-1] = 2.36
    IshiiAtPct[pb.S-1]  =  3.69;    IshiiAtPctSD[pb.S-1]  = 2.73
    IshiiAtPct[pb.Ca-1] =  0.82;    IshiiAtPctSD[pb.Ca-1] = 0.70
    IshiiAtPct[pb.Cr-1] =  0.12;    IshiiAtPctSD[pb.Cr-1] = 0.10
    IshiiAtPct[pb.Mn-1] =  0.02;    IshiiAtPctSD[pb.Mn-1] = 0.06
    IshiiAtPct[pb.Fe-1] =  6.39;    IshiiAtPctSD[pb.Fe-1] = 2.39
    IshiiAtPct[pb.Ni-1] =  0.40;    IshiiAtPctSD[pb.Ni-1] = 0.23

    # Make these Si normalized.
    SiTemp = IshiiAtPct[pb.Si-1]
    # IshiiRel is derived from IshiiAtPct, but is normalized against Si and normalized against chondritic.
    IshiiRel = copy(IshiiAtPct)/SiTemp
    IshiiRelSD = copy(IshiiAtPctSD)/SiTemp

    # And normalize to chondritic
    IshiiRel /= ProtosolarToSi[0:pb.U-1]
    IshiiRelSD /= ProtosolarToSi[0:pb.U-1]

    # Make a version of the sample quant which is ratioed to si
    AtPctToSi = AtPct / AtPct[pb.Si-1]
    # And chondritic
    AtPctToSi[:len(ProtosolarToSi)] /= ProtosolarToSi

    # Get the union of elements which are in our spectrum and in the GEMS mean values.
    # All indices for elements which have non zero values from either array.
    IncludedZ = hstack((nonzero(AtPct)[0], nonzero(IshiiRel)[0]))
    # Eliminate duplicates and make sure in ascending order.
    IncludedZ = sort(unique(IncludedZ))
    # Indices are 0 based, Z is 1 based.
    IncludedZ += 1

    # Get the list of element names for those elements.
    TickLabels = [El for Z, El in enumerate(pb.ElementalSymbols) if Z in IncludedZ]
    TickInds = range(len(TickLabels))

    IshiiInds = []
    IshiiVals = []
    IshiiErrs = []
    SpectrumInds = []
    SpectrumVals = []
    ChondriticInds = []
    ChondriticVals = []
    for Zminus1, Val in enumerate(AtPctToSi[:pb.U-1]):
        if IshiiRel[Zminus1] > 0:
            IshiiInds.append(TickLabels.index(pb.ElementalSymbols[Zminus1+1]))
            IshiiVals.append(IshiiRel[Zminus1])
            IshiiErrs.append(IshiiRelSD[Zminus1])
        if AtPct[Zminus1] > 0:
            SpectrumInds.append(TickLabels.index(pb.ElementalSymbols[Zminus1+1]))
            SpectrumVals.append(AtPctToSi[Zminus1])
        # This part only applies if not normalizing to chondritic.
        # if pb.ElementalSymbols[Zminus1+1] in TickLabels:
        #     ChondriticInds.append(TickLabels.index(pb.ElementalSymbols[Zminus1+1]))
        #     ChondriticVals.append(ProtosolarToSi[Zminus1])

    # We will be plotting so clear the plot that may already be plotted.
    plt.figure(1)
    plt.clf()

    # Ishii plot
    plt.scatter(IshiiInds, IshiiVals, marker='o', color='red', s=150, alpha=0.5, label='Ishii et al., 2008')
    #plt.errorbar(IshiiInds, IshiiVals, yerr=IshiiErrs, fmt='none', elinewidth=3, capsize=7, capthick=3, ecolor='red')
    plt.errorbar(IshiiInds, IshiiVals, yerr=IshiiErrs, fmt='none', alpha=0.5, elinewidth=5, capsize=0, capthick=3, ecolor='red')
    # This spectrum.
    plt.scatter(SpectrumInds, SpectrumVals, marker='v', color='blue', s=150,alpha=0.5, label='This Spectrum')
    # Chondritic
    # plt.scatter(ChondriticInds, ChondriticVals, marker='s', color='green', s=200,alpha=0.5)
    plt.axhline(1, 0, 92, color='green', linewidth=3, label='Chondritic')

    plt.xticks(TickInds, TickLabels, rotation='vertical')
    plt.gca().set_yscale('log')
    plt.legend()
    # plt.legend(['Ishii et al., 2008', 'This Spectrum', 'Chondritic'])
    plt.ylabel('Element/Si/chondritic, At%', fontsize=FontSizeBasis)
    plt.gca().set_ylim([3e-2, 30])
    plt.tight_layout()

    PrintTernary(AtPct, IshiiAtPct, IshiiAtPctSD)

    ShowLastPos(plt)

    return OutStr
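One portability note for example #39: array(zip(*Protosolar)[1]) only works on Python 2, since Python 3's zip returns an iterator that cannot be indexed. A sketch of an equivalent extraction of the numeric column (the variable name is a placeholder):

import numpy as np

def second_column(rows):
    # rows: an iterable of (name, value) records, e.g. the structured array from genfromtxt
    return np.array([row[1] for row in rows])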
Example #40
0
def executeFixPointSimulation(directory_for_network, inputsArray, masks,initializationDic=None, outputList=None,
                              sparse=False, modes=["verbose","time","outputEqui"],
                              initValue=10**(-13), rescaleFactor=None):
    """
        Execute the simulation of the system saved under the directory_for_network directory.
        InputsArray contain the values for the input species.
    :param directory_for_network: directory path, where the files equations.txt and constants.txt may be found.
    :param inputsArray: The test concentrations, a t * n array where t is the number of test and n the number of node in the first layer.
    :param initializationDic: can contain initialization values for some species. If none, or the species don't appear in its key, then its value is set at initValue (default to 10**(-13)).
    :param masks: network masks
    :param outputList: list or string, species we would like to see as outputs, if default (None), then will find the species of the last layer.
                                      if string and value is "nameDic" or "all", we will give all species taking part in the reaction (usefull for debug)
    :param sparse: if sparse, useful for large systems
    :param modes: modes for outputs, don't accept outputPlot as it only provides value at equilibrium now.
    :param initValue: initial concentration value to give to all species
    :param rescaleFactor: if None, then computed as the number of nodes, else: used to divide the value of the inputs
    :param masks:
    :return:
            A result tuple depending on the modes.
    """

    assert "outputPlot" not in modes

    parsedEquation,constants,nameDic=read_file(directory_for_network + "/equations.txt", directory_for_network + "/constants.txt")
    if sparse:
        KarrayA,stochio,maskA,maskComplementary = sparseParser(parsedEquation,constants)
    else:
        KarrayA,stochio,maskA,maskComplementary = parse(parsedEquation,constants)
    KarrayA,T0,C0,constants=setToUnits(constants,KarrayA,stochio)
    print("Initialisation constant: time:"+str(T0)+" concentration:"+str(C0))

    speciesArray = obtainSpeciesArray(inputsArray,nameDic,initValue,initializationDic,C0)
    speciesArray,rescaleFactor = rescaleInputConcentration(speciesArray,nameDic=nameDic,rescaleFactor=rescaleFactor)

    ##SAVE EXPERIMENT PARAMETERS:
    attributesDic = {}
    attributesDic["rescaleFactor"] = rescaleFactor
    attributesDic["T0"] = T0
    attributesDic["C0"] = C0
    for k in initializationDic.keys():
        attributesDic[k] = speciesArray[0,nameDic[k]]
    for idx,cste in enumerate(constants):
        attributesDic["k"+str(idx)] = cste
    attributesDic["Numbers_of_Constants"] = len(constants)
    experiment_path=saveAttribute(directory_for_network, attributesDic)

    shapeP=speciesArray.shape[0]

    #let us assign the right number of task in each process
    num_workers = multiprocessing.cpu_count()-1
    idxList = findRightNumberProcessus(shapeP,num_workers)

    #let us find the species of the last layer in case:
    if outputList is None:
        outputList = obtainOutputArray(nameDic)
    elif type(outputList)==str:
        if outputList=="nameDic" or outputList=="all":
            outputList=list(nameDic.keys())
        else:
            raise Exception("asked outputList is not taken into account.")

    nbrConstant = int(readAttribute(experiment_path,["Numbers_of_Constants"])["Numbers_of_Constants"])
    if nbrConstant == 12: #only one neuron, it is easy to extract cste values
        k1,k1n,k2,k3,k3n,k4,_,k5,k5n,k6,kd,_=[readAttribute(experiment_path,["k"+str(i)])["k"+str(i)] for i in range(0,nbrConstant)]
    else:
        k1,k1n,k2,k3,k3n,k4,_,k5,k5n,k6,kd,_= [0.9999999999999998,0.1764705882352941,1.0,0.9999999999999998,0.1764705882352941,1.0,
                                               0.018823529411764708,0.9999999999999998,0.1764705882352941,1.0,0.018823529411764708,0.018823529411764708]

    inhibTemplateNames = obtainTemplateArray(masks=masks,activ=False)
    activTemplateNames= obtainTemplateArray(masks=masks,activ=True)
    TA = initializationDic[activTemplateNames[0]]/C0
    TI = initializationDic[inhibTemplateNames[0]]/C0
    E0 = initializationDic["E"]/C0
    kdI = kd
    kdT = kd

    myconstants = [k1,k1n,k2,k3,k3n,k4,k5,k5n,k6,kdI,kdT,TA,TI,E0]

    t=tm()
    print("=======================Starting Fixed Point simulation===================")
    copyArgs = obtainCopyArgsFixedPoint(idxList,modes,speciesArray,nameDic,outputList,masks,myconstants,chemicalModel="templateModel")
    with multiprocessing.get_context("spawn").Pool(processes= len(idxList[:-1])) as pool:
        myoutputs = pool.map(fixPointSolverForMultiProcess, copyArgs)
    pool.close()
    pool.join()
    print("Finished computing, closing pool")
    timeResults={}
    timeResults[directory_for_network + "_wholeRun"]= tm() - t

    if("outputEqui" in modes):
        outputArray=np.zeros((len(outputList), shapeP))
    times = []
    for idx,m in enumerate(myoutputs):
        if("outputEqui" in modes):
            try:
                outputArray[:,idxList[idx]:idxList[idx+1]] = m[modes.index("outputEqui")]
            except Exception:
                raise Exception("Error while collecting the outputEqui result of worker "+str(idx))
        if("time" in modes):
            times += [m[modes.index("time")]]
    if("time" in modes):
        timeResults[directory_for_network + "_singleRunAvg"] = np.sum(times) / len(times)
    # Let us save our result:
    savedFiles = ["false_result.csv","output_equilibrium.csv","output_full.csv"]
    for k in nameDic.keys():
        savedFiles += [k+".csv"]
    for p in savedFiles:
        if(os._exists(os.path.join(experiment_path, p))):
            print("Allready exists: renaming older")
            os.rename(os.path.join(experiment_path,p),os.path.join(experiment_path,p.split(".")[0]+"Old."+p.split(".")[1]))
    if("outputEqui" in modes):
        df=pandas.DataFrame(outputArray)
        df.to_csv(os.path.join(experiment_path, "output_equilibrium.csv"))
    results=[0 for _ in range(len(modes))]
    if("outputEqui" in modes):
        results[modes.index("outputEqui")]= outputArray
    if "time" in modes:
        results[modes.index("time")]=timeResults
    return tuple(results)
예제 #41
0
# Standard library demos
# https://docs.python.org/3/tutorial/stdlib.html

import os
import stat
import shutil
import glob
import re  # Regular Expressions
import random


print("Running app from ", os.getcwd())
dir = "testdir"
try:
    if not os._exists(dir):
        os.mkdir(dir)

    mode = os.stat(dir).st_mode
    print(stat.S_ISDIR(mode))

    if not stat.S_ISDIR(mode):
        os.mkdir(dir)
    print(dir, " created with mode: ", stat.filemode(mode))

    os.rename(dir, "testdir2")
    shutil.move("testdir2", dir)

except FileExistsError:
    pass
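# A minimal sketch exercising the remaining stdlib imports declared at the top of
# this demo (glob, re, random), which the snippet above never uses; the file
# pattern and strings below are illustrative only.
print(glob.glob("*.py"))                          # list Python source files in the cwd
print(re.findall(r"\d+", "python 3.12 in 2024"))  # extract digit runs -> ['3', '12', '2024']
print(random.choice(["red", "green", "blue"]))    # pick one element at random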
예제 #42
0
            if not data:
                break
            fobj.write(data)


def get_patt(fname, patt):
    patt_list = []
    cpatt = re.compile(patt)
    with open(fname, 'rb') as fobj:
        while True:
            try:
                line = fobj.readline().decode('utf8')
            except:
                continue
            if not line:
                break
            m = cpatt.search(line)
            if m:
                patt_list.append(m.group())
    return patt_list


if __name__ == '__main__':
    if not os._exists('/tmp/netease'):
        os.makedirs('/tmp/netease')
    download_file('http://sports.163.com/index.html', '/tmp/netease')
    url_patt = r'http://[^\s;)(:]+\.(png|jpeg|jpg)'
    url_list = get_patt('/tmp/netease/index.html', url_patt)
    for img_url in url_list:
        download_file(img_url, '/tmp/netease')
예제 #43
0
def deleteUseless(directory, filenamesToDelete):
    for file in filenamesToDelete:
        if (os._exists(directory + file)):
            os.unlink(directory + file)
import os
import cv2
import ReducirRuido
from skimage import img_as_ubyte


rutaOrigen = os.path.join("Datos","train")
print os._exists(rutaOrigen)
for base, dirs, files in os.walk(rutaOrigen):
    for name in files:
        img = cv2.imread(os.path.join(rutaOrigen, name))
        nomArch = name.split('.')[0]
        bordes = cv2.Canny(img, 100, 200)

        cv2.imshow(nomArch,img)
        cv2.imshow(nomArch + ": Bordes", bordes)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

#        img = img_as_ubyte(img)
#        bordes = cv2.Canny(img, 100, 200)
#        cv2.imshow(nomArch + " : CV_U8", img)
#        cv2.imshow(nomArch + ": Bordes", bordes)
#        cv2.waitKey(0)
#        cv2.destroyAllWindows()

        img = ReducirRuido.denoiseMorfologico(img)
        bordes = cv2.Canny(img, 100, 200)
        cv2.imshow(nomArch + ":Morfologico", img)
        cv2.imshow(nomArch + ": Bordes", bordes)
        cv2.waitKey(0)
예제 #45
0
    def save_with_compare(self,
                          istruth=False,
                          params=None,
                          dview=None,
                          Cn=None):
        """save the comparison as well as the images of the precision recall calculations


            depending on if we say this file will be ground truth or not, it wil be saved in either the tests or the ground truth folder
            if saved in test, a comparison to groundtruth will be added to the object 
            this comparison will be on 
                data : a normized difference of the normalized value of the arrays
                time : difference
            in order for this function to work, you must
                have previously given it the cnm objects after initializing them ( on patch and full frame)
                give the values of the time and data 
                have a groundtruth


            Args:
                self:  dictionnary
                   the object of this class tha tcontains every value

                istruth: Boolean
                    if we want it ot be the ground truth

                params:
                    movie parameters

                dview :
                    your dview object

                n_frames_per_bin:
                    you need to know those data before
                    they have been given to the base/rois functions

                dims_test:
                    you need to know those data before
                    they have been given to the base/rois functions

                Cn:
                    your correlation image

                Cmap:
                    a particular colormap for your Cn

            See Also:
                Example of utilisation on Demo Pipeline
\image caiman/tests/comparison/data.pdf

             Raises:
                 ('we now have ground truth\n')

                 ('we were not able to read the file to compare it\n')

                """
        # getting the DATA FOR COMPARISONS
        assert params is not None and self.cnmpatch is not None, \
            'we need the parameters in order to save anything\n'
        # actions on the sparse matrix
        cnm = self.cnmpatch.__dict__
        cnmpatch = deletesparse(cnm)

        # initialization
        dims_test = [self.dims[0], self.dims[1]]
        dims_gt = dims_test
        dt = datetime.datetime.today()
        dt = str(dt)
        plat = plt.platform()
        plat = str(plat)
        pro = plt.processor()
        pro = str(pro)
        # we store a big file which contains everything (INFORMATION)
        information = {
            'platform': plat,
            'time': dt,
            'processor': pro,
            'params': params,
            'cnmpatch': cnmpatch,
            'timer': {
                'cnmf_on_patch': self.comparison['cnmf_on_patch']['timer'],
                'cnmf_full_frame': self.comparison['cnmf_full_frame']['timer'],
                'rig_shifts': self.comparison['rig_shifts']['timer']
            }
        }

        rootdir = os.path.abspath(cm.__path__[0])[:-7]
        file_path = os.path.join(caiman_datadir(), "testdata",
                                 "groundtruth.npz")

        # OPENINGS
        # if we want to set this data as truth
        if istruth:
            # we just save it
            if os._exists(file_path):
                os.remove(file_path)
                logging.debug("removed the previous groundtruth file\n")
            else:
                logging.debug("nothing to remove\n")
            np.savez_compressed(
                file_path,
                information=information,
                A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
                C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
                A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
                C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
                rig_shifts=self.comparison['rig_shifts']['ourdata'])
            logging.info('we now have ground truth\n')
            return

        else:  # if not we create a comparison first
            try:
                with np.load(file_path, encoding='latin1') as dt:
                    rig_shifts = dt['rig_shifts'][()]
                    A_patch = dt['A_patch'][()]
                    A_full = dt['A_full'][()]
                    C_full = dt['C_full'][()]
                    C_patch = dt['C_patch'][()]
                    data = dt['information'][()]
            # if we cannot manage to open it or it doesnt exist:
            except (IOError, OSError):
                # we save but we explain why there were a problem
                logging.warning('we were not able to read the file ' +
                                str(file_path) + ' to compare it\n')
                file_path = os.path.join(caiman_datadir(), "testdata",
                                         "NC" + dt + ".npz")
                np.savez_compressed(
                    file_path,
                    information=information,
                    A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
                    C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
                    A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
                    C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
                    rig_shifts=self.comparison['rig_shifts']['ourdata'])
                return
        # creating the FOLDER to store our data
        # XXX Is this still hooked up to anything?
        i = 0
        dr = os.path.join(caiman_datadir(), "testdata")
        for name in os.listdir(dr):
            i += 1
        i = str(i)
        if not os.path.exists(dr + i):
            os.makedirs(dr + i)
        information.update({'diff': {}})
        information.update({
            'differences': {
                'proc': False,
                'params_movie': False,
                'params_cnm': False
            }
        })
        # INFORMATION FOR THE USER
        if data['processor'] != information['processor']:
            logging.info(
                "you don't have the same processor as groundtruth.. the time difference can vary"
                " because of that\n try recreate your own groundtruth before testing. Compare: "
                + str(data['processor']) + " to " +
                str(information['processor']) + "\n")
            information['differences']['proc'] = True
        if data['params'] != information['params']:
            logging.warning(
                "you are not using the same movie parameters... Things can go wrong"
            )
            logging.warning(
                'you must use the same parameters to compare your version of the code with '
                'the groundtruth one. look for the groundtruth parameters with the see() method\n'
            )
            information['differences']['params_movie'] = True
        # We must cleanup some fields to permit an accurate comparison
        if not normalised_compare_cnmpatches(data['cnmpatch'], cnmpatch):
            if data['cnmpatch'].keys() != cnmpatch.keys():
                logging.error(
                    'DIFFERENCES IN THE FIELDS OF CNMF'
                )  # TODO: Now that we have deeply nested data structures, find a module that gives you tight differences.
            diffkeys = [
                k for k in data['cnmpatch']
                if data['cnmpatch'][k] != cnmpatch[k]
            ]
            for k in diffkeys:
                logging.info("{}:{}->{}".format(k, data['cnmpatch'][k],
                                                cnmpatch[k]))

            logging.warning(
                'you are not using the same parameters in your cnmf on patches initialization\n'
            )
            information['differences']['params_cnm'] = True

        # for rigid
        # plotting part

        information['diff'].update({
            'rig':
            plotrig(init=rig_shifts,
                    curr=self.comparison['rig_shifts']['ourdata'],
                    timer=self.comparison['rig_shifts']['timer'] -
                    data['timer']['rig_shifts'],
                    sensitivity=self.comparison['rig_shifts']['sensitivity'])
        })
        try:
            pl.gcf().savefig(dr + str(i) + '/' + 'rigidcorrection.pdf')
            pl.close()
        except:
            pass

        # for cnmf on patch
        information['diff'].update({
            'cnmpatch':
            cnmf(Cn=Cn,
                 A_gt=A_patch,
                 A_test=self.comparison['cnmf_on_patch']['ourdata'][0],
                 C_gt=C_patch,
                 C_test=self.comparison['cnmf_on_patch']['ourdata'][1],
                 dview=dview,
                 sensitivity=self.comparison['cnmf_on_patch']['sensitivity'],
                 dims_test=dims_test,
                 dims_gt=dims_gt,
                 timer=self.comparison['cnmf_on_patch']['timer'] -
                 data['timer']['cnmf_on_patch'])
        })
        try:
            pl.gcf().savefig(dr + i + '/' + 'onpatch.pdf')
            pl.close()
        except:
            pass

# CNMF FULL FRAME
        information['diff'].update({
            'cnmfull':
            cnmf(Cn=Cn,
                 A_gt=A_full,
                 A_test=self.comparison['cnmf_full_frame']['ourdata'][0],
                 C_gt=C_full,
                 C_test=self.comparison['cnmf_full_frame']['ourdata'][1],
                 dview=dview,
                 sensitivity=self.comparison['cnmf_full_frame']['sensitivity'],
                 dims_test=dims_test,
                 dims_gt=dims_gt,
                 timer=self.comparison['cnmf_full_frame']['timer'] -
                 data['timer']['cnmf_full_frame'])
        })
        try:
            pl.gcf().savefig(dr + i + '/' + 'cnmfull.pdf')
            pl.close()
        except:
            pass


# Saving of everything
        target_dir = os.path.join(caiman_datadir(), "testdata", i)
        if not os.path.exists(target_dir):
            os.makedirs(
                os.path.join(caiman_datadir(), "testdata", i)
            )  # XXX If we ever go Python3, just use the exist_ok flag to os.makedirs
        file_path = os.path.join(target_dir, i + ".npz")
        np.savez_compressed(
            file_path,
            information=information,
            A_full=self.comparison['cnmf_full_frame']['ourdata'][0],
            C_full=self.comparison['cnmf_full_frame']['ourdata'][1],
            A_patch=self.comparison['cnmf_on_patch']['ourdata'][0],
            C_patch=self.comparison['cnmf_on_patch']['ourdata'][1],
            rig_shifts=self.comparison['rig_shifts']['ourdata'])

        self.information = information
예제 #46
0
def get_credentials():
    if os._exists('credentials.txt'):
        return True
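# Note: os._exists is an undocumented private helper inside CPython's os module
# (it appears to report whether a *name* is defined in os's own namespace, not
# whether a path exists on disk), so checks like the one above are unlikely to
# behave as intended. A minimal sketch of the same check with the documented
# API; the function name below is hypothetical, not from the original project.
import os

def credentials_file_present(path='credentials.txt'):
    # os.path.isfile reports whether `path` exists and is a regular file
    return os.path.isfile(path)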
예제 #47
0
__author__ = 'Makhtar'

# Standard library demos
# https://docs.python.org/3/tutorial/stdlib.html

import os
import stat
import shutil
import glob
import re  # Regular Expressions
import random

print("Running app from ", os.getcwd())
dir = "testdir"
try:
    if not os._exists(dir):
        os.mkdir(dir)

    mode = os.stat(dir).st_mode
    print(stat.S_ISDIR(mode))

    if not stat.S_ISDIR(mode):
        os.mkdir(dir)
    print(dir, " created with mode: ", stat.filemode(mode))

    os.rename(dir, "testdir2")
    shutil.move("testdir2", dir)

except FileExistsError:
    pass
예제 #48
0
                            self.visualize_port = int(line[2])
                            continue
                        if line[0] == 'firewall_ip':
                            self.firewall_host = line[1]
                            self.firewall_port = int(line[2])
                            continue
                        self.fib_dic[line[0]] = line[1]

            #print(self.fib_dic)
        except Exception, e:
            print(Exception, ", ", e)
            print("Failed to load the config file")
            raise SystemExit

        try:
            if not os._exists('./cache/'):
                os.mkdir('./cache')
        except:
            return

    def _bind_socket(self):
        """
        Create the sever socket and bind it to the given host and port
        :return:
        """
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                      1)
        print("Now binding the socket, host is ", self.host, " port is ",
              self.port)
        self.server_socket.bind((self.host, self.port))
예제 #49
0
        "Listening for messages on {}..\n".format(available_subscription_path))

    try:
        # Calling result() on StreamingPullFuture keeps the main thread from
        # exiting while messages get processed in the callbacks.
        streaming_pull_future.result()
    except:  # noqa
        streaming_pull_future.cancel()

    available_subscriber.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("project_id", help="Google Cloud project ID")
    parser.add_argument("subscription_id", help="Pub/Sub subscription ID")
    parser.add_argument(
        "workdir",
        help="Directory where files are downloaded and modified",
        nargs='?',
        default="tmpwork")

    args = parser.parse_args()

    if not os._exists(args.workdir):
        os.makedirs(args.workdir)
    process_work(args.project_id, args.subscription_id, args.workdir)
    os.rmdir(args.workdir)
예제 #50
0
                    f.close()
                elif file.startswith(sefer):
                    ref_form = Ref("Mishnah " + file.replace("-", ":").replace(
                        ".txt",
                        ""))  #Berakhot 3-13.txt --> Mishnah Berakhot 3:13
                    found_ref[ref_form.normal()] += 1
                    chapter, mishnah = ref_form.sections[0], ref_form.sections[
                        1]
                    if chapter not in parsed_text[sefer].keys():
                        parsed_text[sefer][chapter] = {}
                    lines = get_lines_from_web(file.rsplit("/")[-1],
                                               download_mode=download_mode)
                    if not lines:
                        if not download_mode:
                            assert os._exists(
                                file
                            ), "File exists on hard drive in text format but not in HTML format"
                        print "NOT FOUND"
                        continue
                    if not download_mode:
                        lines = [
                            line.replace(u"\xa0", u"") for line in lines
                            if line.replace(u" ", u"").replace(u"\xa0", u"")
                        ]
                        parsed_text[sefer][chapter][mishnah] = parse(
                            lines, sefer, chapter, mishnah,
                            HOW_MANY_REFER_TO_SECTIONS)

            if not download_mode:
                most_common_value = found_ref.most_common(1)[0]
                assert most_common_value[1] == 1, "{} has {}".format(
# 25/02/17 Updated the camera parameter optimisation options to exploit the greater flexibility now offered.
# 25/02/17 Added a required test for non-None marker locations (Metashape now sets them to none if unselected).
# 25/02/17 Multiple name changes to accommodate Metashape updates of chunk accuracy attributes (e.g. tie_point_accuracy).
# 25/02/17 Multiple changes to export function parameters to accommodate Metashape updates.

########################################################################################
######################################   SETUP    ######################################
########################################################################################
# Update the parameters below to tailor the script to your project.

# Directory where output will be stored and active control file is saved.
# Note use of '/' in the path (not '\'); end the path with '/'
# The files will be generated in a sub-folder named "Monte_Carlo_output"
# Change the path to the one you want, but there's no need to change act_ctrl_file.
dir_path = 'C:/HG_Projects/CWC_Drone_work/HG_Retest_Pia/'
if not os._exists(dir_path):
    os.mkdir(dir_path)

act_ctrl_file = 'active_ctrl_indices.txt'

# Define how many times bundle adjustment (Metashape 'optimisation') will be carried out.
# 4000 used in original work, as a reasonable starting point.
num_randomisations = 1000

# Define the camera parameter set to optimise in the bundle adjustment.

# WE NEED TO CHANGE THIS TO USE THE PARAMETERS OF THE PROJECT. WE ALSO NEED TO DOUBLE CHECK WHERE IN OUR WORKFLOW THE PARAMETERS ARE SET? (IF SCRIPT 1 OK, IF SCRIPT 2 WE NEED TO MAKE A CHANGE TO INCLUDE THE SETTING IN SCRIPT 1).

# v.1.3 of Photoscan enables individual selection/deselection of all parameters.
예제 #52
0
import create_db
import os
from pathlib import Path

database_file = 'data.db'
#if database_file.is_file():
#   print('Jop')
name = os.getcwd() + '\\' + database_file
temp = os._exists(name)
print(name)
print(temp)
#if os.exists(database_file)==1:
#   print('Hell Yeah!')
예제 #53
0
        return self.model(output)


G = Generator()
D = Discriminator()
loss = nn.MSELoss()
Optimizer_G = torch.optim.Adam(G.parameters(), lr=opt.lr)
Optimizer_D = torch.optim.Adam(D.parameters(), lr=opt.lr)
cuda = True if torch.cuda.is_available() else False  # is_available must be called, not just referenced
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
if cuda:
    G.cuda()
    D.cuda()
    loss.cuda()

if not os._exists(opt.save_path):
    os.mkdir(opt.save_path)
# ##############
# # training
# #############
for epoch in range(opt.Epoch):
    for i, (img, label) in enumerate(train_dataloader):
        real = torch.ones(img.shape[0], 1)
        real = Variable(real.type(FloatTensor))
        fake = torch.zeros(img.shape[0], 1)
        fake = Variable(fake.type(FloatTensor))

        real_img = Variable(img.type(FloatTensor))
        real_label = Variable(label.type(LongTensor))

        Optimizer_G.zero_grad()
예제 #54
0
def add(request, username):
    """
    add
    """
    if request.POST:
        time = timezone.now()
        location = request.POST.get('location')
        name = request.POST.get('name')
        money = request.POST.get('money')
        phone = request.POST.get('phone')
        area = request.POST.get("area")
        description = request.POST.get("description")
        username = request.session['userName']
        pic1 = "static/users/default.jpg"
        pic2 = "static/users/default.jpg"
        pic3 = "static/users/default.jpg"
        pic4 = "static/users/default.jpg"
        if request.FILES:
            try:
                os.chdir('HouseRent')
            except FileNotFoundError:
                pass
            filepath = "static/users/" + username + "/"
            try:
                if not os._exists(filepath):
                    os.makedirs(filepath)
            except FileExistsError:
                pass
            pic1 = filepath + str(random.randint(100, 9999)) + ".jpg"
            with open(pic1, "wb") as fh:
                for content in request.FILES.get('pic1'):
                    fh.write(content)
            pic2 = filepath + str(random.randint(100, 9999)) + ".jpg"
            with open(pic2, "wb") as fh:
                for content in request.FILES.get('pic2'):
                    fh.write(content)
            pic3 = filepath + str(random.randint(100, 9999)) + ".jpg"
            with open(pic3, "wb") as fh:
                for content in request.FILES.get('pic3'):
                    fh.write(content)
            pic4 = filepath + str(random.randint(100, 9999)) + ".jpg"
            with open(pic4, "wb") as fh:
                for content in request.FILES.get('pic4'):
                    fh.write(content)

        try:
            if not User.objects.get(username__exact=username).isMedium:  # ordinary user
                if request.POST.get('1') == '出租':
                    House.objects.create(location=location, money=money,
                                         name=name, phone=phone,
                                         area=area, description=description,
                                         pic1=pic1, pic2=pic2, pic3=pic3, pic4=pic4,
                                         time=time, username=username,
                                         isWanted=False, isMedium=False,
                                         isBooked=False)
                elif request.POST.get('1') == '求租':
                    House.objects.create(location=location, money=money,
                                         name=name, phone=phone,
                                         area=area, description=description,
                                         pic1=pic1, pic2=pic2, pic3=pic3, pic4=pic4,
                                         time=time, username=username,
                                         isWanted=True, isMedium=False,
                                         isBooked=False)
            else:
                if request.POST.get('1') == '出租':
                    House.objects.create(location=location, money=money,
                                         name=name, phone=phone,
                                         area=area, description=description,
                                         pic1=pic1, pic2=pic2, pic3=pic3, pic4=pic4,
                                         time=time, username=username,
                                         isWanted=False, isMedium=True,
                                         isBooked=False)
                elif request.POST.get('1') == '求租':
                    House.objects.create(location=location, money=money,
                                         name=name, phone=phone,
                                         area=area, description=description,
                                         pic1=pic1, pic2=pic2, pic3=pic3, pic4=pic4,
                                         time=time, username=username,
                                         isWanted=True, isMedium=True,
                                         isBooked=False)
        except Exception:
            request.session['inputError'] = True
            return redirect('/HouseRent/{0}/release/'.format(username))
    request.session['isRlsSuccess'] = True
    return HttpResponseRedirect('/HouseRent/')
def Janela_Cadastro():
    Janela_Cadastro = Tk()
    Janela_Cadastro.title("CADASTRO USUARIO")

    # Check whether the icon file is in the folder and, if so, apply it
    if os._exists('treino128x128.ico'):
        janela_principal.iconbitmap('treino128x128.ico')

    Janela_Cadastro.geometry("500x500")

    Frame_Direito = Label(Janela_Cadastro)
    Frame_Direito.pack(side=RIGHT)

    Frame_Esquerdo = Label(Janela_Cadastro)
    Frame_Esquerdo.pack(side=LEFT)

    Label(Frame_Esquerdo, text="NOME: ").pack()
    Nome = Entry(Frame_Esquerdo)
    Nome.pack()

    Label(Frame_Esquerdo, text="IDADE: ").pack()
    Idade = Entry(Frame_Esquerdo)
    Idade.pack()

    Label(Frame_Esquerdo, text="SEXO: ").pack()
    Sexo = Entry(Frame_Esquerdo)
    Sexo.pack()

    LbAltura = Label(Frame_Esquerdo, text="ALTURA: ")
    LbAltura.pack()
    Altura = Entry(Frame_Esquerdo)
    Altura.pack()

    LbPeso = Label(Frame_Esquerdo, text="PESO: ")
    LbPeso.pack()
    Peso = Entry(Frame_Esquerdo)
    Peso.pack()

    LbBicepsD = Label(Frame_Direito, text="BICEPS DIREITO: ")
    LbBicepsD.pack()
    Biceps_direito = Entry(Frame_Direito)
    Biceps_direito.pack()

    LbBicepsE = Label(Frame_Esquerdo, text="BICEPS ESQUERDO: ")
    LbBicepsE.pack()
    Biceps_esquerdo = Entry(Frame_Esquerdo)
    Biceps_esquerdo.pack()

    LbCD = Label(Frame_Direito, text="COXA DIREITA: ")
    LbCD.pack()
    Coxa_direita = Entry(Frame_Direito)
    Coxa_direita.pack()

    LbCE = Label(Frame_Esquerdo, text="COXA ESQUERDA: ")
    LbCE.pack()
    Coxa_esquerda = Entry(Frame_Esquerdo)
    Coxa_esquerda.pack()

    LbBracoD = Label(Frame_Direito, text="ANTI BRAÇO DIREITO: ")
    LbBracoD.pack()
    Antibraco_direito = Entry(Frame_Direito)
    Antibraco_direito.pack()

    LbBracoE = Label(Frame_Esquerdo, text="ANTI BRAÇO ESQUERDO: ")
    LbBracoE.pack()
    Antibraco_esquerdo = Entry(Frame_Esquerdo)
    Antibraco_esquerdo.pack()

    LbPanturrilhaD = Label(Frame_Direito, text="PANTURRILHA DIREITA: ")
    LbPanturrilhaD.pack()
    Panturrilha_direita = Entry(Frame_Direito)
    Panturrilha_direita.pack()

    LbPanturrilhaE = Label(Frame_Esquerdo, text="PANTURRILHA ESQUERDA: ")
    LbPanturrilhaE.pack()
    Panturrilha_esquerda = Entry(Frame_Esquerdo)
    Panturrilha_esquerda.pack()

    LbAbdomen = Label(Frame_Direito, text="ABDÔMEN: ")
    LbAbdomen.pack()
    Abdomen = Entry(Frame_Direito)
    Abdomen.pack()

    LbCintura = Label(Frame_Direito, text="CINTURA: ")
    LbCintura.pack()
    Cintura = Entry(Frame_Direito)
    Cintura.pack()

    LbQuadril = Label(Frame_Direito, text="QUADRIL: ")
    LbQuadril.pack()
    Quadril = Entry(Frame_Direito)
    Quadril.pack()

    LbTorax = Label(Frame_Direito, text="TÓRAX: ")
    LbTorax.pack()
    Torax = Entry(Frame_Direito)
    Torax.pack()

    LbOmbro = Label(Frame_Direito, text="OMBRO: ")
    LbOmbro.pack()
    Ombro = Entry(Frame_Direito)
    Ombro.pack()

    def BtSalva_clique():
        global nome
        global idade
        global sexo
        global altura
        global peso
        global biceps_direito
        global biceps_esquerdo
        global coxa_direita
        global coxa_esquerda
        global antibraco_direito
        global antibraco_esquerdo
        global panturrilha_direita
        global panturrilha_esquerda
        global abdomen
        global cintura
        global quadril
        global torax
        global ombro

        ### Save all of the entered values into the global variables

        nome                 =   str(Nome.get()).upper()
        idade                =   int(Idade.get())
        sexo                 =   str(Sexo.get())
        altura               =   float(Altura.get())
        peso                 =   float(Peso.get())
        biceps_direito       =   float(Biceps_direito.get())
        biceps_esquerdo      =   float(Biceps_esquerdo.get())
        coxa_direita         =   float(Coxa_direita.get())
        coxa_esquerda        =   float(Coxa_esquerda.get())
        antibraco_direito    =   float(Antibraco_direito.get())
        antibraco_esquerdo   =   float(Antibraco_esquerdo.get())
        panturrilha_direita  =   float(Panturrilha_direita.get())
        panturrilha_esquerda =   float(Panturrilha_esquerda.get())
        abdomen              =   float(Abdomen.get())
        cintura              =   float(Cintura.get())
        quadril              =   float(Quadril.get())
        torax                =   float(Torax.get())
        ombro                =   float(Ombro.get())

        # Change the Save button's label and turn its colour yellow
        BtSalva["text"]="SALVO"
        BtSalva["bg"] = "yellow"


    BtSalva = Button(Janela_Cadastro, text="SALVAR", command= BtSalva_clique)
    BtSalva.pack(side=BOTTOM)
def AnalyzePhase(AtPct=None, WtPct=None, OxWtPct=None):

    #Normalize our AtPct vector.
    AtPct = AtPct/sum(AtPct)*100

    # A dictionary of the AtPct values would be useful so we can look up by element name.
    E = dict(zip(pb.ElementalSymbols, AtPct))

    # We build an output string which contains the analysis.
    OutStr = '--- Simple At% ratios ---\n\n'

    OutStr += "Abundances ratioed to:\n"
    OutStr += "Element to   Mg       Si       Fe\n"
    OutStr += '-'*41 + '\n'
    for Zminus, E in enumerate(AtPct):
        if E != 0:
            EtoMg = E / AtPct[pb.Mg-1]
            EtoSi = E / AtPct[pb.Si-1]
            EtoFe = E / AtPct[pb.Fe-1]
            OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple([pb.ElementalSymbols[Zminus+1]]) + tuple([EtoMg, EtoSi, EtoFe]))


    # Append the chondritic analysis to the output string.
    OutStr += '--- Chondritic Analysis ---\n\n'

    # Load the prosolar abundances.  This is recorded from the Lodders ref with logarithmic values.
    ProtosolarAbundancesFileName = 'ProtosolarAbundances.csv'
    if not os._exists(ProtosolarAbundancesFileName):
        ProtosolarAbundancesFileName = os.path.join('ConfigData', ProtosolarAbundancesFileName)
    Protosolar = genfromtxt(ProtosolarAbundancesFileName, delimiter=',', skip_header=1, dtype=None)
    ProtosolarDict = dict(Protosolar)   # This dictionary could be handy...
    Protosolar = array(zip(*Protosolar)[1]) # But we really need just a numpy array with the numbers.

    # Convert to vectors which are normalized to Mg, Si, and Fe.
    ProtosolarToMg = power(10, Protosolar)  # Get out of log space into linear space.  Now the numbers relate to AtPct.
    ProtosolarToMg /= ProtosolarToMg[pb.Mg-1]
    ProtosolarToSi = power(10, Protosolar)
    ProtosolarToSi /= ProtosolarToSi[pb.Si-1]
    ProtosolarToFe = power(10, Protosolar)
    ProtosolarToFe /= ProtosolarToFe[pb.Fe-1]

    # Print out the abundances normalized to protosolar.
    Ratios = list() # Keep track of the ratios, so at the end we can compute standard deviations.
    OutStr += "Abundances ratioed to protosolar and normalized to:\n"
    OutStr += "Element to   Mg       Si       Fe\n"
    OutStr += '-'*41 + '\n'
    for Zminus, E in enumerate(AtPct):
        if E != 0:
            EtoMg = E / AtPct[pb.Mg-1]
            EtoSi = E / AtPct[pb.Si-1]
            EtoFe = E / AtPct[pb.Fe-1]
            Ratios.append([EtoMg/ProtosolarToMg[Zminus], EtoSi/ProtosolarToSi[Zminus], EtoFe/ProtosolarToFe[Zminus]])
            OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple([pb.ElementalSymbols[Zminus+1]]) + tuple(Ratios[-1]))
    Ratios = array(Ratios)
    Means = mean(Ratios, axis=0)
    Stdevs = std(Ratios, axis=0)
    OutStr += '-'*41 + '\n'
    OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple(['Mean']) + tuple(Means))
    OutStr += '%-13s%-9.3f%-9.3f%-9.3f\n' % (tuple(['Standard dev']) + tuple(Stdevs))
    OutStr += '-'*41 + '\n'

    OutStr += '\nRef:Lodders, K. (2003). Solar System Abundances and Condensation Temperatures of the Elements. The Astrophysical Journal, 591(2), 1220-1247. http://doi.org/10.1086/375492\n'

    return OutStr
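# A small numeric sketch of the chondritic normalization computed above, with
# made-up numbers (the real values come from ProtosolarAbundances.csv): an element
# is first ratioed to Mg in the measured composition, then divided by the same
# ratio in the protosolar composition, so a value of 1.0 means exactly chondritic.
from numpy import power
logMg, logFe = 7.55, 7.47                    # illustrative log10 protosolar abundances only
protosolarFeToMg = power(10, logFe) / power(10, logMg)
measuredFeToMg = 0.70                        # hypothetical measured Fe/Mg atomic ratio
print(measuredFeToMg / protosolarFeToMg)     # the entry that would land in the "to Mg" column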
예제 #57
0
def test_bbox(model, to_save=True):
    # Test dataset.
    testset = args.dataset
    dataset_test = HandDataset()
    dataset_test.load_hand('datasets/' + args.dataset + "_test_annotations.txt")
    dataset_test.prepare()
    pred_m = []
    gt_m = []
    pred_s = []
    dir_name = "./samples/hand/results/oxford_{:%m%d%H%M}/".format(
        datetime.datetime.now())
    if to_save:
        if not os._exists(dir_name):
            os.mkdir(dir_name)
    gt_a = []
    pred_a = []
    for image_info in dataset_test.image_info:
        print(image_info)
        image_id = image_info['id']
        image_path = image_info['path']
        img_origin = skimage.io.imread(image_path)
        h, w, _ = img_origin.shape
        img = img_origin.copy()

        gt_polygons = image_info['polygons']
        gt_boxes = []
        gt_class_ids = []

        for gt_polygon in gt_polygons:
            x = [gt_polygon[0], gt_polygon[2], gt_polygon[4], gt_polygon[6]]
            y = [gt_polygon[1], gt_polygon[3], gt_polygon[5], gt_polygon[7]]
            gt_boxes.append([min(y), min(x), max(y), max(x)])
            gt_class_ids.append(1)
        gt_boxes = np.array(gt_boxes)
        gt_class_ids = np.array(gt_class_ids)
        gt_masks, gt_mask_class_ids = dataset_test.load_mask(image_id)
        gt_orientations = dataset_test.load_orientations(image_id)

        result = model.detect([img], verbose=0)[0]
        pred_boxes = result['rois']
        pred_class_ids = result["class_ids"]
        pred_scores = result["scores"]
        pred_masks = result["masks"]
        pred_orientations = result["orientations"]
        save_img = img_origin
        y1 = -1
        for gt_box in gt_boxes:
            y1, x1, y2, x2 = gt_box
        if y1 > 0:
            if len(pred_boxes) > 0:
                gt_match, pred_match, overlaps, pred_scores, gt_angles, pred_angles = \
                    utils.compute_matches_with_scores_bbox(gt_boxes, gt_class_ids, gt_masks, gt_orientations,
                                                           pred_boxes, pred_class_ids, pred_scores, pred_masks,
                                                           pred_orientations,
                                                           iou_threshold=0.5, score_threshold=0.0)
                gt_a.extend(gt_angles)
                pred_a.extend(pred_angles)

                if to_save:
                    save_img = color_white(save_img, pred_masks, pred_orientations)
            else:
                gt_match = len(gt_boxes) * [-1]
                pred_match = []
                pred_scores = []
        else:
            gt_match = []
            if len(pred_boxes) > 0:
                pred_match = len(pred_boxes) * [-1]
                pred_scores = pred_scores
            else:
                pred_match = []
                pred_scores = []
        if to_save:
            filename = dir_name + image_path.split('/')[-1]
            print(filename)
            skimage.io.imsave(filename, save_img)

        print("pred_match: ", pred_match)
        print("gt_match: ", gt_match)
        print("pred_scores", pred_scores)
        gt_m.extend(gt_match)
        pred_m.extend(pred_match)
        pred_s.extend(pred_scores)
        # Temp AP
        assert len(pred_m) == len(pred_s)
        tmp_pred_m = np.array(pred_m)
        tmp_gt_m = np.array(gt_m)
        tmp_pred_s = np.array(pred_s)
        # sort the score
        tmp_sorted_idx = np.argsort(tmp_pred_s)[::-1]
        tmp_pred_m = tmp_pred_m[tmp_sorted_idx]
        # Compute precision and recall at each prediction box step
        tmp_precisions = np.cumsum(tmp_pred_m > -1) / (np.arange(len(tmp_pred_m)) + 1)
        tmp_recalls = np.cumsum(tmp_pred_m > -1).astype(np.float32) / len(tmp_gt_m)
        print("AP = ", voc_ap(tmp_recalls, tmp_precisions))

    # Compute mean AP over recall range
    assert len(pred_m) == len(pred_s)
    pred_m = np.array(pred_m)
    gt_m = np.array(gt_m)
    pred_s = np.array(pred_s)
    # sort the score
    sorted_idx = np.argsort(pred_s)[::-1]
    pred_m = pred_m[sorted_idx]
    pred_s = pred_s[sorted_idx]
    # Compute precision and recall at each prediction box step
    precisions = np.cumsum(pred_m > -1) / (np.arange(len(pred_m)) + 1)
    recalls = np.cumsum(pred_m > -1).astype(np.float32) / len(gt_m)
    mAP = voc_ap(recalls, precisions)
    print("AP = ", mAP)

    plt.figure(1)
    plt.plot(recalls, precisions)
    plt.savefig(dir_name + args.testset + "_pre_rec.png")

    pr_dict = {"precision": precisions, "recall": recalls}

    # angle
    delta_angles = [np.abs(pred_a[i] - gt_a[i]) for i in range(len(pred_a))]
    for i in range(len(delta_angles)):
        delta_angles[i] = delta_angles[i] % 360
        if delta_angles[i] > 180:
            delta_angles[i] = 360 - delta_angles[i]

    def angle_accuracy(d_angles, thres=10):
        pred_r = [dangle <= thres for dangle in d_angles]
        accu = sum(pred_r) / len(pred_r)
        return accu

    accuracys = [angle_accuracy(delta_angles, thres) for thres in range(90)]
    print("num matched = ", len(delta_angles))
    print("thres = 10, accu = ", accuracys[10])
    print("thres = 20, accu = ", accuracys[20])
    print("thres = 30, accu = ", accuracys[30])

    return mAP
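# The voc_ap helper called above is not shown in this snippet; a typical VOC-style
# average-precision computation over a precision/recall curve looks roughly like
# the sketch below (an assumption about its behaviour, not this project's code).
import numpy as np

def voc_ap_sketch(recalls, precisions):
    # pad so the envelope starts at recall 0 and ends at recall 1
    mrec = np.concatenate(([0.0], recalls, [1.0]))
    mpre = np.concatenate(([0.0], precisions, [0.0]))
    # make precision monotonically non-increasing (the standard VOC envelope)
    for i in range(len(mpre) - 2, -1, -1):
        mpre[i] = max(mpre[i], mpre[i + 1])
    # integrate: sum rectangle areas wherever recall changes
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])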
예제 #58
0
weekday_dict = {'mon':'월요일','tue':'화요일','wed':'수요일','thu':'목요일',\
                'fri':'금요일','sat':'토요일','sun':'일요일'}

# items() returns the dictionary's (key, value) pairs as tuples in a list-like view
print(weekday_dict.items())
# dict_items([('mon', '월요일'), ('tue', '화요일'), ('wed', '수요일'), ('thu', '목요일'), ('fri', '금요일'), ('sat', '토요일'), ('sun', '일요일')])
print()

print(weekday_dict.values())
# dict_values(['월요일', '화요일', '수요일', '목요일', '금요일', '토요일', '일요일'])
print()
try:
    for mydir in weekday_dict.values():
        mypath = myfolder + mydir
        print('mypath', mypath)
        if os._exists(mypath):
            # if the folder already exists, delete it first
            shutil.rmtree(mypath)

        os.mkdir(mypath)
except FileExistsError:
    pass
####################################################


# define the saveFile helper
def saveFile(image_url, weekday, mytitle):
    image_file = urlopen(image_url)
    myfile = open(
        'c:\\imsi\\' + weekday_dict[weekday] + '\\' + mytitle + '.jpg', 'wb')
예제 #59
0
                                      start_date=du.getTenYearsAgoTime(),
                                      end_date=du.getYesterDayTime(),
                                      frequency="d",
                                      adjustflag="3")
    #### Print the result set ####
    result_list = []
    while (rs.error_code == '0') & rs.next():
        result_list.append(rs.get_row_data())
    result2 = pd.DataFrame(result_list, columns=rs.fields, dtype=float)
    print(result2.dtypes)
    result2 = result2.sort_values(by='pbMRQ')
    result2 = result2.reset_index(drop=True)
    # find the position of a given value within the result set
    todayPBIndex = result2[(
        result2.date == du.getYesterDayTime())].index.tolist()[0]
    lensPB = len(result2)
    print("PB位置是:" + str(todayPBIndex) + ",总列数是:" + str(lensPB))
    print("PB的分位点是:" + str(todayPBIndex / lensPB * 100)[:4] + "%")
    #### Write the result set to a CSV file ####
    my_file = "/Users/mfhj-dz-001-068/pythonData/pe_" + code + "_data.csv"
    if os._exists(my_file):
        # delete the existing file
        os.remove(my_file)

    result2.to_csv("/Users/mfhj-dz-001-068/pythonData/pe_" + code +
                   "_data.csv",
                   encoding="gbk",
                   index=False)

print("-----登陆系统:")
bs.logout()
예제 #60
0
def executeODESimulation(funcForSolver, directory_for_network, inputsArray, initializationDic=None, outputList=None,
                         leak=10 ** (-13), endTime=1000, sparse=False, modes=["verbose","time", "outputPlot", "outputEqui"],
                         timeStep=0.1, initValue=10**(-13), rescaleFactor=None):
    """
        Execute the simulation of the system saved under the directory_for_network directory.
        inputsArray contains the values for the input species.
    :param funcForSolver: function used by the solver. Should provide the derivative of concentration with respect to time for all species.
                          If a string is given, the LASSIE method is used instead.
    :param directory_for_network: directory path where the files equations.txt and constants.txt can be found.
    :param inputsArray: the test concentrations, a t * n array where t is the number of tests and n the number of nodes in the first layer.
    :param initializationDic: may contain initialization values for some species. If None, or if a species does not appear among its keys, that species is set to initValue (default 10**(-13)).
    :param outputList: list or string, the species to report as outputs. If None (default), the species of the last layer are used;
                                      if the string "nameDic" or "all", every species taking part in the reactions is reported (useful for debugging).
    :param leak: float, small leak added at each time step to the concentration of every species.
    :param endTime: final simulation time.
    :param sparse: if True, use a sparse representation, useful for large systems.
    :param modes: output modes.
    :param timeStep: float, time step used in the integration.
    :param initValue: initial concentration value given to all species.
    :param rescaleFactor: if None, computed as the number of nodes; otherwise used to divide the input values.
    :return:
            A result tuple depending on the modes.
    """

    parsedEquation,constants,nameDic=read_file(directory_for_network + "/equations.txt", directory_for_network + "/constants.txt")
    if sparse:
        KarrayA,stochio,maskA,maskComplementary = sparseParser(parsedEquation,constants)
    else:
        KarrayA,stochio,maskA,maskComplementary = parse(parsedEquation,constants)
    KarrayA,T0,C0,constants=setToUnits(constants,KarrayA,stochio)
    print("Initialisation constant: time:"+str(T0)+" concentration:"+str(C0))

    speciesArray = obtainSpeciesArray(inputsArray,nameDic,initValue,initializationDic,C0)
    speciesArray,rescaleFactor = rescaleInputConcentration(speciesArray,nameDic=nameDic,rescaleFactor=rescaleFactor)

    time=np.arange(0,endTime,timeStep)
    derivativeLeak = leak

    ##SAVE EXPERIMENT PARAMETERS:
    attributesDic = {}
    attributesDic["rescaleFactor"] = rescaleFactor
    attributesDic["leak"] = leak
    attributesDic["T0"] = T0
    attributesDic["C0"] = C0
    attributesDic["endTime"] = endTime
    attributesDic["time_step"] = timeStep
    for k in initializationDic.keys():
        attributesDic[k] = speciesArray[0,nameDic[k]]
    for idx,cste in enumerate(constants):
        attributesDic["k"+str(idx)] = cste
    attributesDic["Numbers_of_Constants"] = len(constants)
    experiment_path=saveAttribute(directory_for_network, attributesDic)

    shapeP=speciesArray.shape[0]

    #let us assign the right number of task in each process
    num_workers = multiprocessing.cpu_count()-1
    idxList = findRightNumberProcessus(shapeP,num_workers)

    #let us find the species of the last layer in case:
    if outputList is None:
        outputList = obtainOutputArray(nameDic)
    elif type(outputList)==str:
        if outputList=="nameDic" or outputList=="all":
            outputList=list(nameDic.keys())
        else:
            raise Exception("asked outputList is not taken into account.")
    t=tm()
    print("=======================Starting simulation===================")
    if(hasattr(funcForSolver,"__call__")):
        copyArgs = obtainCopyArgs(modes,idxList,outputList,time,funcForSolver,speciesArray,KarrayA,stochio,maskA,maskComplementary,derivativeLeak,nameDic)
        with multiprocessing.get_context("spawn").Pool(processes= len(idxList[:-1])) as pool:
            myoutputs = pool.map(scipyOdeSolverForMultiProcess, copyArgs)
        pool.close()
        pool.join()
    else:
        assert type(funcForSolver)==str
        copyArgs = obtainCopyArgsLassie(modes,idxList,outputList,time,directory_for_network,parsedEquation,constants,derivativeLeak,nameDic,speciesArray,funcForSolver)
        with multiprocessing.get_context("spawn").Pool(processes= len(idxList[:-1])) as pool:
            myoutputs = pool.map(lassieGPUsolverMultiProcess, copyArgs)
        pool.close()
        pool.join()
    print("Finished computing, closing pool")
    timeResults={}
    timeResults[directory_for_network + "_wholeRun"]= tm() - t

    if("outputEqui" in modes):
        outputArray=np.zeros((len(outputList), shapeP))
    if("outputPlot" in modes):
        outputArrayPlot=np.zeros((len(outputList), shapeP, time.shape[0]))
    times = []
    for idx,m in enumerate(myoutputs):
        if("outputEqui" in modes):
            try:
                outputArray[:,idxList[idx]:idxList[idx+1]] = m[modes.index("outputEqui")]
            except Exception:
                raise Exception("Error while collecting the outputEqui result of worker "+str(idx))
        if("outputPlot" in modes):
            outputArrayPlot[:,idxList[idx]:idxList[idx+1]] = m[modes.index("outputPlot")]
        if("time" in modes):
            times += [m[modes.index("time")]]
    if("time" in modes):
        timeResults[directory_for_network + "_singleRunAvg"] = np.sum(times) / len(times)

    # Let us save our result:
    savedFiles = ["false_result.csv","output_equilibrium.csv","output_full.csv"]
    for k in nameDic.keys():
        savedFiles += [k+".csv"]
    for p in savedFiles:
        if(os._exists(os.path.join(experiment_path, p))):
            print("Allready exists: renaming older")
            os.rename(os.path.join(experiment_path,p),os.path.join(experiment_path,p.split(".")[0]+"Old."+p.split(".")[1]))
    if("outputEqui" in modes):
        df=pandas.DataFrame(outputArray)
        df.to_csv(os.path.join(experiment_path, "output_equilibrium.csv"))
    elif("outputPlot" in modes):
        assert len(outputArrayPlot) == len(outputList)
        for idx,species in enumerate(outputList):
            df=pandas.DataFrame(outputArrayPlot[idx])
            df.to_csv(os.path.join(experiment_path, "output_full_"+str(species)+".csv"))

    results=[0 for _ in range(len(modes))]
    if("outputEqui" in modes):
        results[modes.index("outputEqui")]= outputArray
    if("outputPlot" in modes):
        results[modes.index("outputPlot")]= outputArrayPlot
    if "time" in modes:
        results[modes.index("time")]=timeResults

    if("outputPlot" in modes): #sometimes we need the nameDic
        results+=[nameDic]
    return tuple(results)
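# A minimal usage sketch for executeODESimulation; "myNetwork" and myDerivative
# (a callable returning dC/dt with the signature the scipy-based solver expects)
# are placeholders rather than names from this project, and the initialization
# entry for "E" is purely illustrative. It also shows how the returned tuple
# lines up with the `modes` argument.
if __name__ == "__main__":
    modes = ["time", "outputEqui"]
    testInputs = np.array([[1e-6, 2e-6],
                           [5e-7, 1e-6]])    # 2 tests, 2 input species
    results = executeODESimulation(myDerivative, "myNetwork", testInputs,
                                   initializationDic={"E": 1e-7},
                                   modes=modes, endTime=1000, timeStep=0.1)
    timeResults = results[modes.index("time")]               # dict of run durations
    equilibriumOutputs = results[modes.index("outputEqui")]  # len(outputList) x 2 array
    print(equilibriumOutputs.shape)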