Example #1
def sellAll(date=None, verbose=False):
    """
    Quickly sell all stocks in the portfolio by performing a sell
    transaction for each stock held. This modifies the portfolio
    dictionary in place.
    """
    if date is None:
        date = portfolio['date']
    else:
        date = normaliseDate(date)
    # iterate over the directory listing, since iterating over the
    # portfolio dictionary while selling would raise an error
    for x in ld("stockdata/"):
        if x[:-4] in portfolio:  # strip ".csv" to get the symbol
            addTransaction({
                'date': date,
                'symbol': x[:-4],
                'volume': -portfolio[x[:-4]]
            }, verbose)
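A minimal usage sketch, assuming the portfolio dictionary and the loadAllStocks() helper from Example #7 below; the date is hypothetical:

loadAllStocks()                      # populate the stocks dictionary first
sellAll('2019-03-01', verbose=True)  # liquidate every held position
print(portfolio['cash'])             # all value is now held as cash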
Example #2
def get_samples(depends_on, includes, burn):
    """Get all samples from all runs."""

    paths = make_paths(depends_on, includes, 0)
    sample_files = [f for f in ld(paths['model_dir']) if 'samples_' in f]
    # concatenate every run's samples, discarding the burn-in draws
    dfs = [
        pd.read_csv(pj(paths['model_dir'], f), index_col=0).iloc[burn:]
        for f in sample_files
    ]
    df = pd.concat(dfs, axis=0)

    # flip the sign of the 'v_' (drift-rate) columns
    for vcol in [c for c in df.columns if 'v_' in c]:
        df[vcol] = -df[vcol]

    for i, iv in enumerate(['age', 'group', 'group:age', 'Intercept'], 1):
        plt.subplot(2, 2, i)
        if iv == 'Intercept':
            _df = df[['%s_%s' % (p, iv) for p in 'atv']]
        else:
            _df = df[['%s_%s' % (p, iv) for p in depends_on]]
        sns.violinplot(data=_df)
        plt.hlines(0, 0, 3)
    plt.show()

    return df
Example #3
def load_model_with_all_samples(depends_on, includes, burn):
    """Create a new model then load all the existing samples."""

    paths = make_paths(depends_on, includes, 0)
    sample_files = [f for f in ld(paths['model_dir']) if 'samples_' in f]

    m = regression_model(depends_on, includes, True)
    m.sample(3)
    stochs = m.get_stochastics()

    # clear the short bootstrap traces before loading the saved samples
    for node in stochs.node:
        node.trace._trace[0] = np.array([])

    for f in sample_files:
        samples = pd.read_csv(pj(paths['model_dir'], f))
        for node in stochs.node:
            name = node.__name__
            print(node, name)
            s1 = copy(node.trace._trace[0])
            s2 = samples[name].values[burn:]
            node.trace._trace[0] = np.concatenate([s1, s2])

    m.gen_stats()

    return m
Example #4
def get_path(classname, workspace):
    list_names = ld(workspace + '/' + classname)
    # print(list(map(lambda x: workspace+classname+'/'+x, list_names)))
    # return "<path>,<classname>" strings for every file in the class folder
    return list(
        map(
            lambda x: workspace + '/' + classname + '/' + x + ',' +
            classname, list_names))
Example #5
    def preprocess(self):
        if self.root:
            file_slice = []
            skipcount = 0
            for file_path in ld(self.root):
                path = os.path.join(self.root, file_path)
                # read as a dicom object, not as an array
                ds = dicom.dcmread(path)
                if hasattr(ds, 'SliceLocation'):
                    file_slice.append(ds)
                else:
                    skipcount += 1
            # sort the dicom objects based on slice location
            slice_arr = sorted(file_slice, key=lambda x: x.SliceLocation)
            # create a 3d array
            image = np.stack([arg.pixel_array for arg in slice_arr])
            image = image.astype(np.int16)  # convert to np.int16
            """
            Set pixels outside the scan to 0.
            The intercept is usually -1024, and air is approximately 0,
            so anything outside the lung region is set to 0.
            """
            image[image == -2000] = 0
            # Convert to Hounsfield units (HU)
            intercept = slice_arr[0].RescaleIntercept
            slope = slice_arr[0].RescaleSlope
            # make sure the slope is 1
            if slope != 1:
                image = slope * image.astype(np.float64)
                image = image.astype(np.int16)
            image += np.int16(intercept)
            return np.array(image, dtype=np.int16)
        else:
            return None
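For reference, the rescale step above applies the standard DICOM mapping HU = RescaleSlope * stored_value + RescaleIntercept; a small sketch with illustrative values:

import numpy as np

stored = np.array([0, 1024, 2048], dtype=np.int16)  # illustrative raw CT values
slope, intercept = 1, -1024  # typical CT rescale parameters
hu = slope * stored + np.int16(intercept)
print(hu)  # [-1024     0  1024] -> air, water, dense tissue (roughly)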
Example #6
    def handwriting_class_test(self, t0):
        handwriting_labels = []
        training_file_list = ld(self.TRAINING_DIGITS)
        dir_length_training = len(training_file_list)
        training_mat = np.zeros((dir_length_training, 1024))

        # Create dataset and label vectors from training image data
        for iterator in range(dir_length_training):
            file_name_str = training_file_list[iterator]
            file_str = file_name_str.split(".")[0]
            class_num_str = int(file_str.split("_")[0])

            handwriting_labels.append(class_num_str)
            training_mat[iterator, :] = self.convert_image_to_vector(
                "{}/{}".format(self.TRAINING_DIGITS, file_name_str))

        error_count = 0.0
        test_file_list = ld(self.TEST_DIGITS)
        dir_length_test = len(test_file_list)

        # Create dataset and label vectors from test image data, then use with classifier against training data
        for iterator in range(dir_length_test):
            file_name_str = test_file_list[iterator]
            file_str = file_name_str.split(".")[0]
            class_num_str = int(file_str.split("_")[0])

            vector_under_test = self.convert_image_to_vector("{}/{}".format(
                self.TEST_DIGITS, file_name_str))
            classifier_response = self.basic_label_classifier(
                vector_under_test, training_mat, handwriting_labels, 3)

            print(
                "The classifier came back with: {}.\nThe real answer is: {}.\n"
                .format(classifier_response, class_num_str))

            if classifier_response != class_num_str:
                error_count += 1.0

        print(
            "\nThe total number of errors is: {}.\nThe total error rate is: {}.\n"
            .format(error_count, error_count / float(dir_length_test)))
        self.calculate_runtime(t0)
        return
Example #7
def loadAllStocks():
    """
    Applies loadStock() to every file in the "stockdata/" subdirectory,
    loading all available stocks into the stocks dictionary.
    """
    for i in ld("stockdata/"):
        try:
            loadStock(i[:-4])  # strip ".csv"; loadStock() adds it back
        except Exception:
            pass  # skip any file that fails to load
Example #8
    def load_images_from_directory(self, dirname):
        # Initializes dataset, class label vector, and data's dimensionality constant
        handwriting_labels = []
        training_file_list = ld(dirname)
        DIR_LENGTH = len(training_file_list)
        training_mat = np.zeros((DIR_LENGTH, 1024))

        # Iterates through data's length to log every image file and class label
        for iterator in range(DIR_LENGTH):
            filename = training_file_list[iterator]
            file = filename.split(".")[0]
            class_number = int(file.split("_")[0])

            # Contextually labels every image by class number (provided in dataset)
            if class_number == 9:
                handwriting_labels.append(-1)
            else:
                handwriting_labels.append(1)

            # Converts training image data to information vector
            training_mat[iterator, :] = self.convert_image_to_vector("{}/{}".format(dirname, filename))
        
        print("\nTRAINING DATA MATRIX IS: \n{}\n\nHANDWRITING IMAGE LABEL VECTOR IS: \n{}\n".format(training_mat, handwriting_labels))
        return training_mat, handwriting_labels
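Both methods above delegate to convert_image_to_vector; a plausible sketch, assuming the classic 32x32 text-digit format (one '0'/'1' character per pixel, with the class encoded in the file name as above):

import numpy as np

def convert_image_to_vector(filepath):
    # flatten a 32x32 text digit into the 1x1024 row shape used by training_mat
    vector = np.zeros((1, 1024))
    with open(filepath) as fh:
        for row in range(32):
            line = fh.readline()
            for col in range(32):
                vector[0, 32 * row + col] = int(line[col])
    return vector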
Example #9
    def __init__(self,
                 input_dir,
                 data_file,
                 train=False,
                 test=False,
                 offset=0):
        super(SunDataset, self).__init__()

        def makeList(input, slice, split):
            output = list(i for i in input)
            for i, x in enumerate(output):
                output[i] = x[slice[0]:slice[1]].split(split)
                bad = []  # tokens that failed to parse as floats
                for j, y in enumerate(output[i]):
                    try:
                        output[i][j] = float(y)
                    except ValueError:
                        bad.append(y)
                for k in bad:  # drop every unparsable token
                    output[i].remove(k)
            return output

        def cutset(dataset, sampling, inv=False):
            # keep (or, with inv=True, drop) every sampling-th element
            newset = []
            if not inv:
                for i in range(0, len(dataset), sampling):
                    newset.append(dataset[i])
            else:
                x = list(range(len(dataset)))
                for i in range(0, len(dataset), sampling):
                    x.remove(i)
                for i in x:
                    newset.append(dataset[i])
            return newset

        self.input_dir = input_dir
        self.images = []
        for i in ld(self.input_dir):
            for j in ld("{}/{}".format(self.input_dir, i)):
                x = tuple("{}/{}/{}/{}".format(self.input_dir, i, j, k)
                          for k in ld("{}/{}/{}".format(self.input_dir, i, j)))
                if len(x) == 9:
                    self.images.append(x)
        with open(data_file, "r") as f:
            self.data = f.readlines()
        with open("./data_sun/missing_days.txt", "r") as m:
            ml = m.readlines()
        ml = makeList(ml, [6, 16], "/")
        new_ml = []
        for i in ml:
            if i not in new_ml:
                new_ml.append(i)

        ml = new_ml
        # for i in ml:
        #     if ml.count(i) != 1:
        #         print(i)
        #         print(ml.count(i))

        # check = makeList()

        checker = makeList(self.data, [0, 10], " ")

        for x in ml:
            try:
                ind = checker.index(x)
                del checker[ind]
                del self.data[ind]
            except ValueError:
                pass

        # for i in checker:
        #     for j, x in enumerate(i):
        #         if len(str(int(x))) == 1:
        #             i[j] = "0" + str(int(x))
        #         else:
        #             i[j] = str(int(x))
        #     path = "./data_sun/images/{}/{}".format(i[0], i[1] + i[2])
        #     if not os.path.isdir(path):
        #         print(path)

        for i, data in enumerate(self.data):
            self.data[i] = float(data[21:24])

        if train:
            self.images = cutset(self.images, 5, inv=True)
            self.data = cutset(self.data, 5, inv=True)
        elif test:
            self.images = cutset(self.images, 5)
            self.data = cutset(self.data, 5)

        self.offset = offset
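A minimal construction sketch with hypothetical paths; cutset() keeps every fifth sample for the test split and the remainder for training:

# hypothetical layout: input_dir/<year>/<day>/ holding 9 images per day
train_set = SunDataset("./data_sun/images", "./data_sun/data.txt", train=True)
test_set = SunDataset("./data_sun/images", "./data_sun/data.txt", test=True)
print(len(train_set.images), len(test_set.images))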
Example #10
from os import listdir as ld
from os.path import isfile, join
import cv2
import numpy as np


def Trainer(name, path, file):
    print("Training Start")
    name += ".yml"
    Data, Labels = [], []
    for i, fname in enumerate(file):
        pathI = path + fname
        img = cv2.imread(pathI, cv2.IMREAD_GRAYSCALE)
        Data.append(np.asarray(img, dtype=np.uint8))
        Labels.append(i)
    Labels = np.asarray(Labels, dtype=np.int32)
    model = cv2.face.LBPHFaceRecognizer_create()
    model.train(np.asarray(Data), np.asarray(Labels))
    model.write(name)
    print("Complete [" + name + "]")


if __name__ == "__main__":
    path1 = '/home/gigachany/CODE/model/data_female/'
    path2 = '/home/gigachany/CODE/model/data_male/'
    file1 = [f for f in ld(path1) if isfile(join(path1, f))]
    file2 = [f for f in ld(path2) if isfile(join(path2, f))]
    Trainer("human_female", path1, file1)
    Trainer("human_male", path2, file2)
Example #11
# The source snippet begins mid-function; the imports and the head of
# calculate_avg below are a plausible reconstruction inferred from the
# call sites further down (window start and step are assumptions).
from os import listdir as ld
from os.path import join
import matplotlib.pyplot as plt
import numpy as np
from numpy import linspace, loadtxt as ldtxt, transpose as tpose


def calculate_avg(mtrx, avg_leng):
    running_avg, routed, dropped, fraction_dropped = [], [], [], []
    for i in range(len(mtrx[4]) - avg_leng):  # sliding window over the trace
        temp = np.mean(mtrx[4][i:i + avg_leng])
        running_avg.append(temp)
        temp2 = np.sum(mtrx[3][i:i + avg_leng])
        routed.append(temp2)
        temp3 = np.sum(mtrx[2][i:i + avg_leng])
        dropped.append(temp3)
        fraction_dropped.append(temp2 / (temp2 + temp3))
    return running_avg, fraction_dropped


number_of_subplots = 2
avg_len = 60

# rate = input('1.25, 2.5 or 5?: ')
prefix = "/Users/JLibin/Downloads/Paper/constant_interarrival_times/2.5"
tf1 = ld(prefix)
tf = [join(prefix, tf1[f]) for f in range(len(tf1))]
rltf = len(tf)
transposed = [
    tpose(ldtxt(tf[f], delimiter=',', usecols=range(5))) for f in range(rltf)
]
calc_averages = [calculate_avg(transposed[t], avg_len) for t in range(rltf)]
running_averages = [calc_averages[n][0] for n in range(rltf)]
fraction_dropped = [calc_averages[n][1] for n in range(rltf)]
pts = [
    linspace(1, len(running_averages[n]), len(running_averages[n]))
    for n in range(rltf)
]

fig = plt.figure()
ax1 = plt.subplot(2, 1, 1)
Example #12

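This snippet starts mid-script; a plausible preamble of imports is sketched below (trMonths, a list of Turkish month abbreviations used in the .mdb file names, is assumed to be defined in the original):

import datetime

import pyodbc
import xlsxwriter
from os import listdir as ld

# trMonths = [...]  # Turkish month abbreviations, defined in the original script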
if datetime.datetime.now().month == 1 and datetime.datetime.now().day == 1:
    year = str(datetime.datetime.now().year - 1)[2:]
else:
    year = str(datetime.datetime.now().year)[2:]


if datetime.datetime.now().day == 1:
    month = datetime.datetime.now().month - 1
else:
    month = datetime.datetime.now().month

path = 'D:\\Dosyalar\\KANTAR BILGILERI\\SANTIYE KANTARI\\'

folders = ld(path)


conn = pyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ='
                      + path + folders[-1] + '\\CIK' + trMonths[month - 1] + year + '.mdb;')
cursor = conn.cursor()
cursor.execute('select * from cikkay')

workbook = xlsxwriter.Workbook(r"C:\Users\TexnikOfis\Documents\K1ve2py.xlsx") 
worksheet = workbook.add_worksheet("Cikkay")

headings = ['plaka', 'cıktar', 'cıksaa', 'tartımno', 'firmaad', 'malad', 'alan1', 'alan2', 'alan3', 'kantar', 'NET']
for c in range(len(headings)):
    worksheet.write(0, c, headings[c])

ri = 1
for row in cursor.fetchall():
    # the source snippet is truncated here; writing each record to the
    # worksheet, one row at a time, is a plausible completion
    for c in range(len(row)):
        worksheet.write(ri, c, row[c])
    ri += 1

workbook.close()
Example #13
def vroom():
    
    zz = argparse.ArgumentParser(
        description="Python script to upload multiple (or single) files to 'transfer.sh'"
    )
    
    zz.add_argument(
        'path',
        help='Path containing files to upload'
    )
    
    zz.add_argument(
        '-c', '--use-curl',
        dest='curl',
        action='store_true',
        help='Uses curl to upload file(s)'
    )
    
    zz.add_argument(
        '-w', '--use-wget',
        dest='wget',
        action='store_true',
        help='Uses wget to upload file(s)'
    )
    
    args = zz.parse_args()
    
    if args.curl and args.wget:
        zz.error("Cannot use both 'wget' and 'curl'")
        sys.exit(1)

    elif not args.curl and not args.wget:
        zz.error("Please choose an upload method (-c OR -w)")
        sys.exit(1)

    args.path = os.path.abspath(args.path)

    # pid, piff and oj are presumably aliases for os.path.isdir, os.path.isfile
    # and os.path.join; resolve the log location once so every branch can use it
    this_dir = oj(os.path.dirname(sys.argv[0]))
    logger = oj(this_dir, 'log.txt')

    file_list = []
    skipped = []
    
    
    if pid(args.path):
        root_dir = args.path
        
        for sub_file in ld(root_dir):
            if sub_file.startswith('.'):
                continue
            
            if re.search(r"[$/\\&\s\[\]{}^%]", sub_file):
                skipped.append(sub_file)
                continue
            
            sub_path = oj(root_dir, sub_file)
            
            if piff(sub_path):
                
                file_list.append(sub_path)
    
        if args.curl:
            for f_count, file in enumerate(file_list):
                
                collected_length = len(file_list)
                print("File %d of %d" % (f_count + 1, collected_length))
                file_name = os.path.basename(file)
                
                command = """ curl --upload-file "%s" https://transfer.sh/%s >> "%s" """ % (file, file_name, logger)
                
                os.system(command)
        
        elif args.wget:
            for f_count, file in enumerate(file_list):
                collected_length = len(file_list)
                print("File %d of %d" % (f_count + 1, collected_length))
                file_name = os.path.basename(file)
            
                command = """ wget --method PUT --body-file="%s" https://transfer.sh/%s -O - -v >> "%s" """ % (file, file_name, logger)
            
                os.system(command)
    
    elif piff(args.path):
        print("File 1 of 1")

        file = args.path
        sub_file = os.path.basename(file)

        if re.search(r"[$/\\&\s\[\]{}^%]", sub_file):
            print('file skipped')
            print("check file names for illegal characters (spaces, $, (), [], {})")
            # print("use '-a' flag to auto-rename files")
            sys.exit(1)  # do not upload a file with an unsafe name

        file_name = os.path.basename(file)

        if args.curl:
            command = """ curl --upload-file "%s" https://transfer.sh/%s >> "%s" """ % (file, file_name, logger)
            os.system(command)

        elif args.wget:
            command = """ wget --method PUT --body-file="%s" https://transfer.sh/%s -O - -v >> "%s" """ % (file, file_name, logger)
            os.system(command)

    if skipped:
        print('files skipped')
        print('\n'.join(str(f) for f in skipped))
        print("check file names for illegal characters (spaces, $, (), [], {})")
    
    # read back the transfer.sh links recorded in the log, then clean up
    with open(logger, 'r') as rdr:
        lines = rdr.read()
    os.remove(logger)

    lines = lines.split('https')

    for l in lines:
        if not l:
            continue
        print('https%s' % l)
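Assuming the script is saved as transfer_up.py (file name hypothetical), typical invocations look like:

python transfer_up.py ~/Documents/reports -c   # upload every file in the directory via curl
python transfer_up.py notes.txt -w             # upload a single file via wget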
Example #14
def tradeStrategy2(verbose=False):
    """
    This strategy works on 2 principles:
    1) Shares with a relatively constant share price, which are not prone to
    sudden price changes, seem to be a safer investment.
    2) If a share price has gained massively, it is prone to a later lapse
    in price, so a big price hike is a good time to sell.

    The 5 least volatile stocks are bought on the 225th trading day, or the
    date of the portfolio. These stocks are only sold if there is a
    significant increase in their price (x1.7). The aim is to buy only less
    risky stocks and keep transactions to a minimum to avoid losses.

    Data from the stocks dictionary is used, hence stocks must be loaded;
    the portfolio dictionary is modified accordingly.

    This is a conservative strategy compared to tradeStrategy1, suited to
    investors who would rather secure a small profit than aim big.
    """
    m = 225  # the number of trading days of stock data to consider
    n = 5  # the number of stocks to buy
    r = 1.7  # the price-gain ratio at which to sell any stock
    lstdates = list()
    for i in stocks:
        for j in stocks[i]:
            lstdates.append(normaliseDate(j))
        break
    lstdates.sort()

    def H(s, j):  # high price of stock s on day j
        return stocks[s][lstdates[j]][1]

    def L(s, k):  # low price of stock s on day k
        return stocks[s][lstdates[k]][2]

    def MaxH(s, j):  # the max high price of stock s over the m days up to day j
        highs = list()
        for i in range(m):
            highs.append(stocks[s][lstdates[j - i]][1])
        return max(highs)

    def MinL(s, j):  # the min low price of stock s over the m days up to day j
        lows = list()
        for i in range(m):
            lows.append(stocks[s][lstdates[j - i]][2])
        return min(lows)

    def Q_buy2(s, j):  # how much the price varied over the m days up to day j
        return MaxH(s, j) / MinL(s, j)

    def Q_sell(s, j, k):  # value the stock added/removed between days j and k
        return L(s, k) / H(s, j)

    try:
        # the index of the portfolio date in the list of trading days
        k = lstdates.index(normaliseDate(portfolio['date']))
    except ValueError:  # the portfolio date is not a trading day; use day m-1
        k = m - 1
    if k < m - 1:
        k = m - 1
    qbuys = list()
    for s in stocks:
        qbuys.append(abs(Q_buy2(s, k) - 1))
    qbuys.sort()
    topn = qbuys[:n]  # so we aim to buy these n least volatile stocks
    msps = (portfolio['cash']) / n  # maximum spend per stock
    for s in stocks:
        if abs(Q_buy2(s, k) - 1) in topn:
            # the maximum volume affordable within the per-stock budget
            v = int(msps // stocks[s][lstdates[k]][1])
            addTransaction({
                'date': lstdates[k],
                'symbol': s,
                'volume': v
            }, verbose)
    #so we have now bought the n least volatile stocks. Next we sell stocks if they make a big price gain
    for x in ld("stockdata/"):
        if x[:-4] in portfolio:
            l = k
            while Q_sell(x[:-4], k, l) < r and l < len(lstdates) - 1:
                l += 1
            if l < len(lstdates) - 1:
                try:
                    addTransaction(
                        {
                            'date': lstdates[l],
                            'symbol': x[:-4],
                            'volume': -1 * portfolio[x[:-4]]
                        }, verbose)
                except DateError:
                    # a transaction can fall before the portfolio date, which
                    # is unavoidable as stocks are handled one by one; simply
                    # skip it
                    pass
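A minimal driver sketch, assuming the loadAllStocks() helper and the portfolio dictionary from the other examples on this page (the starting portfolio is hypothetical):

loadAllStocks()  # the strategy reads the stocks dictionary, so load it first
portfolio = {'date': '2019-01-02', 'cash': 10000.0}  # hypothetical portfolio
tradeStrategy2(verbose=True)  # buy the 5 least volatile stocks; sell on a x1.7 gain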
"""prints a list of the subjects for whom create_pngs_individual.py did not
create a png."""

from os import listdir as ld, system
from os.path import join as pj


__author__ = 'smathias'


root_dir = '/home/Smathias/onrc/data/GOBSRepository2015/'
nii_dir = pj(root_dir, 'images_by_subject')
subjects = set(ld(nii_dir))
png_dir = pj(root_dir, 'pngs_individual_scans')
subjects_with_images = set(s.split('.')[0] for s in ld(png_dir) if '.png' in s)
missing_subjects = list(subjects.difference(subjects_with_images))

reese_dir = '/home/Dmckay/onrc/data/gobs/GOBSrepository/images_by_subject'
for subj in missing_subjects:
    source = pj(reese_dir, subj, 't1w') + '/*'
    dest = pj(nii_dir, subj, 't1w') + '/'
    cmd = 'cp -v %s %s' % (source, dest)
    # print cmd
    system(cmd)
Example #16
def load_redcap():
    """Loads the RedCap exported database in the local directory, cleans it up,
    and returns it as a pandas DataFrame."""

    # load the demographics file
    demo_f = pj(
        demo_dir, [f for f in ld(demo_dir) if 'GeneNetworksInfluenc' in f][0]
    )
    df = pd.read_csv(demo_f)

    # format the file
    df['subject_id'] = df.iloc[:, 0]
    df = df[df.subject_id.str.contains('--') == False]  # remove false entries
    df.set_index('subject_id', inplace=True)
    df['group'] = df['Is this subject a patient, control or drop?']
    df = df[df.group != '0 =Drop']  # drop dropped subjects
    df['age'] = df.Age
    df['sex'] = df.Sex
    df.sex.replace('Male', 'male', inplace=True)
    df.sex.replace('Female', 'female', inplace=True)
    df['hand'] = df['Which hand do you use to write?']
    df.hand.replace('Left', 'left', inplace=True)
    df.hand.replace('Right', 'right', inplace=True)
    hq = df[[c for c in df.columns if 'Which hand' in c]]
    hq = hq.replace('Right', -1)
    hq = hq.replace('Left', 1)
    hq = hq.replace('Both', 0)
    df['handedness'] = hq.sum(axis=1)
    df['vocab'] = df['Vocabulary Raw Score']
    df['vocab_t'] = df['Vocabulary T-Score']
    df['matrix'] = df['Matrix Reasoning Raw Score']
    df['matrix_t'] = df['Matrix Reasoning T-Score']
    df['iq'] = df['Full-2 IQ ']
    df['hamd'] = df['HAMD Total Score']
    df['gaf'] = df['GAF Score:']
    df2 = None
    for c in [c for c in df.columns if 'Diagnosis Code:' in c]:
        if df2 is None:
            df2 = pd.get_dummies(df[c].astype(str))
        else:
            df2 = df2.add(pd.get_dummies(df[c].astype(str)), fill_value=0.)
    df2.drop('nan', axis=1, inplace=True)
    cols = df2.columns
    groupings = {
        'alcohol': [c for c in cols if '305.0' in c or '303.9' in c],
        'cannabis': [c for c in cols if '305.2' in c or '304.3' in c],
        'halluc': [c for c in cols if '305.3' in c or '304.5' in c],
        'cocaine': [c for c in cols if '305.6' in c or '304.2' in c],
        'opiod': [c for c in cols if '305.5' in c or '304.0' in c],
        'schizophrenia': [c for c in cols if '295.' in c],
        'psychosis': [c for c in cols if '295.' in c or '298.' in c
                      or '289.' in c or '312.22' in c],
        'rmdd': [c for c in cols if '296.3' in c],
        'bipolar': [c for c in cols if '296.5' in c],
        'anxiety_phobia': [c for c in cols if '300.' in c],
        'adhd': [c for c in cols if '314.9' in c],
    }
    groupings['drug_abuser'] = groupings['alcohol'] +\
                               groupings['cannabis'] +\
                               groupings['halluc'] +\
                               groupings['opiod'] +\
                               groupings['cocaine']
    for grouping, codes in groupings.items():
        df[grouping] = df2[codes].any(axis=1)
    df = df[[
        'group',
        'age',
        'sex',
        # 'hand',
        # 'handedness',
        # 'vocab',
        # 'vocab_t',
        # 'matrix',
        # 'matrix_t',
        'iq',
        # 'hamd',
        # 'gaf',
        'cannabis'
    ]]
    df.group.replace('888 =Patient', 'patient', inplace=True)
    df.group.replace('999 =Control', 'control', inplace=True)

    return df
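A typical follow-up, sketched: summarising the cleaned demographics by group using the columns defined above:

df = load_redcap()
print(df.groupby('group')[['age', 'iq']].mean())  # per-group means
print(df.cannabis.sum(), 'subjects with a cannabis-related diagnosis')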
Example #17
            if key == "2":
                currentApp = "testapp"
                moveOn = True

        if currentApp == "other...":

            cls()
            print " Loading . . ."
            menuIDs = [str(i) for i in range(1, 9)]
            menuIDs += list("abcdefghijklmnopqrstuvwxyz")
            pyFiles = [
                f for f in ld("/pyos/apps") if isfile(join("/pyos/apps", f))
            ]
            menuItems = list(pyFiles)

            cls()
            print " ~##### OTHER . . . #####~"
            print " [0] Back"
            for temp, j in enumerate(menuItems):
                print " [{0}] {1}".format(menuIDs[temp], j)
            print " ~## Press a key . . . ##~"
Example #18
def sys_work():
    files_to_analise = [
        item for item in ld() if 'data' in item and '.txt' in item
    ]
    return files_to_analise
Example #19
from os import listdir as ld
from os import makedirs as md
from os.path import isdir as d
from re import findall
from shutil import copyfile as cp

root = 'C:/Users/Ritik/Desktop/2021 Autumn Semester/Assignments/AI/ArtificialIntelligenceProject/Algorithms/'

maxver = 0
for f in ld(root):
    if d(root + f) and f[0] == 'v':
        x = findall('v([0-9]*).*', f)
        maxver = max(maxver, int(x[0]))
        fmaxver = root + f + '/'

if not maxver:
    ver = 1
    md(root + f'v{ver}')
else:
    ver = maxver + 1
    md(root + f'v{ver}')
    for f in ld(fmaxver):
        if f'v{maxver}' in f:
            dst = f.replace(f'v{maxver}', f'v{ver}')
        else:
            dst = f
        # "dst" is the destination name; a separate variable keeps the
        # isdir alias "d" imported above intact
        cp(fmaxver + f, root + f'v{ver}/' + dst)
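For reference, the version-detection regex captures the digits after the leading 'v'; a quick check with hypothetical folder names:

from re import findall

print(findall('v([0-9]*).*', 'v12'))        # ['12']
print(findall('v([0-9]*).*', 'v7_backup'))  # ['7']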