Example #1
def test_get_file(create_file, mock_box_client, monkeypatch):
    file = create_file()
    assert common.get_file(mock_box_client, file.id) is file

    assert common.get_file(mock_box_client, "1234") is None

    def file_raising(file_id):
        raise boxsdk.exception.BoxAPIException(400)

    monkeypatch.setattr(mock_box_client, "file", file_raising)

    with pytest.raises(boxsdk.exception.BoxAPIException):
        common.get_file(mock_box_client, "1234")
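The helper under test isn't shown on this page. Judging by the assertions above, it wraps the Box SDK lookup, maps a 404 to None, and lets any other API error propagate; a minimal sketch under those assumptions (the real implementation in `common` may differ):

import boxsdk.exception


def get_file(client, file_id):
    """Return the Box file object, or None if it was trashed or deleted."""
    try:
        return client.file(file_id).get()
    except boxsdk.exception.BoxAPIException as exc:
        if exc.status == 404:  # missing file; any other status re-raises
            return None
        raise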
Example #2
def choose_path(caller):
    result = common.get_file(
        common.filechooser_dialog('Open repository',
                                  Gtk.FileChooserAction.SELECT_FOLDER))

    if result:
        repository.start_with_path(result)
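Here common.get_file takes an already-constructed Gtk.FileChooserDialog and reduces it to a path. A plausible sketch, assuming the helper runs the dialog, returns the selection on OK and None on cancel, and destroys the dialog either way:

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


def get_file(dialog):
    filename = None
    if dialog.run() == Gtk.ResponseType.OK:
        filename = dialog.get_filename()
    dialog.destroy()  # always tear the dialog down, even on cancel
    return filename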
Example #3
def test_header_suppression(self):
    """Ensure the file is not given a second set of headers if it already exists"""
    d = self.DF('TestID', 'TestFile', self.temp_dir, 'TestGroup',
                5, 'SAMPLE', ["T1", "T2", "T5"], self.sc)
    d = None
    d = self.DF('TestID', 'TestFile', self.temp_dir, 'TestGroup',
                5, 'SAMPLE', ["T1", "T2", "T5"], self.sc)
    self.assertEqual('Timestamp,T1,T2,T5 Display\r\n',
                     get_file(self.temp_dir, 'TestFile'))
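In these DataFile tests, get_file(directory, name) evidently returns the file's entire contents as a string, CRLF line endings included. A minimal stand-in under that assumption:

import os


def get_file(directory, filename):
    # newline='' stops Python from translating the '\r\n' the tests expect
    with open(os.path.join(directory, filename), newline='') as f:
        return f.read()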
Example #4
def add_path(caller):
    filename = common.get_file(patterns.add_path_dialog())
    if filename:
        #check for duplicate
        for row in include_liststore:
            row_filename = row[0]
            commonprfx = os.path.commonprefix((row_filename, filename))
            if commonprfx == row_filename:
                patterns.path_msg_dialog(filename, row_filename)
                return
            elif commonprfx == filename:
                include_liststore.remove(row.iter)

        include_liststore.append([filename])
Example #5
	def add_path(self, caller):
		filename = common.get_file(add_path_dialog())
		if filename:
			#check for duplicate
			for row in self.liststore:
				if row[0] == 0:
					row_filename = row[1]
					commonpath = os.path.commonpath((row_filename, filename))
					if commonpath == row_filename:
						path_msg_dialog(filename, row_filename)
						return
					elif commonpath == filename:
						self.liststore.remove(row.iter)
			self.liststore.append([0, filename])
Example #6
	def add_pattern_file(self, caller):
		dialog = common.filechooser_dialog('Add pattern file', Gtk.FileChooserAction.OPEN)

		filename = common.get_file(dialog)
		if filename:
			filename = os.path.realpath(filename)
			#check for duplicate
			for row in self.liststore:
				if row[0] == 4 and row[1] == filename:
					common.run_msg_dialog(Gtk.MessageType.INFO, 'The pattern file <tt>%s</tt> has already been added' %
					                                            GLib.markup_escape_text(filename))
					return
			self.liststore.append([4, filename])
Example #7
def test_recording_sampling_sensors(self):
    """Test Recording Using Sampling Only"""
    df = self.DF('TestID', 'TestFile', self.temp_dir, 'TestGroup',
                 5, 'SAMPLE', ["T1", "T2", "T5"], self.sc)
    self.dfr.put(df)
    self.sc.start_averaging_sensors()
    self.dfr.start_data_files()
    time.sleep(15)
    self.sc.stop_averaging_sensors()
    self.dfr.stop_data_files()
    self.assertRegexpMatches(get_file(self.temp_dir, 'TestFile'),
                             (r'Timestamp,T1,T2,T5\r\n'
                              r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},1,2,5\r\n'
                              r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},2,4,10\r\n'
                              r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},3,6,15\r\n'))
Example #8
def test_recording_averaging_sensors(self):
    """Test Recording Using Averaging Sensors As Well"""
    # We cannot just start self.dfr up here, because the sampling results
    # will depend on how close to time.time()%12 == 0 the DFR was started.
    df = self.DF('TestID', 'TestFile', self.temp_dir, 'TestGroup',
                 12, 'AVERAGE', ["T1", "T2", "T5"], self.sc)
    self.sc.start_averaging_sensors()
    for _ in [1, 2, 3]:
        time.sleep(12)
        df.collect_data()
    self.sc.stop_averaging_sensors()
    self.assertRegexpMatches(get_file(self.temp_dir, 'TestFile'),
                             (r'Timestamp,T1_avg,T2_avg,T5_avg\r\n'
                              r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},7.5,15.0,37.5\r\n'
                              r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},19.5,39.0,97.5\r\n'
                              r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},31.5,63.0,157.5\r\n'))
Example #9
def lambda_handler(event, context):
    LOGGER.info(json.dumps(event))

    raw_body = event["body"]
    body = json.loads(raw_body)
    trigger = body["trigger"]
    webhook_id = body["webhook"]["id"]
    source = body["source"]

    # The event structure varies by trigger
    if "item" in source:
        box_id = source["item"]["id"]
        box_type = source["item"]["type"]
    elif "id" in source:
        # not covered by tests
        box_id = source["id"]
        box_type = source["type"]
    else:
        # not covered by tests
        raise RuntimeError("Missing id field")

    LOGGER.info("Received trigger %s on %s id %s", trigger, box_type, box_id)

    # only get a box client if we're actually going to need one
    if trigger not in common.HANDLED_TRIGGERS:
        LOGGER.info("%s is not supported by this endpoint", trigger)
        return STATUS_SUCCESS

    client, webhook_key = common.get_box_client()
    ddb = common.get_ddb_table()

    webhook = client.webhook(webhook_id)
    is_valid = webhook.validate_message(bytes(raw_body, "utf-8"),
                                        event["headers"], webhook_key)
    if not is_valid:
        LOGGER.critical("Received invalid webhook request")
        return STATUS_SUCCESS

    if (trigger in common.HANDLED_FILE_TRIGGERS) and (box_type == "file"):
        file = common.get_file(client, box_id)
        if not file:
            LOGGER.warning("File %s is missing (trashed or deleted)", box_id)
            # We don't know what the file's path was, so we'll just have to
            # let the sync lambda clean up DynamoDB.
            return STATUS_SUCCESS

        # if the file isn't public but any parent directory is, make a shared link
        parent_public = common.is_any_parent_public(client, file)
        if (not common.is_box_object_public(file)) and parent_public:
            # this includes an api call
            file = common.create_shared_link(client,
                                             file,
                                             access="open",
                                             allow_download=True)
        # if the file is public but no parent directory is, delete the shared link
        if (common.is_box_object_public(file)) and (not parent_public):
            file = common.remove_shared_link(client, file)

        if common.is_box_object_public(file):
            common.put_file_item(ddb, file)
        else:
            common.delete_file_item(ddb, file)
    elif (trigger in common.HANDLED_FOLDER_TRIGGERS) and (box_type == "folder"):
        folder = common.get_folder(client, box_id)
        if not folder:
            LOGGER.warning("Folder %s is missing (trashed or deleted)", box_id)
            # The Box API doesn't appear to give us a way to list the contents of
            # a trashed folder, so we're just going to have to let the sync lambda
            # clean up the relevant DynamoDB rows.
            return STATUS_SUCCESS

        folder_shared = common.is_box_object_public(folder)
        for file, shared in common.iterate_files(folder, shared=folder_shared):

            # if the file isn't public but any parent directory is
            if (not common.is_box_object_public(file)) and shared:
                # this includes an api call
                file = common.create_shared_link(client,
                                                 file,
                                                 access="open",
                                                 allow_download=True)
            elif (common.is_box_object_public(file)) and (not shared):
                file = common.remove_shared_link(client, file)

            if common.is_box_object_public(file):
                common.put_file_item(ddb, file)
            else:
                common.delete_file_item(ddb, file)

    return STATUS_SUCCESS
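The handler above leans on common.is_box_object_public for both files and folders. Its implementation isn't shown; a rough sketch, assuming "public" means the object carries an open shared link (the actual criteria in `common` may differ):

def is_box_object_public(obj):
    # shared_link is populated on fetched Box objects; absent means private
    shared_link = getattr(obj, "shared_link", None)
    return bool(shared_link) and shared_link.get("access") == "open"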
Example #10
def scrape_armour(path):
    conn = common.open_db(path)
    db = conn.cursor()
    domain = "https://aonprd.com/"

    categories = ["Light", "Medium", "Heavy", "Shield"]

    for category in categories:
        armour_list_html = common.get_file(domain +
                                           "/EquipmentArmor.aspx?Category=" +
                                           category)
        armourlist_soup = BeautifulSoup(armour_list_html, "html.parser")
        armour_names = set()
        for row in armourlist_soup.find_all("tr"):
            if row.find("td") is None:
                continue
            armour_names.add(row.find("a").get("href"))
        for url in armour_names:
            print(url)
            armour_html = common.get_file(domain + url)
            armour_soup = BeautifulSoup(armour_html, "html.parser")
            td = armour_soup.find("td")
            if td is None:
                print("We've got an issue with " + url)
                continue
            data = {}
            data["name"] = td.find("h1").string
            if data["name"] is None:
                for s in td.find("h1").descendants:
                    if s.string is not None:
                        data["name"] = str.lstrip(s.string)
                        break
            main_c = 0
            sources = []
            for child in td.find("span").children:
                if child.name == "br" and child.next_sibling.name != "b":
                    main_c = child.next_sibling
                    break
                if child.name != "b":
                    continue
                if child.string == "Source":
                    x = child.next_sibling
                    while x.name != "h3":
                        if x.name == "a":
                            sources.append(common.reference(db, x))
                        x = x.next_sibling
                    continue
                if child.string in [
                        "Cost",
                        "Weight",
                        "Armor Bonus",
                        "Max Dex Bonus",
                        "Armor Check Penalty",
                        "Arcane Spell Failure Chance",
                ]:
                    array_item = (child.string.lower()
                                  .replace(" ", "_")
                                  .replace("armor", "armour"))
                    # okay, that last bit is pedantry, but it matches the table's "armour" spelling
                    data[array_item] = common.integer_property(
                        child.next_sibling.string)
                    continue
                if child.string == "Speed":
                    speeds = child.next_sibling.string.split("/")
                    data["speed_30"] = common.integer_property(speeds[0])
                    data["speed_20"] = common.integer_property(speeds[1])
                    continue
                print(child.string + ": " + child.next_sibling.string)
            my_str = ""
            u = None
            for header in td.find_all("h3"):
                if header.string == "Description":
                    u = header.next_sibling
            while u is not None:
                my_str += str(u)
                u = u.next_sibling
            print(data)

            data["description"] = common.cleanup_html(my_str)

            db.execute(
                "INSERT INTO armour (name, cost, weight, armour_bonus, max_dex, check_pen, arcane_failure_chance, speed_30, speed_20, description) VALUES (:name, :cost, :weight, :armour_bonus, :max_dex_bonus, :armour_check_penalty, :arcane_spell_failure_chance, :speed_30, :speed_20, :description);",
                data,
            )
            db.execute("SELECT last_insert_rowid();")
            rows = db.fetchall()
            i = rows[0][0]
            for x in sources:
                x.append(i)
                print(x)
                db.execute(
                    "INSERT INTO reference (table_name, source, page, destination) VALUES ('armour', ?, ?, ?);",
                    x,
                )
    conn.commit()
    conn.close()
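In the scrapers on this page, common.get_file(url) returns a page's HTML. A minimal stand-in using requests, with a simple on-disk cache so a re-run doesn't hammer aonprd.com; the caching behaviour is an assumption, not something the examples confirm:

import hashlib
import os

import requests


def get_file(url, cache_dir=".cache"):
    os.makedirs(cache_dir, exist_ok=True)
    key = hashlib.sha256(url.encode("utf-8")).hexdigest()
    cache_path = os.path.join(cache_dir, key)
    if os.path.exists(cache_path):
        with open(cache_path, encoding="utf-8") as f:
            return f.read()
    response = requests.get(url)
    response.raise_for_status()
    with open(cache_path, "w", encoding="utf-8") as f:
        f.write(response.text)
    return response.text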
Example #11

####################################################################################################
### Main Program

states = ('AA', 'AB', 'BB')
start_probability = {'AA': 0.25, 'AB': 0.5, 'BB': 0.25}

Error_Rates = {}
inZ = open(Errorfile, "rU")
for line_idx, line in enumerate(inZ):
    cols = line.replace('\n', '').split()
    #imswc001 482 0.0608584706859 1e-05 0.0180081186063 -159.05915623
    Error_Rates[cols[0]] = [float(cols[2]), float(cols[3]), float(cols[4])]

Qi = common.get_file(GAOutFile, 'adfadfa')

xx = [idx for idx, i in enumerate(Qi) if 'individual=1 ' in i[0]]
#print xx
final = Qi[xx[-1] + 2][0].replace('raw list order= ', '')
print final
os.popen('python make.new.input.file.from.winning.order.py ' + MarkerInit +
         ' ' + '"' + final + '" > better.' + MarkerInit)

Position = {}
inY = open('better.' + MarkerInit, "rU")
TotalMarkers = 0
for line_idx, line in enumerate(inY):
    cols = line.replace('\n', '').split('\t')
    Position[cols[0] + "_" + cols[1]] = line_idx
    TotalMarkers += 1
Example #12
def scrape_trait(path):
    conn = common.open_db(path)
    db = conn.cursor()
    domain = "https://aonprd.com/"

    category_list_html = common.get_file(domain + "Traits.aspx")
    soup = BeautifulSoup(category_list_html, "html.parser")

    trait_urls = set()

    # for every trait category
    for a in soup.find("div", id="main").descendants:
        if a.name != "a":
            continue
        print(a.string)
        category_html = common.get_file(domain + a.get("href"))
        traitslist_soup = BeautifulSoup(category_html, "html.parser")
        for td in traitslist_soup.find_all("td"):
            a = td.find("a")
            if a is not None:
                trait_urls.add(a.get("href"))
    print(len(trait_urls))

    for u in trait_urls:
        print(u)
        trait_html = common.get_file(domain + u)
        trait_soup = BeautifulSoup(trait_html, "html.parser")
        td = trait_soup.find("td")
        if td is None:
            print("We've got an issue with " + u)
            continue
        data = {}
        data["name"] = td.find("h1").string
        if data["name"] is None:
            for s in td.find("h1").descendants:
                if s.string is not None:
                    data["name"] = str.lstrip(s.string)
                    break

        # source = common.reference(db, td.find("a", class_="external-link"))
        sources = []
        # data["source"] = source[0]
        # data["page"] = source[1]
        data["requirements"] = None
        main_c = 0
        for child in td.find("span").children:
            if child.name == "br" and child.next_sibling.name != "b":
                main_c = child.next_sibling
                break
            if child.name != "b":
                continue
            if child.string == "Source":
                x = child.next_sibling
                while x.name != "br":
                    if x.name == "a":
                        sources.append(common.reference(db, x))
                    x = x.next_sibling
                continue
            if child.string == "Category":
                data["category"] = str.lstrip(child.next_sibling.string)
                continue
            if child.string == "Requirement(s)":
                data["requirements"] = str.lstrip(child.next_sibling.string)
                continue
            print(child.string + ": " + child.next_sibling.string)
        my_str = str(main_c)

        while main_c.next_sibling is not None:
            main_c = main_c.next_sibling
            my_str += str(main_c)

        data["description"] = common.cleanup_html(my_str)
        print(sources)
        print(data)
        db.execute(
            "INSERT INTO trait(name, category, requirements, description) VALUES (:name, :category, :requirements, :description);",
            data,
        )
        db.execute("SELECT last_insert_rowid();")
        rows = db.fetchall()
        i = rows[0][0]
        for x in sources:
            x.append(i)
            db.execute(
                "INSERT INTO reference (table_name, source, page, destination) VALUES ('trait', ?, ?, ?);",
                x,
            )

    conn.commit()
    conn.close()
Example #13
def scrape_weapon(path):
    conn = common.open_db(path)
    db = conn.cursor()
    domain = "https://aonprd.com/"

    proficiencies = ["Simple", "Martial", "Exotic", "Ammo"]
    weapon_links = set()
    for p in proficiencies:
        weapon_list_html = common.get_file(
            domain + "/EquipmentWeapons.aspx?Proficiency=" + p)
        weapon_list_soup = BeautifulSoup(weapon_list_html, "html.parser")
        for row in weapon_list_soup.find_all("tr"):
            if row.find("td") is None:
                continue
            weapon_links.add(row.find("a").get("href"))

    for link in weapon_links:
        print(link)
        weapon_html = common.get_file(domain + link)
        weapon_soup = BeautifulSoup(weapon_html, "html.parser")
        td = weapon_soup.find("td")
        if td is None:
            print("We've got an issue with " + link)
            continue
        data = {
            "dmg_s": None,
            "dmg_m": None,
            "dmg_type": None,
            "critical": None,
            "range": None,
            "special": None,
        }
        sources = []
        groups = []
        data["name"] = td.find("h1").string
        if data["name"] is None:
            for s in td.find("h1").descendants:
                if s.string is not None:
                    data["name"] = str.lstrip(s.string)
                    break
        for child in td.find("span").children:
            if child.name != "b":
                continue
            if child.string == "Source":
                x = child.next_sibling
                while x.name != "h3":
                    if x.name == "a":
                        sources.append(common.reference(db, x))
                    x = x.next_sibling
                continue
            if child.string in ["Cost", "Weight", "Range"]:
                array_item = child.string.lower().replace(" ", "_")
                data[array_item] = common.integer_property(
                    child.next_sibling.string)
                continue
            if child.string in [
                    "Proficiency", "Critical", "Category", "Special"
            ]:
                array_item = child.string.lower().replace(" ", "_")
                data[array_item] = str.strip(
                    child.next_sibling.string).replace(";", "")
                if data[array_item] == "—":
                    data[array_item] = None
                continue
            if child.string == "Damage":
                a = child.next_sibling.string.split(" ")
                data["dmg_s"] = a[1]
                data["dmg_m"] = a[3]
                if data["dmg_s"] == "—":
                    data["dmg_s"] = None
                if data["dmg_m"] == "—":
                    data["dmg_m"] = None
                continue
            if child.string == "Type":
                data["dmg_type"] = damage_type(child.next_sibling.string)
                continue
            if child.string == "Weapon Groups":
                a = child.next_sibling
                while a.name not in ["h3", "b"]:
                    if a.name == "a":
                        groups.append(a.string)
                    a = a.next_sibling
                continue
            print(child.string + ": " + child.next_sibling.string)
        my_str = ""
        u = None
        for header in td.find_all("h3"):
            if header.string == "Description":
                u = header.next_sibling
        while u is not None:
            my_str += str(u)
            u = u.next_sibling
        data["description"] = common.cleanup_html(my_str)
        print(data)
        db.execute(
            "INSERT INTO weapon(name, cost, weight, dmg_s, dmg_m, dmg_type, critical, range, special, category, proficiency, description) VALUES(:name, :cost, :weight, :dmg_s, :dmg_m, :dmg_type, :critical, :range, :special, :category, :proficiency, :description);",
            data,
        )
        db.execute("SELECT last_insert_rowid();")
        rows = db.fetchall()
        i = rows[0][0]
        for x in sources:
            x.append(i)
            db.execute(
                "INSERT INTO reference (table_name, source, page, destination) VALUES ('weapon', ?, ?, ?);",
                x,
            )
        for x in groups:
            db.execute(
                "INSERT INTO weapon_group (weapon_id, group_name) VALUES ( ?, ?);",
                [i, x],
            )
    conn.commit()
    conn.close()
Example #14
    sval = set([abs(i) for i in contig_ord2[a : b + 1]])
    sidx = set(range(a, b + 1))
    for i in range(a, b + 1):
        new[i] = contig_ord2[i]
    picks = [i for i in contig_ord1 if abs(i) not in sval]
    for i in [j for j in range(len(contig_ord1)) if j not in sidx]:
        p = picks.pop(0)
        new[i] = p
    c = ContigOrder(new, chrom_dict, scaff_lookup, memo, subset_memo)
    return c


############################END#######################################################################

#####READ IN DATA FILES AND PREP FOR GA###############################################################
init_file = get_file(sys.argv[1])
chrom_dict = defaultdict(list)
scaff_order = []
for i, j in init_file:
    chrom_dict[i].append(j)
    if i not in scaff_order:
        scaff_order.append(i)

chrom_list = []
scaff_lookup = {}
idx = 1
for i in scaff_order:
    s = sorted(chrom_dict[i], key=lambda s: int(s))
    if chrom_dict[i] == s:
        chrom_list.append(idx)
    else:
        chrom_list.append(idx * -1)
    chrom_dict[i] = s
Example #15
    random.shuffle(parents)
    contig_ord1, contig_ord2 = parents  # scramble which parent is the donor, so p2 isn't always the one copied from
    new = ['-' for i in contig_ord1]
    pos = range(len(new))
    a,b = sorted(sampler(pos, 2))
    sval = set([abs(i) for i in contig_ord2[a:b+1]])
    sidx = set(range(a,b+1))
    for i in range(a, b+1): new[i] = contig_ord2[i]
    picks = [i for i in contig_ord1 if abs(i) not in sval]
    for i in [j for j in range(len(contig_ord1)) if j not in sidx]:
        p = picks.pop(0)
        new[i] = p
    c = ContigOrder(new, chrom_dict, scaff_lookup)
    return c

init_file = get_file(sys.argv[1])
chrom_dict = defaultdict(list)
scaff_order = []
for i,j in init_file:
    chrom_dict[i].append(j)
    if i not in scaff_order: scaff_order.append(i)

chrom_list = []
scaff_lookup = {}
idx = 1
for i in scaff_order:
    s = sorted(chrom_dict[i], key = lambda s: int(s))
    if chrom_dict[i] == s:
        chrom_list.append(idx)
    else: chrom_list.append(idx*-1)
    chrom_dict[i] = s
Example #16
####################################################################################################
### Main Program

states = ('AA','AB','BB')
start_probability = {'AA':0.25,'AB':0.5,'BB':0.25}


Error_Rates={}
inZ = open(Errorfile,"rU")
for line_idx, line in enumerate(inZ):
	cols = line.replace('\n', '').split() 
#imswc001 482 0.0608584706859 1e-05 0.0180081186063 -159.05915623
	Error_Rates[cols[0]]=[float(cols[2]),float(cols[3]),float(cols[4])]


Qi =  common.get_file(GAOutFile, 'adfadfa')

xx = [idx for idx, i in enumerate(Qi) if 'individual=1 ' in i[0]]
#print xx
final =  Qi[xx[-1]+2][0].replace('raw list order= ', '')
print final
os.popen('python make.new.input.file.from.winning.order.py '+MarkerInit+' '+'"'+final+'" > better.'+ MarkerInit)

Position={}
inY = open('better.'+ MarkerInit,"rU")
TotalMarkers=0
for line_idx, line in enumerate(inY):
	cols = line.replace('\n', '').split('\t') 
	Position[cols[0]+"_"+cols[1]]=line_idx
	TotalMarkers+=1
Example #17
def yield_N(x, n):
    idx, xidx = 0, 0
    k = {}
    k[idx] = []
    while xidx < len(x):
        while len(k[idx]) < n and xidx < len(x):
            k[idx].append(tuple(x[xidx]))
            xidx+=1
        idx+=1
        k[idx] = []
    for i in list(k):  # copy the keys so entries can be deleted while iterating
        if not k[i]:
            del k[i]
    return k

x = get_file(sys.argv[1])
ch_num = sys.argv[1].split('.')[0].replace('chrom', '')
marker_file = get_file('markers.'+ch_num+'.LM.txt', '\t')
scaff_num = len(set([i[0] for i in marker_file]))
#print scaff_num, set([i[0] for i in marker_file])
#for i in x: print i
start_lk = float(x[7][-1].split('=')[-1])
#print start_lk, x[7][-1]
etime = [i for i in x if 'elapsed' in i]
gen = [int(i[0].replace('generation=', '').replace(';', '')) for i in etime]
sec_cum = [float(i[-1]) for i in etime]
last = 0
sec_per = []
for i in sec_cum:
    sec_per.append(i-last)
    last = i
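Examples #14 through #17 all call a module-level get_file(path[, delim]) that returns each line of a text file split into tokens. (Note that Examples #11 and #16 pass a delimiter that never occurs in the file, so each row collapses to a single-element list, which is why that code indexes i[0].) A sketch under that reading:

def get_file(path, delim=None):
    # delim=None splits on any whitespace, matching the init_file usage above
    with open(path) as f:
        return [line.rstrip('\n').split(delim) for line in f]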
Example #18
def add_movie():
    for each_movie_path in movies_path:
        movies = Path(each_movie_path).glob("*/*/*")
        for each_movie in movies:
            split_movie = str(each_movie).split("\\")
            movie_title = split_movie[-1]
            movie_year = int(split_movie[-2])
            movie_cat = split_movie[-3]
            movie_files = inside_folders(each_movie)
            video_full_path = ""
            sub_full_path = ""

            # Go through each file and select the most recently added one
            for each_movie_file in movie_files:
                split_movie_file = str(each_movie_file).split(".")
                if split_movie_file[-1] in ('mp4', 'mkv', 'flv', 'avi', 'm4v',
                                            'm4p'):
                    # if(split_movie_file[-1] == "mp4"):
                    if (video_full_path):
                        store_video_mtime = video_full_path.stat().st_mtime
                        new_video_mtime = each_movie_file.stat().st_mtime
                        if (new_video_mtime > store_video_mtime):
                            video_full_path = each_movie_file
                    else:
                        video_full_path = each_movie_file
                elif (split_movie_file[-1] == "srt"):
                    if (sub_full_path):
                        store_sub_mtime = sub_full_path.stat().st_mtime
                        new_sub_mtime = each_movie_file.stat().st_mtime
                        if (new_sub_mtime > store_sub_mtime):
                            sub_full_path = each_movie_file
                    else:
                        sub_full_path = each_movie_file

            video_file = get_file(video_full_path, each_movie_path)
            sub_file = get_file(sub_full_path, each_movie_path)
            file_size = round(
                video_full_path.stat().st_size / 1000000000, 2)
            quality = ""

            # if the movie doesn't have a quality tag in its name, leave it blank
            if (video_file.find("__") != -1):
                quality = video_file.split("__")[-2]

            # Check whether this movie has already been added
            abl_movies = Movie.objects.filter(title=movie_title,
                                              year__year=movie_year)

            if (abl_movies):
                movie = abl_movies[0]
                if (video_file != movie.file_path):
                    # Delete the old video file
                    old_video_file = f"{get_root_file(video_full_path)}{movie.file_path}"
                    if (Path(old_video_file).exists()):
                        Path(old_video_file).unlink()

                    if (movie.subtitle):
                        # Delete the old subtitle; if the updated movie doesn't have one, set it to null
                        old_sub_file = f"{get_root_file(video_full_path)}{movie.subtitle}"
                        if (Path(old_sub_file).exists()):
                            Path(old_sub_file).unlink()
                        if (sub_file == movie.subtitle):
                            sub_file = ""

                    # Update the path
                    movie.file_path = video_file
                    movie.subtitle = sub_file
                    movie.quality = get_or_add_quality(quality)
                    movie.file_size = file_size
                    movie.add_date = datetime.now()
                    movie.save()

            else:
                search_movie = f"https://api.themoviedb.org/3/search/movie?api_key={api_key}&language=en-US&query={movie_title}&page=1&include_adult=false&year={movie_year}"
                search_movie = requests.get(search_movie).json()

                if (search_movie["results"]):
                    tmdb_id = search_movie["results"][0]['id']
                    get_info = f"https://api.themoviedb.org/3/movie/{tmdb_id}?api_key={api_key}&language=en-US"
                    get_image = f"https://api.themoviedb.org/3/movie/{tmdb_id}/images?api_key={api_key}"
                    movie_info = requests.get(get_info).json()
                    images = requests.get(get_image).json()['backdrops']
                    genres = movie_info["genres"]
                    collection = movie_info['belongs_to_collection']

                    title = movie_info["title"]
                    tagline = movie_info['tagline']
                    overview = movie_info['overview']
                    poster = movie_info['poster_path']
                    backdrop = movie_info['backdrop_path']
                    vote_count = movie_info['vote_count']
                    vote_average = movie_info['vote_average']

                    img_1 = images[0]['file_path'] if len(images) > 0 else ''
                    img_2 = images[1]['file_path'] if len(images) > 1 else ''
                    img_3 = images[2]['file_path'] if len(images) > 2 else ''
                    img_4 = images[3]['file_path'] if len(images) > 3 else ''

                    tmdb_id = movie_info["id"]
                    imdb_id = movie_info["imdb_id"]
                    release_date = search_movie["results"][0]['release_date']

                    # Get the Year
                    year_query = Year.objects.filter(year=movie_year)
                    year = ''
                    if (not year_query):
                        year = Year(year=movie_year)
                        year.save()
                    else:
                        year = year_query[0]

                    # Get the Category
                    cat_query = Movie_Category.objects.filter(name=movie_cat)
                    cat = ''
                    if (not cat_query):
                        cat = Movie_Category(name=movie_cat)
                        cat.save()
                    else:
                        cat = cat_query[0]

                    # Get the Quality
                    qut = get_or_add_quality(quality)

                    # Get the Collections
                    col = create_collection(collection)

                    # Add the movie to the database
                    add_movie = Movie(title=movie_title,
                                      imdb_title=title,
                                      year=year,
                                      catagory=cat,
                                      quality=qut,
                                      tagline=tagline,
                                      overview=overview,
                                      file_path=video_file,
                                      file_size=file_size,
                                      subtitle=sub_file,
                                      tmdb_id=tmdb_id,
                                      imdb_id=imdb_id,
                                      release_date=release_date,
                                      collections=col,
                                      vote_average=vote_average,
                                      vote_count=vote_count)

                    if (poster):
                        img_url = f"https://image.tmdb.org/t/p/w300_and_h450_bestv2{poster}"
                        poster_image = get_tmp_image(img_url)
                        add_movie.poster.save(f"{movie_title}.jpg",
                                              File(poster_image),
                                              save=True)
                    if (backdrop):
                        backdrop_img_url = f"https://image.tmdb.org/t/p/original/{backdrop}"
                        backdrop_img = get_tmp_image(backdrop_img_url)
                        add_movie.backdrop.save(f"{movie_title}_backdrop.jpg",
                                                File(backdrop_img),
                                                save=True)
                    if (img_1):
                        img_1_url = f"https://image.tmdb.org/t/p/original/{img_1}"
                        img = get_tmp_image(img_1_url)
                        add_movie.img_1.save(f"{movie_title}_backdrop.jpg",
                                             File(img),
                                             save=True)
                    if (img_2):
                        img_2_url = f"https://image.tmdb.org/t/p/original/{img_2}"
                        img = get_tmp_image(img_2_url)
                        add_movie.img_2.save(f"{movie_title}_backdrop.jpg",
                                             File(img),
                                             save=True)
                    if (img_3):
                        img_3_url = f"https://image.tmdb.org/t/p/original/{img_3}"
                        img = get_tmp_image(img_3_url)
                        add_movie.img_3.save(f"{movie_title}_backdrop.jpg",
                                             File(img),
                                             save=True)
                    if (img_4):
                        img_4_url = f"https://image.tmdb.org/t/p/original/{img_4}"
                        img = get_tmp_image(img_4_url)
                        add_movie.img_4.save(f"{movie_title}_backdrop.jpg",
                                             File(img),
                                             save=True)

                    try:
                        if (genres):
                            for genra in genres:
                                get_genres = Genre.objects.get(id=genra['id'])
                                add_movie.genres.add(get_genres)
                    except Exception as e:
                        print(e)

                    actors_list = actors(tmdb_id, add_movie)
                    add_movie.actors.set(actors_list)
                    add_trailer(tmdb_id, add_movie)

                    add_movie.save()
Example #19
        arguments["server_cert_validation"] = "validate"
        arguments["ca_trust_path"] = certpath

session = winrm.Session(target=endpoint,
                        auth=(username, password),
                        **arguments)

copy = CopyFiles(session)

destination = args.destination
filename = os.path.basename(args.source)

if filename in args.destination:
    destination = destination.replace(filename, '')
else:
    isFile = common.check_is_file(args.destination)
    if isFile:
        filename = common.get_file(args.destination)
        destination = destination.replace(filename, '')
    else:
        filename = os.path.basename(args.source)

if not os.path.isdir(args.source):
    copy.winrm_upload(remote_path=destination,
                      remote_filename=filename,
                      local_path=args.source,
                      quiet=quiet)
else:
    log.warning("The source is a directory, skipping copy")
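
common.check_is_file and common.get_file are used here to decide whether the remote destination names a file and, if so, to peel the filename off. A hedged sketch, using ntpath since the WinRM destination is a Windows path:

import ntpath


def check_is_file(path):
    # heuristic: a final segment containing an extension is treated as a filename
    return "." in ntpath.basename(path)


def get_file(path):
    return ntpath.basename(path)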

Example #20
def test_file_creation(self):
    """Ensure file is created and has correct headers"""
    d = self.DF('TestID', 'TestFile', self.temp_dir, 'TestGroup',
                5, 'SAMPLE', ["T1", "T2", "T5"], self.sc)
    self.assertEqual('Timestamp,T1,T2,T5 Display\r\n',
                     get_file(self.temp_dir, 'TestFile'))
Example #21
def lambda_handler(event, context):
    LOGGER.info(json.dumps(event))

    raw_body = event["body"]
    body = json.loads(raw_body)
    trigger = body["trigger"]
    webhook_id = body["webhook"]["id"]
    source = body["source"]

    # The event structure varies by trigger
    if "item" in source:
        box_id = source["item"]["id"]
        box_type = source["item"]["type"]
    elif "id" in source:
        box_id = source["id"]
        box_type = source["type"]
    else:
        raise RuntimeError("Missing id field")

    LOGGER.info("Received trigger %s on %s id %s", trigger, box_type, box_id)

    # only get a box client if we're actually going to need one
    if trigger not in common.HANDLED_TRIGGERS:
        LOGGER.info("%s is not supported by this endpoint", trigger)
        return STATUS_SUCCESS

    client, webhook_key = common.get_box_client()
    ddb = common.get_ddb_table()

    webhook = client.webhook(webhook_id)
    is_valid = webhook.validate_message(bytes(raw_body, "utf-8"),
                                        event["headers"], webhook_key)
    if not is_valid:
        LOGGER.critical("Received invalid webhook request")
        return STATUS_SUCCESS

    if trigger in common.HANDLED_FILE_TRIGGERS:
        file = common.get_file(client, box_id)
        if not file:
            LOGGER.warning("File %s is missing (trashed or deleted)", box_id)
            common.delete_file_item(ddb, file)
            return STATUS_SUCCESS

        if common.is_box_file_public(file):
            common.put_file_item(ddb, file)
        else:
            common.delete_file_item(ddb, file)
    elif trigger in common.HANDLED_FOLDER_TRIGGERS:
        folder = common.get_folder(client, box_id)
        if not folder:
            LOGGER.warning("Folder %s is missing (trashed or deleted)", box_id)
            # NOTE(eslavich): The Box API doesn't appear to give us a way to
            # list the contents of a trashed folder, so we're just going to have
            # to let the sync lambda clean up the relevant DynamoDB rows.
            return STATUS_SUCCESS

        for file in common.iterate_files(folder):
            if common.is_box_file_public(file):
                common.put_file_item(ddb, file)
            else:
                common.delete_file_item(ddb, file)

    return STATUS_SUCCESS