diff --git a/btstrm/btstrm.py b/btstrm/btstrm.py
index de8f2c3..153ae39 100644
--- a/btstrm/btstrm.py
+++ b/btstrm/btstrm.py
@@ -28,21 +28,23 @@
 temp_files = []

+
 class CaseSensitiveConfigParser(configparser.ConfigParser):
     def optionxform(self, optionstr):
         return optionstr

+
 def load_config():
     default_config = {
-        'LANG': 'es-ES',
-        'JACKETT_API_KEY': '',
-        'JACKETT_URL': 'http://127.0.0.1:9117',
-        'TIMEOUT': '30'
+        "LANG": "es-ES",
+        "JACKETT_API_KEY": "",
+        "JACKETT_URL": "http://127.0.0.1:9117",
+        "TIMEOUT": "30",
     }

     config = CaseSensitiveConfigParser()
-    config_dir = os.path.join(os.path.expanduser('~'), '.config')
+    config_dir = os.path.join(os.path.expanduser("~"), ".config")

     if not os.path.exists(config_dir):
         os.makedirs(config_dir)
@@ -52,9 +54,9 @@ def load_config():
     if not os.path.exists(config_path):
         print("Config file not found, creating one with default values...")
-        config['DEFAULT'] = default_config
+        config["DEFAULT"] = default_config

-        with open(config_path, 'w') as f:
+        with open(config_path, "w") as f:
             config.write(f)

     else:
@@ -62,34 +64,36 @@ def load_config():
         try:
             config.read(config_path)
             for key in default_config.keys():
-                if key not in config['DEFAULT']:
+                if key not in config["DEFAULT"]:
                     raise KeyError(f"Key {key} missing from existing configuration.")
                 else:
-                    value = str(config.get('DEFAULT', key))
+                    value = str(config.get("DEFAULT", key))
                     globals()[key] = value
         except Exception as e:
             print(f"Error loading settings: {e}")

+
 load_config()

 extensions = ("mp4", "m4v", "mkv", "avi", "mpg", "mpeg", "flv", "webm")
-home_dir = os.path.expanduser('~')
+home_dir = os.path.expanduser("~")
 players = (
     ("omxplayer", "--timeout", "60"),
-    ("mpv","--really-quiet","--cache=no"),
+    ("mpv", "--really-quiet", "--cache=no"),
     # ("mpv", "--pause", "--cache=yes", "--cache-on-disk=yes", "--demuxer-thread=yes", "--demuxer-cache-dir=" + home_dir + "/.cache/mpv"),
     ("vlc", "--file-caching", "10000"),
 )

+
 # TMDB helper functions
 def fetch_movie_data(search_term, language=LANG):
     QUERY = quote(search_term)
     url = f"https://www.themoviedb.org/search?query={QUERY}&language={language}"
     headers = {
-        'Accept-Encoding': 'gzip, deflate, br',
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:122.0) Gecko/20100101 Firefox/122.0',
-        }
+        "Accept-Encoding": "gzip, deflate, br",
+        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:122.0) Gecko/20100101 Firefox/122.0",
+    }

     response = requests.get(url, headers=headers)
     if response.status_code == 200:
@@ -98,39 +102,50 @@ def fetch_movie_data(search_term, language=LANG):
         print(f"Failed to fetch data from TMDB. Status code: {response.status_code}")
         return ""

+
 def parse_html_for_posters_and_titles(html_content):
-    soup = BeautifulSoup(html_content, 'html.parser')
+    soup = BeautifulSoup(html_content, "html.parser")
     images = soup.select('img[loading][class="poster w-[100%]"][alt]')
     results = []
     for image in images:
-        srcset = image.get('srcset').split(',')[-1].strip().split(' ')[0]
-        title = image.get('alt').strip()
+        srcset = image.get("srcset").split(",")[-1].strip().split(" ")[0]
+        title = image.get("alt").strip()
         results.append((srcset, title))

     return results

+
 def search_alternative_titles(search_term):
     html_content = fetch_movie_data(search_term)
     results = parse_html_for_posters_and_titles(html_content)
     return results

+
 def load_image(image_url):
     response = requests.get(image_url, stream=True)
     response.raise_for_status()

-    temp_filename = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + ".jpg")
+    temp_filename = os.path.join(
+        tempfile.gettempdir(), next(tempfile._get_candidate_names()) + ".jpg"
+    )

-    with open(temp_filename, 'wb') as f:
+    with open(temp_filename, "wb") as f:
         shutil.copyfileobj(response.raw, f)

     temp_files.append(temp_filename)
     return temp_filename

+
 def load_images_threaded(urls):
     images = []
     with ThreadPoolExecutor(max_workers=10) as executor:
         futures = {executor.submit(load_image, url): url for url in urls}
-        for future in tqdm(concurrent.futures.as_completed(futures), total=len(urls), desc='Loading images', ncols=70):
+        for future in tqdm(
+            concurrent.futures.as_completed(futures),
+            total=len(urls),
+            desc="Loading images",
+            ncols=70,
+        ):
             try:
                 tmp_file_name = future.result()
                 url = futures[future]
@@ -139,27 +154,33 @@ def load_images_threaded(urls):
                 print(f"Error loading image: {e}")
     return sorted(images, key=lambda x: urls.index(x[1]))

+
 def which(x):
     for d in os.getenv("PATH", "").split(":"):
         if os.path.exists(os.path.join(d, x)):
             return os.path.join(d, x)

+
 def find_player(players):
     for player in players:
         if which(player[0]):
             return player

+
 def find_files(filepath):
     for dirpath, dnames, fnames in os.walk(filepath):
         for f in fnames:
             yield os.path.join(dirpath, f)

+
 def is_sample(filename):
     return "sample" in os.path.basename(filename).lower()

+
 def is_video(filename):
     return any(filename.lower().endswith(i) for i in extensions)

+
 def exit(tempdir, status):
     shutil.rmtree(tempdir)
     sys.exit(status)
@@ -167,37 +188,62 @@ def exit(tempdir, status):

 def get_jackett_indexers():
     try:
-        response = requests.get(f"{JACKETT_URL}/api/v2.0/indexers/all/results/torznab/api?apikey={JACKETT_API_KEY}&t=indexers&configured=true")
+        response = requests.get(
+            f"{JACKETT_URL}/api/v2.0/indexers/all/results/torznab/api?apikey={JACKETT_API_KEY}&t=indexers&configured=true"
+        )
         response.raise_for_status()
         xml_response = ET.fromstring(response.content)
-        indexers = [indexer.get('id') for indexer in xml_response.findall(".//indexer")]
+        indexers = [indexer.get("id") for indexer in xml_response.findall(".//indexer")]
         return indexers
     except requests.exceptions.RequestException as e:
         print(f"Error retrieving indexers: {e}")
         return []

+
 def search_torrents(query, indexer):
     torrents = []
     try:
-        response = requests.get(f"{JACKETT_URL}/api/v2.0/indexers/{indexer}/results/torznab/api?apikey={JACKETT_API_KEY}&q={query}", timeout=int(TIMEOUT))
+        response = requests.get(
+            f"{JACKETT_URL}/api/v2.0/indexers/{indexer}/results/torznab/api?apikey={JACKETT_API_KEY}&q={query}",
+            timeout=int(TIMEOUT),
+        )
         response.raise_for_status()
         xml_response = ET.fromstring(response.content)
         items = xml_response.findall(".//item")
         for item in items:
-            title = item.find("title").text if item.find("title") is not None else "No Title"
-            link = item.find("link").text if item.find("link") is not None else "No Link"
-            seeds_str = item.find("*[@name='seeders']").get('value') if item.find("*[@name='seeders']") is not None else "0"
-            seeds_int = int(seeds_str)
-            size_bytes_str = item.find("size").text if item.find("size") is not None else "0"
-            size_bytes_int = int(size_bytes_str)
-            size_human_readable ='%.2f GB' % (size_bytes_int / (1024*1024*1024))
-            title_with_tracker_name= f'{title} [{indexer}]'
-            torrents.append({'title': title_with_tracker_name, 'seeds': seeds_int , 'size': size_human_readable , 'link': link})
+            title = (
+                item.find("title").text
+                if item.find("title") is not None
+                else "No Title"
+            )
+            link = (
+                item.find("link").text if item.find("link") is not None else "No Link"
+            )
+            seeds_str = (
+                item.find("*[@name='seeders']").get("value")
+                if item.find("*[@name='seeders']") is not None
+                else "0"
+            )
+            seeds_int = int(seeds_str)
+            size_bytes_str = (
+                item.find("size").text if item.find("size") is not None else "0"
+            )
+            size_bytes_int = int(size_bytes_str)
+            size_human_readable = "%.2f GB" % (size_bytes_int / (1024 * 1024 * 1024))
+            title_with_tracker_name = f"{title} [{indexer}]"
+            torrents.append(
+                {
+                    "title": title_with_tracker_name,
+                    "seeds": seeds_int,
+                    "size": size_human_readable,
+                    "link": link,
+                }
+            )
     except requests.exceptions.RequestException as e:
-        print(f"Error searching torrents for indexer {indexer}: {e}")
+        print(f"Error searching torrents for indexer {indexer}: {e}")
     return torrents
@@ -206,36 +252,86 @@ def normalize_query(query):
     ascii_query = unidecode(query)
     return ascii_query

+
 def search_torrents_threaded(query, indexer):
-    non_ascii_letters = ['á', 'é', 'í', 'ó', 'ú',
-                         'ü', 'ñ', 'ç', 'à', 'è',
-                         'ì', 'ò', 'ù', 'â', 'ê',
-                         'î', 'ô', 'û', 'ä', 'ë',
-                         'ï', 'ö', 'ü', 'ÿ', 'ø',
-                         'å', 'æ', 'œ', 'ß', 'ð',
-                         'þ', 'ł', 'ž', 'š', 'ý']
+    non_ascii_letters = [
+        "á",
+        "é",
+        "í",
+        "ó",
+        "ú",
+        "ü",
+        "ñ",
+        "ç",
+        "à",
+        "è",
+        "ì",
+        "ò",
+        "ù",
+        "â",
+        "ê",
+        "î",
+        "ô",
+        "û",
+        "ä",
+        "ë",
+        "ï",
+        "ö",
+        "ü",
+        "ÿ",
+        "ø",
+        "å",
+        "æ",
+        "œ",
+        "ß",
+        "ð",
+        "þ",
+        "ł",
+        "ž",
+        "š",
+        "ý",
+    ]

     if any(letter in query for letter in non_ascii_letters):
         ascii_query = normalize_query(query)
-        torrents = search_torrents(query, indexer) + search_torrents(ascii_query, indexer)
+        torrents = search_torrents(query, indexer) + search_torrents(
+            ascii_query, indexer
+        )
     else:
         torrents = search_torrents(query, indexer)

-    torrents_unique = list({v['link']: v for v in torrents}.values())
+    torrents_unique = list({v["link"]: v for v in torrents}.values())
     return torrents_unique

+
 def call_fzf_with_results(results):
-    with tempfile.NamedTemporaryFile(mode='w+', delete=True) as temp_file:
+    with tempfile.NamedTemporaryFile(mode="w+", delete=True) as temp_file:
         for result in results:
-            temp_file.write(f"{result['title']}\t{result['seeds']}\t{result['size']}\t{result['link']}\n")
+            temp_file.write(
+                f"{result['title']}\t{result['seeds']}\t{result['size']}\t{result['link']}\n"
+            )
         temp_file.flush()

-        selected = subprocess.check_output(['fzf', '--height=20', '--no-sort', '--delimiter', '\t', '--with-nth', '1,2,3',
-            "--preview", "echo {} | awk -F'\t' '{print \"\\033[1mName:\\033[0m \", $1, \"\\n\\033[1mSeeders:\\033[0m \", $2, \"\\n\\033[1mSize:\\033[0m \", $3}'", "--preview-window", "right:wrap",
-            '-q', ''], stdin=open(temp_file.name))
-
-        return selected.decode('utf-8').split('\t')[-1]
+        selected = subprocess.check_output(
+            [
+                "fzf",
+                "--height=20",
+                "--no-sort",
+                "--delimiter",
+                "\t",
+                "--with-nth",
+                "1,2,3",
+                "--preview",
+                'echo {} | awk -F\'\t\' \'{print "\\033[1mName:\\033[0m ", $1, "\\n\\033[1mSeeders:\\033[0m ", $2, "\\n\\033[1mSize:\\033[0m ", $3}\'',
+                "--preview-window",
+                "right:wrap",
+                "-q",
+                "",
+            ],
+            stdin=open(temp_file.name),
+        )
+        return selected.decode("utf-8").split("\t")[-1]


 def scan(directory, indent=""):
@@ -247,7 +343,9 @@
             completed_files.extend(scan(absolute_path, indent + " "))
         else:
             file_stat = os.stat(absolute_path)
-            progress = round(100.0 * 512.0 * file_stat.st_blocks / file_stat.st_size, 0)
+            progress = round(
+                100.0 * 512.0 * file_stat.st_blocks / file_stat.st_size, 0
+            )
             if progress == 100:
                 completed_files.append(absolute_path)
     except PermissionError:
@@ -273,39 +371,44 @@
     total_pieces_downloaded = 0
     first_piece_downloaded = False
     if os.path.exists(log):
-        with open(log_file, 'r') as f:
+        with open(log_file, "r") as f:
             for line in f.readlines():
-                match = re.search(r'\((.*?)\)\[.*?\].*?received .*?peers: (\d+)', line)
+                match = re.search(r"\((.*?)\)\[.*?\].*?received .*?peers: (\d+)", line)
                 if match:
                     tracker = match.group(1)
                     peers_count = int(match.group(2))
                     trackers[tracker] = peers_count

-                if re.search(r'piece.*finished downloading', line):
+                if re.search(r"piece.*finished downloading", line):
                     total_pieces_downloaded += 1

-                if re.search(r'piece: 0 finished downloading', line):
+                if re.search(r"piece: 0 finished downloading", line):
                     first_piece_downloaded = True

     total_peers_counts_for_unique_trackers_last_occurrence = sum(trackers.values())
     # total_downloaded_MBs = round(total_pieces_downloaded * .25,2)

-    output_str=""
+    output_str = ""
     if first_piece_downloaded:
-        output_str=Fore.GREEN + f"Peers: {total_peers_counts_for_unique_trackers_last_occurrence}; Downloaded {total_pieces_downloaded} pieces"
+        output_str = (
+            Fore.GREEN
+            + f"Peers: {total_peers_counts_for_unique_trackers_last_occurrence}; Downloaded {total_pieces_downloaded} pieces"
+        )
     else:
-        output_str=Fore.LIGHTBLACK_EX + f"Peers: {total_peers_counts_for_unique_trackers_last_occurrence}; Downloaded {total_pieces_downloaded} pieces"
+        output_str = (
+            Fore.LIGHTBLACK_EX
+            + f"Peers: {total_peers_counts_for_unique_trackers_last_occurrence}; Downloaded {total_pieces_downloaded} pieces"
+        )

-
-    sys.stdout.write("\r"+ " "*80 + "\r"+output_str)
+    sys.stdout.write("\r" + " " * 80 + "\r" + output_str)
     sys.stdout.flush()

-    threading.Timer(2, read_log, [log_file]).start() # run every 2 seconds
+    threading.Timer(2, read_log, [log_file]).start()  # run every 2 seconds


 def cleanup(mount_point):
-    with open(os.devnull, 'w') as DEVNULL:
+    with open(os.devnull, "w") as DEVNULL:
         subprocess.call(["fusermount", "-z", "-u", mount_point], stderr=DEVNULL)
@@ -317,16 +420,30 @@ def cleanup_temp_files():
         except Exception as e:
             print(f"Error deleting {filepath}: {e}")

+
 atexit.register(cleanup_temp_files)

+
 def main():
     global log
     parser = argparse.ArgumentParser()
     parser.add_argument("-p", "--player", action="store", help="player to launch")
-    parser.add_argument('-k', '--keep', action='store_true', help='keep files and do not delete them')
-    parser.add_argument("-i", "--impd", action="store_true", help="add downloaded files into impd")
-    parser.add_argument("-t", "--title", action="store", help="search for alternative titles")
-    parser.add_argument("URI", nargs='?', action="store", help="magnet link or HTTP metadata URL to play", default="")
+    parser.add_argument(
+        "-k", "--keep", action="store_true", help="keep files and do not delete them"
+    )
+    parser.add_argument(
+        "-i", "--impd", action="store_true", help="add downloaded files into impd"
+    )
+    parser.add_argument(
+        "-t", "--title", action="store", help="search for alternative titles"
+    )
+    parser.add_argument(
+        "URI",
+        nargs="?",
+        action="store",
+        help="magnet link or HTTP metadata URL to play",
+        default="",
+    )
     args = parser.parse_args()

     if args.title:
@@ -338,34 +455,54 @@
         poster_urls = [srcset for srcset, _ in results]
         loaded_posters = load_images_threaded(poster_urls)

-        with tempfile.NamedTemporaryFile(mode='w+', delete=True) as temp_file:
+        with tempfile.NamedTemporaryFile(mode="w+", delete=True) as temp_file:
             for (srcset, title), (poster_file, _) in zip(results, loaded_posters):
                 temp_file.write(f"{poster_file}\t{title}\n")
             temp_file.flush()

-            selected_title = subprocess.check_output(['fzf', '--height=20', '--no-sort',
-                "--delimiter", '\t',
-                "--with-nth", '2',
-                "--preview", "echo {} | awk -F'\t' '{print $1}' | xargs -I{} sh -c 'chafa -s x20 --format=symbols {}'",
-                '-q', ''], stdin=open(temp_file.name))
-
-
-
-
-            query = selected_title.decode('utf-8').strip().split('\t')[1] # Get only the title part from selection
+            selected_title = subprocess.check_output(
+                [
+                    "fzf",
+                    "--height=20",
+                    "--no-sort",
+                    "--delimiter",
+                    "\t",
+                    "--with-nth",
+                    "2",
+                    "--preview",
+                    "echo {} | awk -F'\t' '{print $1}' | xargs -I{} sh -c 'chafa -s x20 --format=symbols {}'",
+                    "-q",
+                    "",
+                ],
+                stdin=open(temp_file.name),
+            )
+
+            query = (
+                selected_title.decode("utf-8").strip().split("\t")[1]
+            )  # Get only the title part from selection
     elif args.URI:
         query = args.URI
         uri = args.URI
     else:
         parser.error("No input provided. Use -t to search for titles or provide a URI.")

-    if not (query.startswith("magnet:") or query.endswith(".torrent") or query.startswith("http://127.0.0.1:9117")) and query:
+    if (
+        not (
+            query.startswith("magnet:")
+            or query.endswith(".torrent")
+            or query.startswith("http://127.0.0.1:9117")
+        )
+        and query
+    ):
         indexers = get_jackett_indexers()
         all_torrents = []

-        with tqdm(total=len(indexers), desc='Searching torrents', ncols=70) as pbar:
+        with tqdm(total=len(indexers), desc="Searching torrents", ncols=70) as pbar:
             with ThreadPoolExecutor(max_workers=20) as executor:
-                futures = {executor.submit(search_torrents_threaded, query, indexer): indexer for indexer in indexers}
+                futures = {
+                    executor.submit(search_torrents_threaded, query, indexer): indexer
+                    for indexer in indexers
+                }
                 for future in concurrent.futures.as_completed(futures):
                     torrents = future.result()
                     all_torrents.extend(torrents)
@@ -373,14 +510,14 @@
             pbar.close()

-        all_torrents.sort(key=lambda x: x['seeds'], reverse=True)
+        all_torrents.sort(key=lambda x: x["seeds"], reverse=True)

         if all_torrents:
-            uri = call_fzf_with_results(all_torrents)
-            print(uri)
+            uri = call_fzf_with_results(all_torrents)
+            print(uri)
         else:
-            print("No torrents found.")
-            return
+            print("No torrents found.")
+            return

     if uri.startswith("http://127.0.0.1:9117"):
         response = get(uri, allow_redirects=False)
@@ -399,8 +536,8 @@
         print("Could not find a player", file=sys.stderr)
         return

-    mount_dir = os.path.join(os.environ['HOME'], '.cache', 'btstrm')
-    ddir = os.path.join(mount_dir, 'download')
+    mount_dir = os.path.join(os.environ["HOME"], ".cache", "btstrm")
+    ddir = os.path.join(mount_dir, "download")
     os.makedirs(mount_dir, exist_ok=True)
     os.makedirs(ddir, exist_ok=True)
     mountpoint = tempfile.mkdtemp(prefix="btstrm-", dir=mount_dir)
@@ -409,12 +546,12 @@
     atexit.register(lambda: cleanup(mountpoint))
     # atexit.register(cleanup_temp_files)
-
-
     if args.keep:
-        failed=subprocess.call(["btfs","--keep",f"--data-directory={ddir}",uri,mountpoint])
+        failed = subprocess.call(
+            ["btfs", "--keep", f"--data-directory={ddir}", uri, mountpoint]
+        )
     else:
-        failed=subprocess.call(["btfs",f"--data-directory={ddir}",uri,mountpoint])
+        failed = subprocess.call(["btfs", f"--data-directory={ddir}", uri, mountpoint])

     if failed:
         exit(mountpoint, failed)
@@ -424,16 +561,19 @@
     while not os.listdir(mountpoint):
         time.sleep(0.25)

-    subdirs = [os.path.join(ddir, d) for d in os.listdir(ddir) if os.path.isdir(os.path.join(ddir, d))]
+    subdirs = [
+        os.path.join(ddir, d)
+        for d in os.listdir(ddir)
+        if os.path.isdir(os.path.join(ddir, d))
+    ]
     last_created_dir = max(subdirs, key=os.path.getmtime)
     log = last_created_dir + "/log.txt"

-
     media = sorted( i for i in find_files(mountpoint) if not is_sample(i) and is_video(i) )
-    mountpoint_removed = [m.replace(mountpoint, '') for m in media]
+    mountpoint_removed = [m.replace(mountpoint, "") for m in media]
     file_paths = [last_created_dir + "/files" + m for m in mountpoint_removed]

     for file_path in file_paths:
@@ -441,7 +581,6 @@

     read_log(log)

-
     if media:
         status = subprocess.call(list(player) + media, stdin=sys.stdin)
    else:
@@ -466,5 +605,7 @@

     subprocess.call(["fusermount", "-z", "-u", mountpoint])
     exit(mountpoint, status)
+
+
 if __name__ == "__main__":
     main()
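
Note: apart from the line wrapping and the single-to-double quote normalization shown above, this diff is behavior-preserving. One idiom worth calling out is the completeness check reflowed in `scan()`, which infers download progress from allocated disk blocks rather than from bytes written. Below is a minimal standalone sketch of that check, assuming Linux `stat` semantics, where `st_blocks` counts the 512-byte blocks actually allocated to a (possibly sparse) file; the helper name `is_fully_downloaded` and the zero-size guard are illustrative additions, not code from this patch:

```python
import os


def is_fully_downloaded(path):
    # On Linux, st_blocks reports how many 512-byte blocks are actually
    # allocated. A file that btfs has only partially fetched is typically
    # sparse, so its allocated size lags st_size until every piece arrives.
    st = os.stat(path)
    if st.st_size == 0:  # guard not present in scan(); avoids ZeroDivisionError
        return True
    progress = round(100.0 * 512.0 * st.st_blocks / st.st_size, 0)
    return progress == 100  # same comparison scan() performs
```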
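Similarly, the reflowed `search_torrents_threaded` deduplicates results by magnet link via a dict comprehension, relying on dict insertion-order semantics (guaranteed since Python 3.7) so that the later duplicate wins and the overall ordering survives. A short illustration with made-up entries; `dedupe_by_link` is a hypothetical name, not part of btstrm:

```python
def dedupe_by_link(torrents):
    # Keeps the *last* torrent seen per link, in insertion order,
    # mirroring list({v["link"]: v for v in torrents}.values()) above.
    return list({t["link"]: t for t in torrents}.values())


results = [
    {"title": "Movie A [idx1]", "seeds": 12, "link": "magnet:?xt=urn:btih:aaa"},
    {"title": "Movie A [idx2]", "seeds": 7, "link": "magnet:?xt=urn:btih:aaa"},
    {"title": "Movie B [idx1]", "seeds": 40, "link": "magnet:?xt=urn:btih:bbb"},
]

unique = dedupe_by_link(results)  # 2 entries; the [idx2] duplicate wins
unique.sort(key=lambda t: t["seeds"], reverse=True)  # as main() does before fzf
```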