diff --git a/.gitignore b/.gitignore
index 561a799..3b48301 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
/.conda
+/.venv
/dist
\ No newline at end of file
diff --git a/README.md b/README.md
index fb7c3e9..3647b54 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,9 @@ This tool is perfect for users who want to standardize their media library's aud
`eac3-transcode` automates the often tedious process of inspecting video files, identifying specific audio tracks, and re-encoding them. It's designed to be smart about which tracks to process:
+* **✨ Optional GUI:**
+ * A user-friendly graphical interface is available for those who prefer not to use the command line. Launch it with `eac3-transcode --launch-gui`.
+
* **Scans Individual Files or Entire Directories:**
* Process a single video or batch-process an entire folder (including subfolders).
@@ -113,6 +116,24 @@ __*This will use all available CPU cores for maximum speed.*__
`eac3-transcode --input "/path/to/your/video_folder/" --force-reprocess`
+6. **Launching the GUI:**
+
+`eac3-transcode --launch-gui`
+
+The GUI provides access to all the same features as the command line, including:
+
+* Browse for input files or folders.
+
+* Browse for an output directory.
+
+* Adjust bitrate, languages, and job count.
+
+* Toggle "Dry Run" and "Force Reprocess".
+
+* Manually load a custom `options.json` config file.
+
+* Watch progress in a real-time log viewer.
+
## Configuration
For convenience, the script supports an `options.json` file to set your preferred defaults.
@@ -164,6 +185,9 @@ An advanced video transcoder that processes files to use E-AC3 for specific audi
* `-h, --help`
Show this help message and exit.
+* `--launch-gui`
+ **(Optional)** Launch the graphical user interface.
+
* `-i INPUT_PATH, --input INPUT_PATH`
**(Required)** Path to the input video file or folder.
@@ -249,6 +273,7 @@ An advanced video transcoder that processes files to use E-AC3 for specific audi
## Contributing
[](http://makeapullrequest.com)
[](https://github.com/jono-rams/surround-to-eac3/issues)
[](https://github.com/jono-rams/surround-to-eac3/pulls)
@@ -279,6 +304,40 @@ Contributions, issues, and feature requests are welcome! Feel free to check [iss
+
+[](https://github.com/jono-rams/surround-to-eac3/graphs/contributors)
+
+## Contributors
+
+
+
+
+
+
+
+
+
## License
diff --git a/setup.cfg b/setup.cfg
index 9256b89..dda9254 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,10 @@
[metadata]
name = surround-to-eac3
-version = 0.4.2
+version = 0.5.0
author = Jonathan Rampersad
author_email = jonathan@jono-rams.work
description = A CLI tool to transcode 5.1 audio in video files to E-AC3.
@@ -26,6 +30,7 @@ python_requires = >=3.10
install_requires =
tqdm
platformdirs
+ customtkinter >= 5.0.0
[options.packages.find]
where=src
diff --git a/src/surround_to_eac3/gui.py b/src/surround_to_eac3/gui.py
new file mode 100644
index 0000000..f142f7a
--- /dev/null
+++ b/src/surround_to_eac3/gui.py
@@ -0,0 +1,416 @@
+import customtkinter as ctk
+import threading
+import sys
+import os
+import queue
+import concurrent.futures
+import argparse
+import shutil
+from tkinter import filedialog
+from tqdm import tqdm
+import json
+from platformdirs import user_config_dir
+
+# Import the processing functions from our new module
+try:
+ from . import processing
+except ImportError:
+ # Fallback for running file directly
+ import processing
+
+# --- Constants ---
+APP_NAME = "eac3-transcode"
+APP_AUTHOR = "eac3-transcode"
+CONFIG_FILENAME = "options.json"
+
+# --- Worker Initializer (needed for GUI thread pool) ---
+def worker_init(worker_id_queue):
+ """Assigns a unique ID to each worker thread for its progress bar."""
+ threading.current_thread().worker_id = worker_id_queue.get()
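+ # The assigned ID (1..N) is later used as the tqdm `position` for that
+ # worker's per-file progress bar, keeping it stacked below the overall bar at 0.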
+
+
+class GuiLogger:
+ """A file-like object to redirect stdout/stderr to the GUI text box."""
+ def __init__(self, app, textbox):
+ self.app = app
+ self.textbox = textbox
+
+ def write(self, msg):
+ """Write message to the textbox, ensuring it's thread-safe."""
+
+ def _write_to_box():
+ """Internal function to run on the main thread."""
+ self.textbox.configure(state="normal")
+ self.textbox.insert("end", str(msg))
+ self.textbox.see("end") # Auto-scroll
+ self.textbox.configure(state="disabled")
+
+ # Use app.after to schedule the GUI update on the main thread
+ self.app.after(0, _write_to_box)
+
+ def flush(self):
+ """Required for file-like object interface."""
+ pass
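+ # Usage sketch: an instance is passed wherever a writable stream is expected,
+ # e.g. tqdm(..., file=gui_logger) or tqdm.write(msg, file=gui_logger), so all
+ # progress and log output is appended to the GUI textbox on the main thread.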
+
+
+class TranscoderApp(ctk.CTk):
+ """Main GUI application window."""
+
+ def __init__(self):
+ super().__init__()
+
+ self.title("E-AC3 Transcoder")
+ self.geometry("800x600")
+ ctk.set_appearance_mode("system")
+
+ self.grid_columnconfigure(0, weight=1)
+ self.grid_rowconfigure(0, weight=1)
+ self.grid_rowconfigure(1, weight=0)
+
+ # --- Load Config File ---
+ default_config = self.load_default_config()
+
+ # --- Main Frame ---
+ self.main_frame = ctk.CTkFrame(self)
+ self.main_frame.grid(row=0, column=0, padx=10, pady=10, sticky="nsew")
+ self.main_frame.grid_columnconfigure(0, weight=1)
+ self.main_frame.grid_rowconfigure(1, weight=1) # Log box row
+
+ # --- Options Frame ---
+ self.options_frame = ctk.CTkFrame(self.main_frame)
+ self.options_frame.grid(row=0, column=0, padx=10, pady=10, sticky="ew")
+ self.options_frame.grid_columnconfigure(1, weight=1)
+
+ # --- Log Frame ---
+ self.log_frame = ctk.CTkFrame(self.main_frame)
+ self.log_frame.grid(row=1, column=0, padx=10, pady=(0, 10), sticky="nsew")
+ self.log_frame.grid_columnconfigure(0, weight=1)
+ self.log_frame.grid_rowconfigure(0, weight=1)
+
+ # --- Button Frame ---
+ self.button_frame = ctk.CTkFrame(self)
+ self.button_frame.grid(row=1, column=0, padx=10, pady=(0, 10), sticky="ew")
+ self.button_frame.grid_columnconfigure(0, weight=1)
+
+ # --- Widgets: Options ---
+ # Input Path
+ self.input_label = ctk.CTkLabel(self.options_frame, text="Input Path:")
+ self.input_label.grid(row=0, column=0, padx=10, pady=5, sticky="w")
+ self.input_entry = ctk.CTkEntry(self.options_frame, placeholder_text="Select a file or folder...")
+ self.input_entry.grid(row=0, column=1, padx=(0, 5), pady=5, sticky="ew")
+ self.input_file_button = ctk.CTkButton(self.options_frame, text="File...", width=80, command=self.select_input_file)
+ self.input_file_button.grid(row=0, column=2, padx=5, pady=5)
+ self.input_folder_button = ctk.CTkButton(self.options_frame, text="Folder...", width=80, command=self.select_input_folder)
+ self.input_folder_button.grid(row=0, column=3, padx=(0, 10), pady=5)
+
+ # Output Path
+ self.output_label = ctk.CTkLabel(self.options_frame, text="Output Dir:")
+ self.output_label.grid(row=1, column=0, padx=10, pady=5, sticky="w")
+ self.output_entry = ctk.CTkEntry(self.options_frame, placeholder_text="Optional (defaults to same as input)")
+ self.output_entry.grid(row=1, column=1, padx=(0, 5), pady=5, sticky="ew")
+ self.output_folder_button = ctk.CTkButton(self.options_frame, text="Select...", width=80, command=self.select_output_folder)
+ self.output_folder_button.grid(row=1, column=2, columnspan=2, padx=(0, 10), pady=5, sticky="ew")
+
+ # Bitrate
+ self.bitrate_label = ctk.CTkLabel(self.options_frame, text="Bitrate:")
+ self.bitrate_label.grid(row=2, column=0, padx=10, pady=5, sticky="w")
+ self.bitrate_entry = ctk.CTkEntry(self.options_frame)
+ self.bitrate_entry.grid(row=2, column=1, padx=(0, 10), pady=5, sticky="w")
+
+ # Languages
+ self.langs_label = ctk.CTkLabel(self.options_frame, text="Languages:")
+ self.langs_label.grid(row=3, column=0, padx=10, pady=5, sticky="w")
+ self.langs_entry = ctk.CTkEntry(self.options_frame)
+ self.langs_entry.grid(row=3, column=1, padx=(0, 10), pady=5, sticky="w")
+
+ # Jobs
+ self.jobs_label = ctk.CTkLabel(self.options_frame, text=f"Jobs (CPUs: {os.cpu_count()}):")
+ self.jobs_label.grid(row=4, column=0, padx=10, pady=5, sticky="w")
+ self.jobs_slider = ctk.CTkSlider(self.options_frame, from_=1, to=os.cpu_count(), number_of_steps=os.cpu_count() - 1, command=lambda v: self.jobs_value_label.configure(text=int(v)))
+ self.jobs_slider.grid(row=4, column=1, padx=(0, 10), pady=5, sticky="ew")
+ self.jobs_value_label = ctk.CTkLabel(self.options_frame, text=os.cpu_count(), width=30)
+ self.jobs_value_label.grid(row=4, column=2, padx=(0, 10), pady=5)
+
+ # Checkboxes
+ self.dry_run_var = ctk.IntVar()
+ self.dry_run_check = ctk.CTkCheckBox(self.options_frame, text="Dry Run (Analyze only)", variable=self.dry_run_var)
+ self.dry_run_check.grid(row=5, column=0, padx=10, pady=10, sticky="w")
+
+ self.force_reprocess_var = ctk.IntVar()
+ self.force_reprocess_check = ctk.CTkCheckBox(self.options_frame, text="Force Reprocess (Overwrite existing)", variable=self.force_reprocess_var)
+ self.force_reprocess_check.grid(row=5, column=1, padx=10, pady=10, sticky="w")
+
+ # Load Config Button
+ self.load_config_button = ctk.CTkButton(self.options_frame, text="Load Config...", width=80, command=self.load_config_from_file)
+ self.load_config_button.grid(row=5, column=3, padx=(0, 10), pady=10, sticky="e")
+
+
+ # --- Widgets: Log ---
+ self.log_textbox = ctk.CTkTextbox(self.log_frame, state="disabled", font=("Courier New", 12))
+ self.log_textbox.grid(row=0, column=0, padx=0, pady=0, sticky="nsew")
+
+ # --- Widgets: Buttons ---
+ self.start_button = ctk.CTkButton(self.button_frame, text="Start Processing", height=40, command=self.start_processing)
+ self.start_button.grid(row=0, column=0, padx=10, pady=5, sticky="ew")
+
+ # --- Member Variables ---
+ self.processing_thread = None
+
+ # --- Apply Initial Config ---
+ self.apply_config(default_config)
+
+ # --- Config Loader ---
+ def load_default_config(self) -> dict:
+ """Loads default config from file, mimicking main.py logic."""
+ user_config_dir_path = user_config_dir(APP_NAME, APP_AUTHOR)
+ user_config_file_path = os.path.join(user_config_dir_path, CONFIG_FILENAME)
+
+ potential_paths = [os.path.join(os.getcwd(), CONFIG_FILENAME), user_config_file_path]
+ config = {}
+
+ for path in potential_paths:
+ if os.path.exists(path):
+ try:
+ with open(path, 'r') as f:
+ config = json.load(f)
+ # We found the config, stop looking
+ break
+ except (json.JSONDecodeError, IOError):
+ # Config is corrupt, just use defaults
+ break
+ return config
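+ # The loaded dict is flat and uses the same keys as the CLI defaults,
+ # e.g. (illustrative values):
+ # {"audio_bitrate": "1536k", "languages": "eng,jpn", "jobs": 4,
+ # "dry_run": false, "force_reprocess": false}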
+
+ def load_config_from_file(self):
+ """Opens a dialog to load a config .json file and applies it."""
+ path = filedialog.askopenfilename(
+ title="Load Config File",
+ filetypes=[("JSON files", "*.json"), ("All Files", "*.*")]
+ )
+ if not path:
+ return # User cancelled
+
+ try:
+ with open(path, 'r') as f:
+ config = json.load(f)
+ self.apply_config(config)
+
+ # Log success
+ self.log_textbox.configure(state="normal")
+ self.log_textbox.insert("1.0", f"â
Successfully loaded config from: {os.path.basename(path)}\n\n")
+ self.log_textbox.configure(state="disabled")
+
+ except Exception as e:
+ # Log failure
+ self.log_textbox.configure(state="normal")
+ self.log_textbox.insert("1.0", f"🚨 Error loading config: {e}\n\n")
+ self.log_textbox.configure(state="disabled")
+
+ def apply_config(self, config: dict):
+ """Applies a config dictionary to all the GUI fields."""
+
+ # Bitrate
+ self.bitrate_entry.delete(0, "end")
+ self.bitrate_entry.insert(0, config.get("audio_bitrate", "1536k"))
+
+ # Languages
+ self.langs_entry.delete(0, "end")
+ self.langs_entry.insert(0, config.get("languages", "eng,jpn"))
+
+ # Jobs
+ default_jobs = config.get("jobs", os.cpu_count())
+ self.jobs_slider.set(default_jobs)
+ self.jobs_value_label.configure(text=default_jobs)
+
+ # Checkboxes
+ self.dry_run_var.set(config.get("dry_run", 0))
+ self.force_reprocess_var.set(config.get("force_reprocess", 0))
+
+ # --- Button Callbacks ---
+ def select_input_file(self):
+ path = filedialog.askopenfilename(filetypes=[("Video Files", "*.mkv *.mp4"), ("All Files", "*.*")])
+ if path:
+ self.input_entry.delete(0, "end")
+ self.input_entry.insert(0, path)
+
+ def select_input_folder(self):
+ path = filedialog.askdirectory()
+ if path:
+ self.input_entry.delete(0, "end")
+ self.input_entry.insert(0, path)
+
+ def select_output_folder(self):
+ path = filedialog.askdirectory()
+ if path:
+ self.output_entry.delete(0, "end")
+ self.output_entry.insert(0, path)
+
+ # --- Processing Logic ---
+ def start_processing(self):
+ """Starts the transcoding job in a new thread."""
+ input_path = self.input_entry.get()
+ if not input_path:
+ self.log_textbox.configure(state="normal")
+ self.log_textbox.delete("1.0", "end")
+ self.log_textbox.insert("end", "đ¨ Error: Please select an input file or folder first.")
+ self.log_textbox.configure(state="disabled")
+ return
+
+ # Disable button, clear log
+ self.start_button.configure(state="disabled", text="Processing...")
+ self.log_textbox.configure(state="normal")
+ self.log_textbox.delete("1.0", "end")
+ self.log_textbox.configure(state="disabled")
+
+ # Start the job in a separate thread to keep the GUI responsive
+ self.processing_thread = threading.Thread(target=self.run_processing_job, daemon=True)
+ self.processing_thread.start()
+
+ def run_processing_job(self):
+ """
+ THE CORE PROCESSING LOOP - This runs on a worker thread.
+ It mimics the logic from `main.py` but uses the GUI logger.
+ """
+
+ # 1. Create a logger that writes to our GUI
+ gui_logger = GuiLogger(self, self.log_textbox)
+
+ # 2. Gather settings from GUI into a mock 'args' object
+ mock_args = argparse.Namespace(
+ input_path=self.input_entry.get(),
+ output_directory_base=self.output_entry.get() or None,
+ audio_bitrate=self.bitrate_entry.get(),
+ languages=self.langs_entry.get(),
+ jobs=int(self.jobs_slider.get()),
+ dry_run=bool(self.dry_run_var.get()),
+ force_reprocess=bool(self.force_reprocess_var.get())
+ )
+
+ # 3. Setup locks and queues for this job
+ tqdm_lock = threading.Lock()
+ worker_id_queue = queue.Queue()
+
+ # 4. File Discovery (mirrors main.py)
+ try:
+ input_path_abs = os.path.abspath(mock_args.input_path)
+ files_to_process_paths = []
+
+ if os.path.isdir(input_path_abs):
+ gui_logger.write(f"đ Scanning folder: {input_path_abs}\n")
+ for root, _, filenames in os.walk(input_path_abs):
+ for filename in filenames:
+ if filename.lower().endswith(processing.SUPPORTED_EXTENSIONS):
+ files_to_process_paths.append(os.path.join(root, filename))
+ if not files_to_process_paths:
+ gui_logger.write(" No .mkv or .mp4 files found.\n")
+ elif os.path.isfile(input_path_abs):
+ if input_path_abs.lower().endswith(processing.SUPPORTED_EXTENSIONS):
+ files_to_process_paths.append(input_path_abs)
+ else:
+ gui_logger.write(f"â ī¸ Provided file is not an .mkv or .mp4.\n")
+ else:
+ gui_logger.write(f"đ¨ Error: Input path is not a valid file or directory.\n")
+ self.processing_finished()
+ return
+
+ if not files_to_process_paths:
+ gui_logger.write("No files to process.\n")
+ self.processing_finished()
+ return
+
+ gui_logger.write(f"\nFound {len(files_to_process_paths)} file(s) to potentially process...\n")
+
+ stats = {
+ "processed": 0, "skipped_no_ops": 0, "skipped_no_transcode": 0,
+ "skipped_identical_path": 0, "skipped_existing": 0, "failed": 0
+ }
+
+ num_jobs = min(mock_args.jobs, len(files_to_process_paths))
+ for i in range(num_jobs):
+ worker_id_queue.put(i + 1) # TQDM positions 1, 2, 3...
+
+ # 5. Run ThreadPoolExecutor (mirrors main.py)
+ # The 'file=gui_logger' is the magic that redirects all tqdm output
+ with tqdm(total=len(files_to_process_paths), desc="Overall Progress", unit="file", ncols=100, smoothing=0.1, position=0, leave=True, file=gui_logger) as pbar:
+ with concurrent.futures.ThreadPoolExecutor(max_workers=num_jobs, initializer=worker_init, initargs=(worker_id_queue,)) as executor:
+
+ def submit_task(filepath):
+ """Wrapper to pass correct params to the processing function."""
+ worker_id = threading.current_thread().worker_id
+ return processing.process_single_file(
+ filepath, worker_id, mock_args, input_path_abs,
+ tqdm_lock, gui_logger # Pass the lock and GUI logger
+ )
+
+ future_to_path = {executor.submit(submit_task, path): path for path in files_to_process_paths}
+
+ for future in concurrent.futures.as_completed(future_to_path):
+ path = future_to_path[future]
+ try:
+ status = future.result()
+ if status in stats:
+ stats[status] += 1
+ else:
+ stats["failed"] += 1
+ with tqdm_lock:
+ tqdm.write(f"đ¨ UNKNOWN STATUS '{status}' for '{os.path.basename(path)}'.\n", file=gui_logger)
+ except Exception as exc:
+ with tqdm_lock:
+ tqdm.write(f"đ¨ CRITICAL ERROR during task for '{os.path.basename(path)}': {exc}\n", file=gui_logger)
+ stats["failed"] += 1
+ finally:
+ pbar.update(1)
+
+ # 6. Print Summary (mirrors main.py)
+ summary_title = "--- Dry Run Summary ---" if mock_args.dry_run else "--- Processing Summary ---"
+ processed_label = "Would be processed" if mock_args.dry_run else "Successfully processed"
+
+ summary = [
+ f"\n\n{summary_title}\n",
+ f"Total files checked: {len(files_to_process_paths)}\n",
+ f"â
{processed_label}: {stats['processed']}\n"
+ ]
+
+ total_skipped = stats['skipped_no_ops'] + stats['skipped_no_transcode'] + stats['skipped_identical_path'] + stats['skipped_existing']
+ summary.append(f"âī¸ Total Skipped: {total_skipped}\n")
+
+ if total_skipped > 0:
+ summary.append(f" - No target audio operations: {stats['skipped_no_ops']}\n")
+ summary.append(f" - No transcoding required (all copy): {stats['skipped_no_transcode']}\n")
+ summary.append(f" - Identical input/output path: {stats['skipped_identical_path']}\n")
+ summary.append(f" - Output file already exists: {stats['skipped_existing']}\n")
+
+ summary.append(f"đ¨ Failed to process: {stats['failed']}\n")
+ summary.append("--------------------------\n")
+ gui_logger.write("".join(summary))
+
+ except Exception as e:
+ gui_logger.write(f"\n\nđ¨ A CRITICAL ERROR occurred: {e}\n")
+ finally:
+ # 7. Re-enable the button on the main thread
+ self.processing_finished()
+
+ def processing_finished(self):
+ """Schedules the 'Start' button to be re-enabled on the main GUI thread."""
+ # Use self.after, not self.app.after, as 'self' is the app instance
+ self.after(0, lambda: self.start_button.configure(state="normal", text="Start Processing"))
+
+
+def launch():
+ """Entry point for launching the GUI."""
+ # Check for ffmpeg/ffprobe before launching
+ if not shutil.which("ffmpeg") or not shutil.which("ffprobe"):
+ ctk.set_appearance_mode("system")
+ root = ctk.CTk()
+ root.withdraw() # Hide the main window
+ # Simple message box
+ from tkinter import messagebox
+ messagebox.showerror(
+ "Missing Dependencies",
+ "Error: ffmpeg and/or ffprobe are not installed or not found in your system's PATH. Please install ffmpeg to use this tool."
+ )
+ root.destroy()
+ return
+
+ app = TranscoderApp()
+ app.mainloop()
+
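+
+# A minimal way to exercise this module on its own (assumes customtkinter is
+# installed and the package is importable); the normal entry point is
+# `eac3-transcode --launch-gui`, which calls launch() from main.py.
+if __name__ == "__main__":
+    launch()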
diff --git a/src/surround_to_eac3/main.py b/src/surround_to_eac3/main.py
index 1d6e385..389e00a 100644
--- a/src/surround_to_eac3/main.py
+++ b/src/surround_to_eac3/main.py
@@ -6,323 +6,22 @@ import argparse
import json
import threading
import queue
+import sys
from tqdm import tqdm
from platformdirs import user_config_dir
+# --- Import refactored processing functions ---
+try:
+ from . import processing
+except ImportError:
+ # Fallback for running file directly
+ import processing
+
# --- Constants for Configuration ---
APP_NAME = "eac3-transcode"
APP_AUTHOR = "eac3-transcode"
CONFIG_FILENAME = "options.json"
-# Global lock for TQDM writes to prevent interleaving from multiple threads
-tqdm_lock = threading.Lock()
-SUPPORTED_EXTENSIONS = (".mkv", ".mp4")
-
-
-def get_video_duration(filepath: str) -> float:
- """Gets the duration of a video file in seconds."""
- if not shutil.which("ffprobe"):
- return 0.0
-
- command = [
- "ffprobe",
- "-v", "error",
- "-show_entries", "format=duration",
- "-of", "default=noprint_wrappers=1:nokey=1",
- filepath
- ]
- try:
- process = subprocess.run(command, capture_output=True, text=True, check=True, creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0)
- return float(process.stdout.strip())
- except (subprocess.CalledProcessError, ValueError):
- return 0.0
-
-
-def get_stream_info(filepath: str, stream_type: str = "audio") -> tuple[list[dict], list[str]]:
- """
- Retrieves details for specified stream types (audio, video, subtitle) in a file.
- For audio, returns list of dicts with 'index', 'codec_name', 'channels', 'language'.
- For video/subtitle, returns list of dicts with 'index', 'codec_name'.
- """
- logs = []
- if not shutil.which("ffprobe"):
- logs.append(f" â ī¸ Warning: ffprobe is missing. Cannot get {stream_type} stream info for '{os.path.basename(filepath)}'.")
- return [], logs
-
- select_streams_option = {
- "audio": "a",
- "video": "v",
- "subtitle": "s"
- }.get(stream_type, "a") # Default to audio if type is unknown
-
- ffprobe_cmd = [
- "ffprobe", "-v", "quiet", "-print_format", "json",
- "-show_streams", "-select_streams", select_streams_option, filepath
- ]
-
- try:
- process = subprocess.run(
- ffprobe_cmd, capture_output=True, text=True, check=False,
- creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
- )
- if process.returncode != 0:
- # Non-critical error for this function, main processing will decide to skip/fail
- return [], logs
- if not process.stdout.strip():
- return [], logs # No streams of the selected type found
-
- data = json.loads(process.stdout)
- streams_details = []
- for stream in data.get("streams", []):
- detail = {
- "index": stream["index"], # Absolute stream index
- "codec_name": stream.get("codec_name", "unknown")
- }
- if stream_type == "audio":
- detail["channels"] = stream.get("channels")
- detail["language"] = stream.get("tags", {}).get("language", "und").lower()
- streams_details.append(detail)
- return streams_details, logs
- except json.JSONDecodeError:
- logs.append(f" â ī¸ Warning: Failed to decode ffprobe JSON for {stream_type} streams in '{os.path.basename(filepath)}'.")
- return [], logs
- except Exception as e:
- logs.append(f" â ī¸ Error getting {stream_type} stream info for '{os.path.basename(filepath)}': {e}")
- return [], logs
-
-
-def time_str_to_seconds(time_str: str) -> float:
- """Converts HH:MM:SS.ms time string to seconds."""
- parts = time_str.split(':')
- seconds = float(parts[-1])
- if len(parts) > 1:
- seconds += int(parts[-2]) * 60
- if len(parts) > 2:
- seconds += int(parts[-3]) * 3600
- return seconds
-
-
-def process_file_with_ffmpeg(
- input_filepath: str,
- final_output_filepath: str | None,
- audio_bitrate: str,
- audio_processing_ops: list[dict], # [{'index':X, 'op':'transcode'/'copy', 'lang':'eng'}]
- duration: float,
- pbar_position: int
-) -> tuple[bool, list[str]]:
- """
- Processes a single video file using ffmpeg, writing to a temporary file first.
- """
- logs = []
- if not shutil.which("ffmpeg"):
- logs.append(" đ¨ Error: ffmpeg is not installed or not found.")
- return False, logs
-
- # FFMpeg will write to a temporary file, which we will rename upon success
- temp_output_filepath = final_output_filepath + ".tmp"
- base_filename = os.path.basename(input_filepath)
- output_filename = os.path.basename(final_output_filepath)
-
- ffmpeg_cmd = ["ffmpeg", "-nostdin", "-i", input_filepath, "-map_metadata", "0"]
- map_operations = []
- output_audio_stream_ffmpeg_idx = 0 # For -c:a:0, -c:a:1 etc.
-
- # Map Video Streams
- map_operations.extend(["-map", "0:v?", "-c:v", "copy"])
- # Map Subtitle Streams
- map_operations.extend(["-map", "0:s?", "-c:s", "copy"])
-
- # Map Audio Streams based on operations
- for op_details in audio_processing_ops:
- map_operations.extend(["-map", f"0:{op_details['index']}"])
- if op_details['op'] == 'transcode':
- map_operations.extend([f"-c:a:{output_audio_stream_ffmpeg_idx}", "eac3", f"-b:a:{output_audio_stream_ffmpeg_idx}", audio_bitrate, f"-ac:a:{output_audio_stream_ffmpeg_idx}", "6", f"-metadata:s:a:{output_audio_stream_ffmpeg_idx}", f"language={op_details['lang']}"])
- elif op_details['op'] == 'copy':
- map_operations.extend([f"-c:a:{output_audio_stream_ffmpeg_idx}", "copy"])
- output_audio_stream_ffmpeg_idx += 1
-
- ffmpeg_cmd.extend(map_operations)
-
- if final_output_filepath.lower().endswith('.mkv'):
- ffmpeg_cmd.extend(['-f', 'matroska'])
- elif final_output_filepath.lower().endswith('.mp4'):
- ffmpeg_cmd.extend(['-f', 'mp4'])
-
- ffmpeg_cmd.extend(["-y", "-v", "quiet", "-stats_period", "1", "-progress", "pipe:1", temp_output_filepath])
-
- logs.append(f" âī¸ Processing: '{base_filename}' -> '{output_filename}'")
-
- process = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0)
-
- file_pbar = None
- if duration > 0:
- file_pbar = tqdm(total=int(duration), desc=f"ââ'{base_filename[:30]}âĻ'", position=pbar_position, unit='s', leave=False, ncols=100)
-
- for line in process.stdout:
- if "out_time_ms" in line:
- try:
- time_us = int(line.strip().split("=")[1])
- elapsed_seconds = time_us / 1_000_000
- update_amount = max(0, elapsed_seconds - file_pbar.n)
- if update_amount > 0:
- file_pbar.update(update_amount)
- except (ValueError, IndexError):
- continue
-
- process.wait()
- file_pbar.close()
-
- if process.returncode == 0:
- if os.path.exists(temp_output_filepath) and os.path.getsize(temp_output_filepath) > 0:
- os.rename(temp_output_filepath, final_output_filepath)
- logs.append(f" â
Success: '{output_filename}' saved.")
- return True, logs
- else:
- logs.append(f" â ī¸ Warning: ffmpeg reported success, but temp file is missing or empty.")
- return False, logs
- else:
- logs.append(f" đ¨ Error during ffmpeg processing for '{base_filename}'. RC: {process.returncode}")
- stderr_output = process.stderr.read()
- if stderr_output:
- logs.append(f" ffmpeg stderr:\n{stderr_output.strip()}")
- return False, logs
-
-
-def process_single_file(filepath: str, pbar_position: int, args: argparse.Namespace, input_path_abs: str) -> str:
- """
- Analyzes and processes a single file, managing temporary files for graceful exit.
- """
- file_specific_logs = []
- final_status = "failed"
-
-
- # Determine a display name relative to the initial input path for cleaner logs
- display_name = os.path.relpath(filepath, input_path_abs) if os.path.isdir(input_path_abs) else os.path.basename(filepath)
- file_specific_logs.append(f"âļī¸ Checked: '{display_name}'")
-
- target_languages = [lang.strip().lower() for lang in args.languages.split(',') if lang.strip()]
-
- audio_streams_details, get_info_logs = get_stream_info(filepath, "audio")
- file_specific_logs.extend(get_info_logs)
-
- audio_ops_for_ffmpeg = []
- if not audio_streams_details:
- file_specific_logs.append(" âšī¸ No audio streams found in this file.")
- else:
- for stream in audio_streams_details:
- lang = stream['language']
- op_to_perform = None
- channels_info = f"{stream.get('channels')}ch" if stream.get('channels') is not None else "N/Ach"
- codec_name = stream.get('codec_name', 'unknown')
-
- if lang in target_languages:
- is_5_1 = stream.get('channels') == 6
- is_not_ac3_eac3 = codec_name not in ['ac3', 'eac3']
- if is_5_1 and is_not_ac3_eac3:
- op_to_perform = 'transcode'
- file_specific_logs.append(f" đ Will transcode: Audio stream #{stream['index']} ({lang}, {channels_info}, {codec_name})")
- else:
- op_to_perform = 'copy'
- reason_parts = [f"already {codec_name}" if codec_name in ['ac3', 'eac3'] else None, f"not 5.1 ({channels_info})" if stream.get('channels') != 6 else None]
- reason = ", ".join(filter(None, reason_parts)) or "meets other criteria for copying"
- file_specific_logs.append(f" đ Will copy: Audio stream #{stream['index']} ({lang}, {channels_info}, {codec_name}) - Reason: {reason}")
- else:
- file_specific_logs.append(f" đ Will drop: Audio stream #{stream['index']} ({lang}, {channels_info}, {codec_name}) - Not a target language.")
-
- if op_to_perform:
- audio_ops_for_ffmpeg.append({'index': stream['index'], 'op': op_to_perform, 'lang': lang})
-
- # First, check if there are any operations at all for target languages
- if not audio_ops_for_ffmpeg:
- file_specific_logs.append(f" âī¸ Skipping '{display_name}': No target audio streams to process (copy/transcode).")
- with tqdm_lock:
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- final_status = "skipped_no_ops"
- return final_status
-
- needs_transcode = any(op['op'] == 'transcode' for op in audio_ops_for_ffmpeg)
- if not needs_transcode:
- file_specific_logs.append(f" âī¸ Skipping '{display_name}': No transcoding required.")
- with tqdm_lock:
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- final_status = "skipped_no_transcode"
- return final_status
-
- # Determine final output path
- name, ext = os.path.splitext(os.path.basename(filepath))
- output_filename = f"{name}_eac3{ext}"
- output_dir_for_this_file = os.path.dirname(filepath) # Default to same directory
- if args.output_directory_base: # Input was a folder
- if os.path.isdir(input_path_abs):
- relative_dir = os.path.relpath(os.path.dirname(filepath), start=input_path_abs)
- output_dir_for_this_file = os.path.join(args.output_directory_base, relative_dir) if relative_dir != "." else args.output_directory_base
- else: # Input was a single file
- output_dir_for_this_file = args.output_directory_base
-
- final_output_filepath = os.path.join(output_dir_for_this_file, output_filename)
-
- # Check if the output file already exists and we are NOT forcing reprocessing.
- if os.path.exists(final_output_filepath) and not args.force_reprocess:
- file_specific_logs.append(f" âī¸ Skipping: Output file already exists. Use --force-reprocess to override.")
- with tqdm_lock:
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- final_status = "skipped_existing"
- return final_status
-
- # Check for identical paths before starting
- if os.path.abspath(filepath) == os.path.abspath(final_output_filepath):
- file_specific_logs.append(f" â ī¸ Warning: Input and output paths are identical. Skipping.")
- with tqdm_lock:
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- final_status = "skipped_identical_path"
- return final_status
-
- if args.dry_run:
- file_specific_logs.append(f" DRY RUN: Would process '{display_name}'. No changes will be made.")
- with tqdm_lock:
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- # We return 'processed' to indicate it *would* have been processed
- final_status = "processed"
- return final_status
-
- # Ensure output directory exists before processing
- if not os.path.isdir(output_dir_for_this_file):
- try:
- os.makedirs(output_dir_for_this_file, exist_ok=True)
- except OSError as e:
- file_specific_logs.append(f" đ¨ Error creating output directory '{output_dir_for_this_file}': {e}")
- with tqdm_lock:
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- return "failed"
-
- duration = get_video_duration(filepath)
- if duration == 0:
- file_specific_logs.append(f" â ī¸ Could not determine duration for '{display_name}'. Per-file progress will not be shown.")
-
- temp_filepath = final_output_filepath + ".tmp"
- try:
- success, ffmpeg_logs = process_file_with_ffmpeg(filepath, final_output_filepath, args.audio_bitrate, audio_ops_for_ffmpeg, duration, pbar_position)
- file_specific_logs.extend(ffmpeg_logs)
- final_status = "processed" if success else "failed"
- finally:
- # This block will run whether the try block succeeded, failed, or was interrupted.
- if os.path.exists(temp_filepath):
- try:
- os.remove(temp_filepath)
- except OSError as e:
- file_specific_logs.append(f" đ¨ Error cleaning up temp file '{temp_filepath}': {e}")
-
- with tqdm_lock: # Print all logs for this file at the very end of its processing
- for log_msg in file_specific_logs:
- tqdm.write(log_msg)
- return final_status
-
# Worker initializer to assign a unique position to each worker's progress bar
def worker_init(worker_id_queue):
@@ -330,6 +29,22 @@ def worker_init(worker_id_queue):
def main():
+ # --- GUI LAUNCHER ---
+ # Check for --launch-gui *before* parsing args
+ if "--launch-gui" in sys.argv:
+ print("Launching GUI...")
+ try:
+ from . import gui
+ gui.launch()
+ except ImportError as e:
+ print(f"đ¨ Error: GUI dependencies are not installed. {e}", file=sys.stderr)
+ print("Please run: pip install surround-to-eac3[gui]", file=sys.stderr)
+ except Exception as e:
+ # Catch other GUI-related errors (e.g., display not found)
+ print(f"đ¨ Error launching GUI: {e}", file=sys.stderr)
+ sys.exit() # Exit after launching or failing
+ # ---------------------
+
# Initial check for ffmpeg and ffprobe
if not shutil.which("ffmpeg") or not shutil.which("ffprobe"):
missing_tools = []
@@ -342,12 +57,19 @@ def main():
description="Advanced video transcoder: E-AC3 for specific audio, language filtering, folder processing.",
formatter_class=argparse.RawTextHelpFormatter
)
+ # Add the new --launch-gui argument
+ parser.add_argument(
+ "--launch-gui",
+ action="store_true",
+ help="Launch the graphical user interface."
+ )
parser.add_argument(
"-i", "--input",
- required=True,
help="Path to the input video file or folder.",
dest="input_path"
)
parser.add_argument(
"-o", "--outdir",
help="Optional. Base directory to save processed files.\n"
@@ -384,16 +106,15 @@ def main():
action="store_true",
help="Force reprocessing of all files, even if an output file with the target name already exists."
)
-
- # --- Configuration File Logic ---
+
+ # --- Configuration File Logic (unchanged) ---
config = {}
-
user_config_dir_path = user_config_dir(APP_NAME, APP_AUTHOR)
user_config_file_path = os.path.join(user_config_dir_path, CONFIG_FILENAME)
if not os.path.exists(user_config_file_path):
try:
- defaults = {action.dest: action.default for action in parser._actions if action.dest != "help" and not action.required}
+ defaults = {action.dest: action.default for action in parser._actions if action.dest != "help" and not action.required and action.dest != "launch_gui"}
os.makedirs(user_config_dir_path, exist_ok=True)
with open(user_config_file_path, 'w') as f:
json.dump(defaults, f, indent=4)
@@ -415,7 +136,12 @@ def main():
break
parser.set_defaults(**config)
+
+ # Check for --input manually since it's no longer required by argparse
+ # to allow --launch-gui to work without it.
args = parser.parse_args()
+ if not args.input_path:
+ parser.error("-i/--input is required for CLI mode.")
if loaded_config_path:
print(f"â
Loaded configuration from: {loaded_config_path}")
@@ -423,19 +149,19 @@ def main():
if args.dry_run:
print("--- DRY RUN MODE ENABLED: No files will be modified. ---")
- # --- File Discovery ---
+ # --- File Discovery (unchanged) ---
input_path_abs = os.path.abspath(args.input_path)
files_to_process_paths = []
if os.path.isdir(input_path_abs):
print(f"đ Scanning folder: {input_path_abs}")
for root, _, filenames in os.walk(input_path_abs):
for filename in filenames:
- if filename.lower().endswith(SUPPORTED_EXTENSIONS):
+ if filename.lower().endswith(processing.SUPPORTED_EXTENSIONS):
files_to_process_paths.append(os.path.join(root, filename))
if not files_to_process_paths:
- print(" No .mkv or .mp4 files found in the specified folder.")
+ print(" No .mkv or .mp4 files found in the specified folder.")
elif os.path.isfile(input_path_abs):
- if input_path_abs.lower().endswith((".mkv", ".mp4")):
+ if input_path_abs.lower().endswith(processing.SUPPORTED_EXTENSIONS):
files_to_process_paths.append(input_path_abs)
else:
print(f"â ī¸ Provided file '{args.input_path}' is not an .mkv or .mp4 file. Skipping this input.")
@@ -448,27 +174,31 @@ def main():
return
print(f"\nFound {len(files_to_process_paths)} file(s) to potentially process...")
- # Initialize stats counters
stats = {
- "processed": 0,
- "skipped_no_ops": 0,
- "skipped_no_transcode": 0,
- "skipped_identical_path": 0,
- "skipped_existing": 0,
- "failed": 0
+ "processed": 0, "skipped_no_ops": 0, "skipped_no_transcode": 0,
+ "skipped_identical_path": 0, "skipped_existing": 0, "failed": 0
}
+ # --- Main Processing Loop ---
+ # We create the lock and queue here for the CLI job
+ tqdm_lock = threading.Lock()
worker_id_queue = queue.Queue()
- for i in range(args.jobs):
+ num_jobs = min(args.jobs, len(files_to_process_paths))
+ for i in range(num_jobs):
worker_id_queue.put(i + 1)
try:
- with tqdm(total=len(files_to_process_paths), desc="Overall Progress", unit="file", ncols=100, smoothing=0.1, position=0, leave=True) as pbar:
- with concurrent.futures.ThreadPoolExecutor(max_workers=args.jobs, initializer=worker_init, initargs=(worker_id_queue,)) as executor:
+ with tqdm(total=len(files_to_process_paths), desc="Overall Progress", unit="file", ncols=100, smoothing=0.1, position=0, leave=True, file=sys.stderr) as pbar:
+ with concurrent.futures.ThreadPoolExecutor(max_workers=num_jobs, initializer=worker_init, initargs=(worker_id_queue,)) as executor:
def submit_task(filepath):
+ """Wrapper to pass correct params to the processing function."""
worker_id = threading.current_thread().worker_id
- return process_single_file(filepath, worker_id, args, input_path_abs)
+ # We pass the lock and the standard sys.stderr writer
+ return processing.process_single_file(
+ filepath, worker_id, args, input_path_abs,
+ tqdm_lock, sys.stderr
+ )
future_to_path = {executor.submit(submit_task, path): path for path in files_to_process_paths}
@@ -481,23 +211,21 @@ def main():
else:
stats["failed"] += 1
with tqdm_lock:
- tqdm.write(f"đ¨ UNKNOWN STATUS '{status}' for '{os.path.basename(path)}'.")
+ tqdm.write(f"đ¨ UNKNOWN STATUS '{status}' for '{os.path.basename(path)}'.", file=sys.stderr)
except Exception as exc:
with tqdm_lock:
- tqdm.write(f"đ¨ CRITICAL ERROR during task for '{os.path.basename(path)}': {exc}")
+ tqdm.write(f"đ¨ CRITICAL ERROR during task for '{os.path.basename(path)}': {exc}", file=sys.stderr)
stats["failed"] += 1
finally:
pbar.update(1)
except KeyboardInterrupt:
- print("\n\nđ¨ Process interrupted by user. Shutting down gracefully... Any in-progress files have been cleaned up.")
- # The 'finally' blocks in each thread will handle cleanup.
- # Exiting here.
+ print("\n\nđ¨ Process interrupted by user. Shutting down gracefully...")
return
- # Print summary of operations
+ # --- Summary (unchanged) ---
summary_title = "--- Dry Run Summary ---" if args.dry_run else "--- Processing Summary ---"
- processed_label = "Would be processed" if args.dry_run else "Successfully processed"
+ processed_label = "Would be processed" if args.dry_run else "Successfully processed"
print()
print(f"\n{summary_title}")
@@ -506,9 +234,12 @@ def main():
total_skipped = stats['skipped_no_ops'] + stats['skipped_no_transcode'] + stats['skipped_identical_path'] + stats['skipped_existing']
print(f"âī¸ Total Skipped: {total_skipped}")
if total_skipped > 0:
- print(f" - No target audio operations: {stats['skipped_no_ops']}")
- print(f" - No transcoding required (all copy): {stats['skipped_no_transcode']}")
- print(f" - Identical input/output path: {stats['skipped_identical_path']}")
- print(f" - Output file already exists: {stats['skipped_existing']}")
+ print(f" - No target audio operations: {stats['skipped_no_ops']}")
+ print(f" - No transcoding required (all copy): {stats['skipped_no_transcode']}")
+ print(f" - Identical input/output path: {stats['skipped_identical_path']}")
+ print(f" - Output file already exists: {stats['skipped_existing']}")
print(f"đ¨ Failed to process: {stats['failed']}")
print("--------------------------")
+
+if __name__ == "__main__":
+ main()
diff --git a/src/surround_to_eac3/processing.py b/src/surround_to_eac3/processing.py
new file mode 100644
index 0000000..40739b6
--- /dev/null
+++ b/src/surround_to_eac3/processing.py
@@ -0,0 +1,304 @@
+import subprocess
+import os
+import shutil
+import json
+import sys
+from tqdm import tqdm
+
+# --- Constants ---
+SUPPORTED_EXTENSIONS = (".mkv", ".mp4")
+
+
+def get_video_duration(filepath: str) -> float:
+ """Gets the duration of a video file in seconds."""
+ if not shutil.which("ffprobe"):
+ return 0.0
+
+ command = [
+ "ffprobe",
+ "-v", "error",
+ "-show_entries", "format=duration",
+ "-of", "default=noprint_wrappers=1:nokey=1",
+ filepath
+ ]
+ try:
+ process = subprocess.run(command, capture_output=True, text=True, check=True, creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0)
+ return float(process.stdout.strip())
+ except (subprocess.CalledProcessError, ValueError):
+ return 0.0
+
+
+def get_stream_info(filepath: str, stream_type: str = "audio") -> tuple[list[dict], list[str]]:
+ """
+ Retrieves details for specified stream types (audio, video, subtitle) in a file.
+ """
+ logs = []
+ if not shutil.which("ffprobe"):
+ logs.append(f" â ī¸ Warning: ffprobe is missing. Cannot get {stream_type} stream info for '{os.path.basename(filepath)}'.")
+ return [], logs
+
+ select_streams_option = {
+ "audio": "a",
+ "video": "v",
+ "subtitle": "s"
+ }.get(stream_type, "a")
+
+ ffprobe_cmd = [
+ "ffprobe", "-v", "quiet", "-print_format", "json",
+ "-show_streams", "-select_streams", select_streams_option, filepath
+ ]
+
+ try:
+ process = subprocess.run(
+ ffprobe_cmd, capture_output=True, text=True, check=False,
+ creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
+ )
+ if process.returncode != 0:
+ return [], logs
+ if not process.stdout.strip():
+ return [], logs
+
+ data = json.loads(process.stdout)
+ streams_details = []
+ for stream in data.get("streams", []):
+ detail = {
+ "index": stream["index"],
+ "codec_name": stream.get("codec_name", "unknown")
+ }
+ if stream_type == "audio":
+ detail["channels"] = stream.get("channels")
+ detail["language"] = stream.get("tags", {}).get("language", "und").lower()
+ streams_details.append(detail)
+ return streams_details, logs
+ except json.JSONDecodeError:
+ logs.append(f" â ī¸ Warning: Failed to decode ffprobe JSON for {stream_type} streams in '{os.path.basename(filepath)}'.")
+ return [], logs
+ except Exception as e:
+ logs.append(f" â ī¸ Error getting {stream_type} stream info for '{os.path.basename(filepath)}': {e}")
+ return [], logs
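+ # Example return value for an audio query (illustrative):
+ # ([{"index": 1, "codec_name": "dts", "channels": 6, "language": "eng"}], [])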
+
+
+def process_file_with_ffmpeg(
+ input_filepath: str,
+ final_output_filepath: str | None,
+ audio_bitrate: str,
+ audio_processing_ops: list[dict],
+ duration: float,
+ pbar_position: int,
+ tqdm_lock,
+ tqdm_file_writer=sys.stderr
+) -> tuple[bool, list[str]]:
+ """
+ Processes a single video file using ffmpeg, writing to a temporary file first.
+ """
+ logs = []
+ if not shutil.which("ffmpeg"):
+ logs.append(" đ¨ Error: ffmpeg is not installed or not found.")
+ return False, logs
+
+ temp_output_filepath = final_output_filepath + ".tmp"
+ base_filename = os.path.basename(input_filepath)
+ output_filename = os.path.basename(final_output_filepath)
+
+ ffmpeg_cmd = ["ffmpeg", "-nostdin", "-i", input_filepath, "-map_metadata", "0"]
+ map_operations = []
+ output_audio_stream_ffmpeg_idx = 0
+
+ map_operations.extend(["-map", "0:v?", "-c:v", "copy"])
+ map_operations.extend(["-map", "0:s?", "-c:s", "copy"])
+
+ for op_details in audio_processing_ops:
+ map_operations.extend(["-map", f"0:{op_details['index']}"])
+ if op_details['op'] == 'transcode':
+ map_operations.extend([f"-c:a:{output_audio_stream_ffmpeg_idx}", "eac3", f"-b:a:{output_audio_stream_ffmpeg_idx}", audio_bitrate, f"-ac:a:{output_audio_stream_ffmpeg_idx}", "6", f"-metadata:s:a:{output_audio_stream_ffmpeg_idx}", f"language={op_details['lang']}"])
+ elif op_details['op'] == 'copy':
+ map_operations.extend([f"-c:a:{output_audio_stream_ffmpeg_idx}", "copy"])
+ output_audio_stream_ffmpeg_idx += 1
+
+ ffmpeg_cmd.extend(map_operations)
+
+ if final_output_filepath.lower().endswith('.mkv'):
+ ffmpeg_cmd.extend(['-f', 'matroska'])
+ elif final_output_filepath.lower().endswith('.mp4'):
+ ffmpeg_cmd.extend(['-f', 'mp4'])
+
+ ffmpeg_cmd.extend(["-y", "-v", "quiet", "-stats_period", "1", "-progress", "pipe:1", temp_output_filepath])
+
+ logs.append(f" âī¸ Processing: '{base_filename}' -> '{output_filename}'")
+
+ process = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0)
+
+ file_pbar = None
+ if duration > 0:
+ file_pbar = tqdm(total=int(duration), desc=f"└─'{base_filename[:30]}…'", position=pbar_position, unit='s', leave=False, ncols=100, file=tqdm_file_writer)
+
+ for line in process.stdout:
+ if "out_time_us" in line:
+ try:
+ time_us = int(line.strip().split("=")[1])
+ elapsed_seconds = time_us / 1_000_000
+ if file_pbar:
+ update_amount = max(0, elapsed_seconds - file_pbar.n)
+ if update_amount > 0:
+ file_pbar.update(update_amount)
+ except (ValueError, IndexError):
+ continue
+
+ process.wait()
+ if file_pbar:
+ file_pbar.close()
+
+ if process.returncode == 0:
+ if os.path.exists(temp_output_filepath) and os.path.getsize(temp_output_filepath) > 0:
+ os.rename(temp_output_filepath, final_output_filepath)
+ logs.append(f" â
Success: '{output_filename}' saved.")
+ return True, logs
+ else:
+ logs.append(f" â ī¸ Warning: ffmpeg reported success, but temp file is missing or empty.")
+ if os.path.exists(temp_output_filepath):
+ os.remove(temp_output_filepath)
+ return False, logs
+ else:
+ logs.append(f" đ¨ Error during ffmpeg processing for '{base_filename}'. RC: {process.returncode}")
+ stderr_output = process.stderr.read()
+ if stderr_output:
+ logs.append(f" ffmpeg stderr:\n{stderr_output.strip()}")
+ return False, logs
+
+
+def process_single_file(
+ filepath: str,
+ pbar_position: int,
+ args: "argparse.Namespace",
+ input_path_abs: str,
+ tqdm_lock,
+ tqdm_file_writer=sys.stderr
+) -> str:
+ """
+ Analyzes and processes a single file, managing temporary files for graceful exit.
+ """
+ file_specific_logs = []
+ final_status = "failed"
+
+ display_name = os.path.relpath(filepath, input_path_abs) if os.path.isdir(input_path_abs) else os.path.basename(filepath)
+ file_specific_logs.append(f"âļī¸ Checked: '{display_name}'")
+
+ target_languages = [lang.strip().lower() for lang in args.languages.split(',') if lang.strip()]
+
+ audio_streams_details, get_info_logs = get_stream_info(filepath, "audio")
+ file_specific_logs.extend(get_info_logs)
+
+ audio_ops_for_ffmpeg = []
+ if not audio_streams_details:
+ file_specific_logs.append(" âšī¸ No audio streams found in this file.")
+ else:
+ for stream in audio_streams_details:
+ lang = stream['language']
+ op_to_perform = None
+ channels_info = f"{stream.get('channels')}ch" if stream.get('channels') is not None else "N/Ach"
+ codec_name = stream.get('codec_name', 'unknown')
+
+ if lang in target_languages:
+ is_5_1 = stream.get('channels') == 6
+ is_not_ac3_eac3 = codec_name not in ['ac3', 'eac3']
+ if is_5_1 and is_not_ac3_eac3:
+ op_to_perform = 'transcode'
+ file_specific_logs.append(f" đ Will transcode: Audio stream #{stream['index']} ({lang}, {channels_info}, {codec_name})")
+ else:
+ op_to_perform = 'copy'
+ reason_parts = [f"already {codec_name}" if codec_name in ['ac3', 'eac3'] else None, f"not 5.1 ({channels_info})" if stream.get('channels') != 6 else None]
+ reason = ", ".join(filter(None, reason_parts)) or "meets other criteria for copying"
+ file_specific_logs.append(f" đ Will copy: Audio stream #{stream['index']} ({lang}, {channels_info}, {codec_name}) - Reason: {reason}")
+ else:
+ file_specific_logs.append(f" đ Will drop: Audio stream #{stream['index']} ({lang}, {channels_info}, {codec_name}) - Not a target language.")
+
+ if op_to_perform:
+ audio_ops_for_ffmpeg.append({'index': stream['index'], 'op': op_to_perform, 'lang': lang})
+
+ if not audio_ops_for_ffmpeg:
+ file_specific_logs.append(f" âī¸ Skipping '{display_name}': No target audio streams to process (copy/transcode).")
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ final_status = "skipped_no_ops"
+ return final_status
+
+ needs_transcode = any(op['op'] == 'transcode' for op in audio_ops_for_ffmpeg)
+ if not needs_transcode:
+ file_specific_logs.append(f" âī¸ Skipping '{display_name}': No transcoding required.")
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ final_status = "skipped_no_transcode"
+ return final_status
+
+ name, ext = os.path.splitext(os.path.basename(filepath))
+ output_filename = f"{name}_eac3{ext}"
+ output_dir_for_this_file = os.path.dirname(filepath)
+ if args.output_directory_base:
+ if os.path.isdir(input_path_abs):
+ relative_dir = os.path.relpath(os.path.dirname(filepath), start=input_path_abs)
+ output_dir_for_this_file = os.path.join(args.output_directory_base, relative_dir) if relative_dir != "." else args.output_directory_base
+ else:
+ output_dir_for_this_file = args.output_directory_base
+
+ final_output_filepath = os.path.join(output_dir_for_this_file, output_filename)
+
+ if os.path.exists(final_output_filepath) and not args.force_reprocess:
+ file_specific_logs.append(f" âī¸ Skipping: Output file already exists. Use --force-reprocess to override.")
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ final_status = "skipped_existing"
+ return final_status
+
+ if os.path.abspath(filepath) == os.path.abspath(final_output_filepath):
+ file_specific_logs.append(f" â ī¸ Warning: Input and output paths are identical. Skipping.")
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ final_status = "skipped_identical_path"
+ return final_status
+
+ if args.dry_run:
+ file_specific_logs.append(f" DRY RUN: Would process '{display_name}'. No changes will be made.")
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ final_status = "processed"
+ return final_status
+
+ if not os.path.isdir(output_dir_for_this_file):
+ try:
+ os.makedirs(output_dir_for_this_file, exist_ok=True)
+ except OSError as e:
+ file_specific_logs.append(f" đ¨ Error creating output directory '{output_dir_for_this_file}': {e}")
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ return "failed"
+
+ duration = get_video_duration(filepath)
+ if duration == 0:
+ file_specific_logs.append(f" â ī¸ Could not determine duration for '{display_name}'. Per-file progress will not be shown.")
+
+ temp_filepath = final_output_filepath + ".tmp"
+ try:
+ success, ffmpeg_logs = process_file_with_ffmpeg(
+ filepath, final_output_filepath, args.audio_bitrate,
+ audio_ops_for_ffmpeg, duration, pbar_position,
+ tqdm_lock, tqdm_file_writer
+ )
+ file_specific_logs.extend(ffmpeg_logs)
+ final_status = "processed" if success else "failed"
+ finally:
+ if os.path.exists(temp_filepath):
+ try:
+ os.remove(temp_filepath)
+ except OSError as e:
+ file_specific_logs.append(f" đ¨ Error cleaning up temp file '{temp_filepath}': {e}")
+
+ with tqdm_lock:
+ for log_msg in file_specific_logs:
+ tqdm.write(log_msg, file=tqdm_file_writer)
+ return final_status
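+
+# Minimal calling sketch (mirrors how main.py and gui.py drive this module;
+# the path and values are illustrative, and ffmpeg/ffprobe must be on PATH):
+#
+#   import argparse, sys, threading
+#   args = argparse.Namespace(languages="eng", audio_bitrate="1536k",
+#                             output_directory_base=None,
+#                             dry_run=True, force_reprocess=False)
+#   status = process_single_file("/path/to/video.mkv", 1, args,
+#                                "/path/to/video.mkv", threading.Lock(), sys.stderr)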