Spaces:
Paused
Paused
Upload 17 files
Browse files
- .gitattributes +1 -0
- modules/__init__.py +0 -0
- modules/__pycache__/__init__.cpython-311.pyc +0 -0
- modules/__pycache__/frame_creator.cpython-311.pyc +0 -0
- modules/__pycache__/frame_decoder.cpython-311.pyc +0 -0
- modules/__pycache__/img_gen.cpython-311.pyc +0 -0
- modules/__pycache__/merge.cpython-311.pyc +0 -0
- modules/__pycache__/split.cpython-311.pyc +0 -0
- modules/__pycache__/video.cpython-311.pyc +0 -0
- modules/check.py +4 -0
- modules/frame_creator.py +70 -0
- modules/frame_decoder.py +38 -0
- modules/img_gen.py +96 -0
- modules/merge.py +22 -0
- modules/read_files.py +25 -0
- modules/split.py +31 -0
- modules/video.py +57 -0
- modules/x0.png +3 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
modules/x0.png filter=lfs diff=lfs merge=lfs -text
|
modules/__init__.py
ADDED
|
File without changes
|
modules/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (142 Bytes). View file
|
|
|
modules/__pycache__/frame_creator.cpython-311.pyc
ADDED
|
Binary file (3.65 kB). View file
|
|
|
modules/__pycache__/frame_decoder.cpython-311.pyc
ADDED
|
Binary file (1.71 kB). View file
|
|
|
modules/__pycache__/img_gen.cpython-311.pyc
ADDED
|
Binary file (5.46 kB). View file
|
|
|
modules/__pycache__/merge.cpython-311.pyc
ADDED
|
Binary file (1.93 kB). View file
|
|
|
modules/__pycache__/split.cpython-311.pyc
ADDED
|
Binary file (1.88 kB). View file
|
|
|
modules/__pycache__/video.cpython-311.pyc
ADDED
|
Binary file (3.41 kB). View file
|
|
|
modules/check.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Ad-hoc sanity check: decode the first extracted frame back into a text
# file.  Uses a plain (non-relative) import, so run it from inside the
# modules/ directory.
from img_gen import gen_image, decode_img


decode_img("extracted_frames/0.png", "x.txt")
modules/frame_creator.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from .img_gen import gen_image, decode_img
import os
import re
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def ensure_dir(directory):
    """
    Ensure that *directory* exists, creating it (and any missing parents)
    if needed.

    Args:
        directory (str): The path to the directory to ensure.
    """
    if os.path.isdir(directory):
        print(f"Directory already exists: {directory}")
    else:
        # exist_ok=True guards against the race where another process
        # creates the directory between the check above and this call
        # (the original exists()/makedirs() pair could raise FileExistsError).
        os.makedirs(directory, exist_ok=True)
        print(f"Created directory: {directory}")
# Create the working directories the pipeline reads from / writes to.
for _workdir in ("./chunks", "./images", "./extracted_frames", "./recovered"):
    ensure_dir(_workdir)
def render_frame():
    """Encode every .bin chunk in ./chunks into a PNG frame under ./images.

    Chunks are visited in numeric filename order (0.bin, 1.bin, ...);
    output frames are numbered consecutively from 0.
    """
    # 📁 Ensure the directories exist
    if not os.path.exists("./chunks"):
        logging.error("Error: 'chunks' directory does not exist. 🚫")
        return

    if not os.path.exists("./images"):
        os.makedirs("./images")
        logging.info("Created 'images' directory. 📁")

    entries = os.listdir("./chunks")
    logging.info(f"Found {len(entries)} files in 'chunks' directory. 📂")

    def numeric_order(name):
        # "<number>.bin" sorts by its number; anything else sorts last.
        matched = re.match(r'(\d+)\.bin', name)
        return int(matched.group(1)) if matched else float('inf')

    entries.sort(key=numeric_order)
    logging.info("Sorted file list. 🔢")

    frame_no = 0
    for entry in entries:
        if not entry.endswith('.bin'):
            logging.warning(f"Skipping non-bin file: {entry} 🚫")
            continue
        try:
            # Encode this chunk into the next frame slot.
            source_path = os.path.join("./chunks", entry)
            gen_image(source_path, f"images/{frame_no}.png")
            logging.info(f"Processed file {entry} to images/{frame_no}.png 🖼️")
            frame_no += 1
        except Exception as e:
            logging.error(f"Error processing file {entry}: {e} ❌")
modules/frame_decoder.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from .img_gen import decode_img

def recover_binary_files(directory="extracted_frames", recovered_directory="recovered"):
    """
    Recover binary chunk files from the PNG frames in *directory*.

    Frames are decoded in NUMERIC filename order (0.png, 1.png, ..., 10.png).
    The previous plain lexicographic sort placed "10.png" before "2.png",
    which scrambled the chunk order — and therefore the reassembled file —
    whenever there were more than ten frames.

    Parameters:
        directory (str): The directory containing the extracted frames.
        recovered_directory (str): The directory to save the recovered binary files.
    """
    # Ensure the recovered directory exists (exist_ok avoids a race).
    os.makedirs(recovered_directory, exist_ok=True)

    def frame_order(name):
        # "<number>.png" sorts by its number; unexpected names sort last.
        stem, _ = os.path.splitext(name)
        return (0, int(stem)) if stem.isdigit() else (1, 0)

    png_files = sorted(
        (f for f in os.listdir(directory) if f.endswith('.png')),
        key=frame_order,
    )

    # Re-number outputs consecutively: Nth frame -> recovered/N.bin.
    for counter, png_file in enumerate(png_files):
        png_path = os.path.join(directory, png_file)
        bin_file = os.path.join(recovered_directory, f"{counter}.bin")
        decode_img(png_path, bin_file)

# Example usage
if __name__ == "__main__":
    recover_binary_files()
modules/img_gen.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
import os
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
import logging
|
| 6 |
+
|
| 7 |
+
# Configure logging
|
| 8 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 9 |
+
|
def decode_img(image_path, output_file_path):
    """
    Decode an image produced by gen_image back into its original bytes.

    Each pixel carries three bits, one per channel in R, G, B order; any
    non-zero channel value counts as a 1 bit (matching the encoder and
    tolerating slight pixel noise).  The first 4 decoded bytes are a
    big-endian size header; the payload is truncated to that size so the
    zero padding added at encode time is discarded.

    Raises:
        FileNotFoundError: if *image_path* does not exist.
        ValueError: if the file cannot be read as an image.
    """
    if not os.path.isfile(image_path):
        raise FileNotFoundError(f"The image at {image_path} does not exist.")

    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"Could not read the image at {image_path}.")

    # Vectorized bit extraction — replaces the original per-pixel Python
    # loop (millions of iterations per 1280x720 frame) with NumPy ops.
    # OpenCV stores pixels as BGR, so flip the channel axis to R, G, B.
    bits = (img[:, :, ::-1] != 0).astype(np.uint8).reshape(-1)

    # Pack groups of 8 bits (MSB first) into bytes.  np.packbits zero-pads
    # any trailing partial byte; the size header below makes that harmless
    # because padding always lies past the original payload.
    byte_array = np.packbits(bits).tobytes()

    # Extract size from the first 4 bytes (header) and truncate the
    # payload to the original size.
    original_size = int.from_bytes(byte_array[:4], byteorder='big')
    decoded_data = byte_array[4:4 + original_size]

    # Write to file
    with open(output_file_path, 'wb') as f:
        f.write(decoded_data)

    logging.info(f"INFO! Decoded data written to {output_file_path}")
def gen_image(file_path, output_path, width=1280, height=720):
    """
    Encode a file into an image, three bits per pixel.

    A 4-byte big-endian size header is prepended so decode_img can strip
    the zero padding.  Bits are laid out MSB-first in R, G, B channel
    order; a 1 bit becomes channel value 255, a 0 bit becomes 0.

    Raises:
        FileNotFoundError: if *file_path* does not exist.
        ValueError: if the data (plus header) exceeds width*height*3 bits.
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"The file at {file_path} does not exist.")

    with open(file_path, 'rb') as f:
        binary_data = f.read()

    # Add 4-byte header containing the original data size (big-endian)
    original_size = len(binary_data)
    size_header = original_size.to_bytes(4, byteorder='big')
    payload = size_header + binary_data

    # Capacity check before doing any work.
    required_length = width * height * 3
    bit_count = len(payload) * 8
    if bit_count > required_length:
        raise ValueError(f"Data too large for image: {bit_count} > {required_length}")

    # Vectorized encoding — replaces the original per-pixel Python loop.
    # Unpack bytes to bits (MSB first) and zero-pad to fill the frame.
    bits = np.unpackbits(np.frombuffer(payload, dtype=np.uint8))
    bits = np.concatenate([bits, np.zeros(required_length - bit_count, dtype=np.uint8)])

    # Reshape to (height, width, [R, G, B]), scale bits to 0/255, then
    # flip the channel axis to BGR for OpenCV (contiguous copy for imwrite).
    rgb = bits.reshape(height, width, 3) * np.uint8(255)
    image = np.ascontiguousarray(rgb[:, :, ::-1])

    cv2.imwrite(output_path, image)
    logging.info(f"INFO:👍 Image generated and saved to {output_path}")
if __name__ == "__main__":
    # Example usage: round-trip a sample file through encode + decode.
    source, frame, recovered = 'x.png', 'output.png', 'decoded.txt'
    gen_image(source, frame)
    decode_img(frame, recovered)
modules/merge.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

def merge_cunks(chunk_dir, output_file):
    """Merge the numbered chunk files in *chunk_dir* back into one file.

    Chunks are concatenated in NUMERIC filename order (0.bin, 1.bin, ...,
    10.bin).  The previous plain lexicographic sort ordered "10.bin"
    before "2.bin", corrupting the reassembled file whenever there were
    more than ten chunks.

    (The function name keeps its historical spelling, "cunks", because
    callers already depend on it.)
    """
    def chunk_order(name):
        # Numeric stems sort by value; unexpected names sort last.
        stem, _ = os.path.splitext(name)
        return (0, int(stem)) if stem.isdigit() else (1, 0)

    chunk_files = sorted(
        (f for f in os.listdir(chunk_dir) if f.endswith('.bin')),
        key=chunk_order,
    )

    with open(output_file, 'wb') as output_f:
        for chunk_file in chunk_files:
            chunk_path = os.path.join(chunk_dir, chunk_file)
            with open(chunk_path, 'rb') as chunk_f:
                output_f.write(chunk_f.read())

    print(f"Merged {len(chunk_files)} chunks into '{output_file}'")
if __name__ == "__main__":
    # Reassemble the chunks produced by split.py into one file.
    chunk_dir = "chunks"  # Directory containing the chunk files
    output_file = "merged_output.txt"  # Name of the merged output file

    print("Merging chunks...")
    merge_cunks(chunk_dir, output_file)
    print(f"Chunks merged into '{output_file}'")
modules/read_files.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def file_to_binary(filepath):
    """
    Read *filepath* in binary mode and return its contents as a string of
    '0'/'1' characters (eight bits per byte, MSB first).

    Args:
        filepath (str): The path to the file.

    Returns:
        str | None: The binary-string representation, or None when the
        file is missing or any other error occurs (a message is printed
        in that case).
    """
    try:
        with open(filepath, 'rb') as handle:
            raw = handle.read()
    except FileNotFoundError:
        print("Error: File not found.")
        return None
    except Exception as e:
        print(f"Error: {e}")
        return None
    # Render every byte as a fixed-width 8-bit field and concatenate.
    return ''.join(f"{byte:08b}" for byte in raw)
modules/split.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

def split_file(input_file, chunk_size_mb=0.329, output_dir="chunks"):
    """Split *input_file* into fixed-size chunks saved in *output_dir*.

    Chunks are written as 0.bin, 1.bin, ... of at most *chunk_size_mb*
    megabytes each.  (The previous docstring claimed "2.6MB chunks",
    which did not match the actual ~0.329 MB default.)

    Args:
        input_file (str): Path of the file to split.
        chunk_size_mb (float): Maximum chunk size in megabytes.
        output_dir (str): Directory that receives the chunk files.

    Returns:
        list[str]: Paths of the chunk files, in numeric creation order.
    """
    chunk_size_bytes = int(chunk_size_mb * 1024 * 1024)  # Convert MB to Bytes

    # Create chunks directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)

    chunk_files = []
    with open(input_file, 'rb') as f:
        chunk_index = 0
        while True:
            chunk = f.read(chunk_size_bytes)
            if not chunk:
                break
            chunk_file = os.path.join(output_dir, f'{chunk_index}.bin')
            with open(chunk_file, 'wb') as chunk_f:
                chunk_f.write(chunk)
            chunk_files.append(chunk_file)
            chunk_index += 1

    return chunk_files
if __name__ == "__main__":
    # Split a sample file into numbered .bin chunks.
    input_file = "input.txt"  # Ensure this file exists
    output_dir = "chunks"

    print("Splitting file...")
    chunk_files = split_file(input_file, output_dir=output_dir)
    print(f"File split into {len(chunk_files)} chunks, saved in '{output_dir}/'")
modules/video.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import os
|
| 3 |
+
import hashlib
|
| 4 |
+
import numpy as np
|
| 5 |
+
from moviepy import ImageSequenceClip
|
| 6 |
+
|
def create_video_from_images(image_folder="images", output_video="videos/output_video.mp4", fps=30):
    """Create MP4 with lossless encoding using MoviePy"""
    os.makedirs(os.path.dirname(output_video), exist_ok=True)

    # Collect the PNG frames in numeric order (0.png, 1.png, ..., 10.png).
    def frame_number(path):
        return int(os.path.basename(path).split('.')[0])

    frames = [os.path.join(image_folder, name)
              for name in os.listdir(image_folder)
              if name.endswith('.png')]
    frames.sort(key=frame_number)

    clip = ImageSequenceClip(frames, fps=fps)

    # Lossless FFmpeg settings so every pixel (and therefore every
    # encoded bit) survives the video round trip.
    clip.write_videofile(
        output_video,
        codec='libx264rgb',
        preset='ultrafast',
        ffmpeg_params=[
            '-crf', '0',          # Lossless
            '-color_range', '2',  # Full color range
            '-pix_fmt', 'bgr24'   # Match OpenCV's BGR format
        ]
    )
def extract_frames_from_video(video_path="videos/output_video.mp4", output_folder="extracted_frames"):
    """Extract frames with OpenCV and hash verification"""
    os.makedirs(output_folder, exist_ok=True)

    cap = cv2.VideoCapture(video_path)

    frame_count = 0
    success, frame = cap.read()
    while success:
        # Write the frame as an uncompressed PNG so no pixel data changes
        # on disk.
        output_path = os.path.join(output_folder, f"{frame_count}.png")
        cv2.imwrite(output_path, frame, [cv2.IMWRITE_PNG_COMPRESSION, 0])

        # Log a SHA-256 of the written file so runs can be compared by hand.
        with open(output_path, 'rb') as f:
            current_hash = hashlib.sha256(f.read()).hexdigest()
        print(f"Frame {frame_count:04d} hash: {current_hash}")

        frame_count += 1
        success, frame = cap.read()

    cap.release()
modules/x0.png
ADDED
|
Git LFS Details
|