Compare commits
No commits in common. "4d8c19db8f62c583a6d760041c429a831b77c4e2" and "970e63e19bcaa5aac1c6daea8df8f6dfcc31d183" have entirely different histories.
4d8c19db8f...970e63e19b
9 changed files with 35 additions and 168 deletions
dcc/check.py (new file, +10)
@@ -0,0 +1,10 @@
import dcc.config
import dcc.doom_base
import wand.display
import wand.image

class Check(dcc.doom_base.WadMap):
    def take_action(self, parsed_args):
        with wand.image.Image(filename=dcc.config.BaseThumbPath(parsed_args.wad, parsed_args.map)) as img:
            print("Image is {}x{}.".format(img.width, img.height))
            wand.display.display(img)
dcc/concat.py (deleted, -119)
@@ -1,119 +0,0 @@
import av
import copy
import dcc.doom_base
import fractions
import io
import logging
import math
import numpy as np
import wand.image

class Concat(dcc.doom_base.Wad):
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument("start_map")
        parser.add_argument("end_map")
        return parser

    def take_action(self, parsed_args):
        logging.basicConfig()
        av.logging.set_level(av.logging.VERBOSE)
        av.logging.restore_default_callback()
        videos = self.fabricate.joinpath(parsed_args.wad).glob(f"{parsed_args.wad}_map*.mp4")
        output = av.open(self.fabricate.joinpath(parsed_args.wad).joinpath(f"{parsed_args.wad}_maps{parsed_args.start_map}to{parsed_args.end_map}.mp4"), "w")
        offset = 0
        # We'd like to use the concat filter here and connect everything into a
        # single filter graph... but it produces a "Resource temporarily
        # unavailable" error when switching to inputs after the first. Presumably
        # fixable, but it's easier to just make one graph per video and mux
        # everything together at the end.
        for v in sorted(videos):
            # TODO: Support UDoom in literally any way.
            if not (v.name >= f"{parsed_args.wad}_map{parsed_args.start_map}.mp4" and
                    v.name <= f"{parsed_args.wad}_map{parsed_args.end_map}.mp4"):
                continue

            chunk = av.open(v)
            ograph = av.filter.Graph()
            sink = ograph.add("buffersink")
            asink = ograph.add("abuffersink")

            img = wand.image.Image(height=chunk.streams[0].height,width=chunk.streams[0].width)
            mapstring = v.name[-6:-4]
            text = self._config["map_names"][f"map{mapstring}"]
            dcc.text.draw_text(img, f"MAP{mapstring}: {text}", font_size=120)
            img.trim(reset_coords=True)
            img.border("graya(25%, 25%)", 10, 10)
            img.border(dcc.config.TEXT_STROKE_COLOR, 16, 16)
            # for this to work... the image needs to have a width that's a multiple
            # of 8. dude whyyyyyyy
            padfactor=8
            img.border("transparent", padfactor, 0)
            img.crop(width=img.width-img.width%padfactor, height=img.height)
            text_frame = av.video.frame.VideoFrame(img.width, img.height, format="rgba")

            if len(output.streams.get()) == 0:
                # TODO: less hardcoding.
                output.add_stream("h264", rate=61440)
                output.streams[0].extradata = copy.deepcopy(chunk.streams[0].extradata)
                output.streams[0].height=1440
                output.streams[0].width=2560
                output.streams[0].profile="High"
                output.streams[0].qmax = chunk.streams[0].qmax
                output.streams[0].qmin = chunk.streams[0].qmin
                output.streams[0].codec_context.gop_size=30
                output.streams[0].codec_context.max_b_frames=2
                output.streams[0].codec_context.framerate = fractions.Fraction(60,1)
                output.streams[0].codec_context.pix_fmt="yuv420p"
                output.streams[0].codec_context.bit_rate = chunk.streams[0].codec_context.bit_rate
                output.add_stream("aac", rate=48000)
                output.streams[1].extradata = copy.deepcopy(output.streams[1].extradata)
                output.streams[1].rate=48000
                output.streams[1].bit_rate=chunk.streams[1].bit_rate
            src = ograph.add_buffer(template=chunk.streams[0], time_base=chunk.streams[0].time_base)
            asrc = ograph.add_abuffer(template=chunk.streams[1], time_base=chunk.streams[1].time_base)
            overlay = ograph.add_buffer(width=img.width, height=img.height, format="rgba", time_base=chunk.streams[0].time_base)
            overlay_fo = ograph.add("fade", args="out:240:60")
            overlay.link_to(overlay_fo, 0, 0)
            composite = ograph.add("overlay", args="x=4:y=4")
            src.link_to(composite, 0, 0)
            overlay_fo.link_to(composite, 0, 1)
            ifade = ograph.add("fade", args="in:0:60")
            iafade = ograph.add("afade", args="in:{}:48000".format(offset*48000/1000000))
            ofade = ograph.add("fade", args="out:{}:60".format((chunk.duration*60/1000000)-60))
            oafade = ograph.add("afade", args="out:{}:48000".format(((offset+chunk.duration)*48000/1000000)-48000))
            composite.link_to(ifade, 0, 0)
            asrc.link_to(iafade, 0, 0)
            ifade.link_to(ofade, 0, 0)
            iafade.link_to(oafade, 0, 0)
            ofade.link_to(sink, 0, 0)
            oafade.link_to(asink, 0, 0)

            ograph.configure()
            for packet in chunk.demux():
                if packet.dts is None:
                    continue
                packet.dts += (offset * packet.time_base.denominator) / (packet.time_base.numerator * 1000000)
                packet.pts += (offset * packet.time_base.denominator) / (packet.time_base.numerator * 1000000)
                if packet.stream_index == 0: # TODO: robustness
                    for ifr in packet.decode():
                        text_frame = av.video.frame.VideoFrame(img.width, img.height, format="rgba")
                        text_frame.planes[0].update(img.make_blob(format="rgba"))
                        text_frame.pts = ifr.pts
                        text_frame.dts = ifr.dts
                        text_frame.time_base = ifr.time_base
                        overlay.push(text_frame)
                        src.push(ifr)
                        ofr = sink.pull()
                        for p in output.streams[packet.stream_index].encode(ofr):
                            output.mux(p)
                else:
                    for ifr in packet.decode():
                        asrc.push(ifr)
                        ofr = asink.pull()
                        for p in output.streams[packet.stream_index].encode(ofr):
                            output.mux(p)
            offset += chunk.duration
            chunk.close()
        output.close()
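
The dts/pts adjustment in the demux loop above rescales the running offset, which is kept in microseconds (FFmpeg's AV_TIME_BASE of 1,000,000), into the stream's own time_base. A quick worked example of that conversion; the 1/15360 time_base is a typical mp4 value assumed for illustration, not taken from the repo:

# Worked example (ours, not repo code): rescale a 30 s offset from
# microseconds into stream ticks, exactly as the loop above does per packet.
import fractions

offset = 30_000_000                        # container offset in microseconds
time_base = fractions.Fraction(1, 15360)   # assumed video stream time_base
shift = (offset * time_base.denominator) / (time_base.numerator * 1000000)
print(shift)  # 460800.0 -> 30 s expressed in 1/15360 ticks
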
@@ -32,10 +32,6 @@ class Base(Command):
        for d in ("iwads", "pwads", "demos", "fabricate"):
            self._init_path(d)

    def run(self, parsed_args):
        self.init_base(parsed_args)
        self.take_action(parsed_args)

    def _init_path(self, what):
        setattr(self, f"_{what}", self.doom.joinpath(self._config.get(what, what)))
        setattr(type(self), what, property(lambda self: getattr(self, f"_{what}")))
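
The _init_path lines in this hunk stash each resolved directory on the instance and then install a read-only property on the class at runtime. A minimal standalone sketch of that pattern; the example class, paths, and config values are illustrative, not repo code:

# Illustration only: a stripped-down Base showing the _init_path pattern.
from pathlib import Path

class Base:
    def __init__(self, doom, config):
        self.doom = Path(doom)
        self._config = config
        for d in ("iwads", "pwads", "demos", "fabricate"):
            self._init_path(d)

    def _init_path(self, what):
        # Store the resolved path on the instance...
        setattr(self, f"_{what}", self.doom.joinpath(self._config.get(what, what)))
        # ...and expose it as a property on the class, closing over `what`.
        setattr(type(self), what, property(lambda self: getattr(self, f"_{what}")))

b = Base("/home/user/doom", {"pwads": "wads/pwads"})
print(b.pwads)  # /home/user/doom/wads/pwads
print(b.demos)  # /home/user/doom/demos (key missing, falls back to the name)
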
@@ -3,7 +3,6 @@ from cliff.command import Command
import dcc.config
import io
import os
import re
import tomlkit

class Wad(dcc.config.Base):

@@ -18,12 +17,12 @@ class Wad(dcc.config.Base):
        wcp = self.pwads.joinpath(self.wad).joinpath(self.config_name)
        if wcp.exists():
            self._wad_config = tomlkit.toml_file.TOMLFile(wcp).read()
            self._config.update(self._wad_config.value)
            #for k,v in self._wad_config.value.items():
            #self._config.add(k,v)
            for k,v in self._wad_config.value.items():
                print(k,v)
                self._config.add(k,v)

    def run(self, parsed_args):
        self.wad_init(parsed_args)
        self.wad_init(self, parsed_args)
        self.take_action(parsed_args)

    @property

@@ -73,7 +72,7 @@ class WadMap(Wad):
    def complevel(self):
        complevel = self.pwads.joinpath(self.wad).joinpath("complevel")
        if not complevel.exists():
            raise Exception("No complevel set for wad {}.".format(self.wad))
            raise Exception("No complevel set in PWAD dir {}.".format(pwadpath))

        with io.open(complevel) as f:
            return f.read().strip()
@@ -22,6 +22,7 @@ class Fabricate(dcc.doom_base.WadMap):
            list.append(options, f"{k}={v}")
        if len(options) > 0:
            options = ["-assign", ",".join(options)]
        print(options)
        subprocess.run(command + self.dsda_preamble() + options +
                       ["-timedemo", self.demo_in_path()] +
                       ["-viddump", self.video_path()])
@@ -17,9 +17,7 @@ class List(dcc.config.Base):
            case "demos":
                self.list(x.name for x in os.scandir(self.demos.joinpath(parsed_args.wad)) if x.name.endswith(".lmp"))
            case "videos":
                self.list(x.name for x in os.scandir(self.fabricate.joinpath(parsed_args.wad)) if x.name.endswith(".mp4"))
            case _:
                raise Exception(f"unknown target {parsed_args.target}")
                self.list(x.name for x in os.scandir(self.output.joinpath(parsed_args.wad)) if x.name.endswith(".mp4"))

    def list(self, gen):
        # TODO: fancy text?
dcc/text.py (34 changes)
@@ -3,36 +3,32 @@ import sys
import wand.drawing
import wand.image

def draw_text(img, text, font_size=64):
    with wand.drawing.Drawing() as draw:
        draw.font = dcc.config.FONT
        draw.font_size=font_size
        draw.fill_color=wand.color.Color(dcc.config.TEXT_FILL_COLOR)
        draw.stroke_color=wand.color.Color(dcc.config.TEXT_STROKE_COLOR)
        draw.stroke_width=font_size*5/32
        draw.text_interline_spacing=-font_size/4
        draw.text(5,int(draw.font_size)+5,text)
        draw(img)
        draw.stroke_color=wand.color.Color("none")
        draw.stroke_width=0
        draw.text(5,int(draw.font_size)+5,text)
        draw(img)


class Text(dcc.doom_base.WadMap):
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument("--nomap", action="store_true")
        parser.add_argument("--demotype", default="UV-Max Demo")
        parser.add_argument("--demotype", default="UV-Max")
        return parser

    def take_action(self, parsed_args):
        text = sys.stdin.read().rstrip()
        if not parsed_args.nomap:
            text = "MAP{}: {}".format(parsed_args.map, text)
        text = "{}\n{}".format(text, parsed_args.demotype)
        text = "{}\n{} Demo".format(text, parsed_args.demotype)
        with wand.image.Image(height=dcc.config.THUMB_HEIGHT,width=dcc.config.THUMB_WIDTH) as img:
            draw_text(img, text)
            with wand.drawing.Drawing() as draw:
                draw.font = dcc.config.FONT
                draw.font_size=64
                draw.fill_color=wand.color.Color(dcc.config.TEXT_FILL_COLOR)
                draw.stroke_color=wand.color.Color(dcc.config.TEXT_STROKE_COLOR)
                draw.stroke_width=10
                draw.text_interline_spacing=-16
                draw.text(5,int(draw.font_size),text)
                draw(img)
                draw.stroke_color=wand.color.Color("none")
                draw.stroke_width=0
                draw.text(5,int(draw.font_size),text)
                draw(img)
            img.trim()
            img.reset_coords()
            img.save(filename=self.text_thumb_path())
pyav_by8 (deleted, -13)
@@ -1,13 +0,0 @@
try to create an image with a custom buffer, format rgba
image is 278x57
got 63384 bytes, need 63840 bytes
63384=278*57*4, checks out. why 63840?
factor 63384=2*2*2*3*19*139
factor 63840=2*2*2*2*2*3*5*7*19
factor 278=2*139
factor 57=3*19
factor 4=2*2
63840 doesn't have 139 as a factor... if we divide out the other two (57*4=228) we get 2*2*2*5*7=280
is it padding an extra pixel on each side? no, reducing video frame width to 276 still demands 63840 bytes.
does stretching the image to 280 wide work? yes! so it has to be a multiple of 5... or 10, or 20, and larger than the requested dimension.
after some further fiddling, it's 8.
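
These notes are where the padfactor=8 cropping in the deleted concat.py above comes from: the RGBA blob handed to the frame plane must match a width rounded to a multiple of 8. A minimal sketch of that workaround, assuming wand and PyAV; the helper name make_text_frame is ours, not repo code:

# Sketch (not repo code): keep the overlay width a multiple of 8 so the RGBA
# blob from wand matches the buffer size PyAV expects for the frame plane.
import av
import wand.image

PAD_FACTOR = 8  # empirically, the frame width must be a multiple of 8

def make_text_frame(img):
    # Pad with transparent pixels first so the crop only trims padding.
    img.border("transparent", PAD_FACTOR, 0)
    img.crop(width=img.width - img.width % PAD_FACTOR, height=img.height)
    frame = av.VideoFrame(img.width, img.height, format="rgba")
    frame.planes[0].update(img.make_blob(format="rgba"))  # sizes now agree, e.g. 280*57*4 = 63840
    return frame
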
setup.py (1 change)
@@ -36,7 +36,6 @@ setup(
            'eureka = dcc.eureka:Eureka',
            'ls = dcc.ls:List',
            'configure = dcc.configure:Configure',
            'concat = dcc.concat:Concat',
        ],
    },
    zip_safe=False,