Compare commits

...

6 commits

Author SHA1 Message Date
218928f033 PEP 8 compliance. 2025-06-15 02:16:59 -04:00
b54b865977 PEP 8 compliance. 2025-06-15 01:16:10 -04:00
3f1dad2b43 PEP 8 compliance. 2025-06-15 01:12:52 -04:00
e5656378ac PEP 8 compliance. 2025-06-15 01:11:01 -04:00
ff384678de PEP 8 compliance. 2025-06-14 22:40:05 -04:00
e8d06c616f Generally streamline and break the concat routine into functions.
Ensure the whole thing is PEP 8 compliant.  Also output a text file
summary for chapter markers.
2025-06-14 02:55:43 -04:00
6 changed files with 364 additions and 251 deletions

View file

@ -8,6 +8,7 @@ import math
import numpy as np import numpy as np
import wand.image import wand.image
class Concat(dcc.doom_base.Wad): class Concat(dcc.doom_base.Wad):
def get_parser(self, prog_name): def get_parser(self, prog_name):
parser = super().get_parser(prog_name) parser = super().get_parser(prog_name)
@ -20,120 +21,191 @@ class Concat(dcc.doom_base.Wad):
logging.basicConfig() logging.basicConfig()
av.logging.set_level(av.logging.VERBOSE) av.logging.set_level(av.logging.VERBOSE)
av.logging.restore_default_callback() av.logging.restore_default_callback()
videos = self.fabricate.joinpath(parsed_args.wad).glob(f"{parsed_args.wad}_map*.mp4") videos = (
output = av.open(self.fabricate.joinpath(parsed_args.wad).joinpath(f"{parsed_args.wad}_maps{parsed_args.start_map}to{parsed_args.end_map}.mp4"), "w") self.fabricate.joinpath(parsed_args.wad)
offset = 0 .glob(f"{parsed_args.wad}_map*.mp4")
)
fn_base = (
f"{parsed_args.wad}_maps{parsed_args.start_map}"
+ f"to{parsed_args.end_map}"
)
output = av.open(
self.fabricate.joinpath(parsed_args.wad).joinpath(
f"{fn_base}.mp4"), "w"
)
summary_file = open(
self.fabricate.joinpath(parsed_args.wad).joinpath(
f"{fn_base}.txt"), "w"
)
self._offset = 0
summary = []
# We'd like to use the concat filter here and connect everything into a # We'd like to use the concat filter here and connect everything into a
# single filter graph... but it produces a "Resource temporarily # single filter graph... but it produces a "Resource temporarily
# unavailable" error when switching to inputs after the first. Presumably # unavailable" error when switching to inputs after the first.
# fixable, but it's easier to just make one graph per video and mux # Presumably fixable, but it's easier to just make one graph per video
# everything together at the end. # and mux everything together at the end.
for v in sorted(videos): for v in sorted(videos):
# TODO: Support UDoom in literally any way. # TODO: Support UDoom in literally any way.
if not (v.name >= f"{parsed_args.wad}_map{parsed_args.start_map}.mp4" and if not (
v.name <= f"{parsed_args.wad}_map{parsed_args.end_map}.mp4"): v.name >= f"{parsed_args.wad}_map{parsed_args.start_map}.mp4"
and v.name <= f"{parsed_args.wad}_map{parsed_args.end_map}.mp4"
):
continue continue
start_time = self._offset / 1000000
chunk = av.open(v) text = self._add_chunk(v, output, not parsed_args.nooverlay)
if not (len(chunk.streams.video) == 1 and len(chunk.streams.audio) == 1): list.append(
raise Exception(f"irregular chunk {v}: streams {chunk.streams} (expected 1 video & 1 audio)") summary, f"{text} {math.floor(start_time / 60):02}:"
+ f"{math.floor(start_time % 60):02}"
ograph = av.filter.Graph() )
sink = ograph.add("buffersink")
asink = ograph.add("abuffersink")
if not parsed_args.nooverlay:
img = wand.image.Image(height=chunk.streams[0].height,width=chunk.streams[0].width)
mapstring = v.name[-6:-4]
text = self._config["map_names"][f"map{mapstring}"]
dcc.text.draw_text(img, f"MAP{mapstring}: {text}", font_size=120)
img.trim(reset_coords=True)
img.border("graya(25%, 25%)", 10, 10)
img.border(dcc.config.TEXT_STROKE_COLOR, 16, 16)
# for this to work... the image needs to have a width that's a multiple
# of 8. dude whyyyyyyy
padfactor=8
img.border("transparent", padfactor, 0)
img.crop(width=img.width-img.width%padfactor, height=img.height)
if len(output.streams.get()) == 0:
# We can't use the input stream as a template here; it doesn't
# have everything needed to do encoding and will fail
# mysteriously later.
vs = chunk.streams.video[0]
output.add_stream("h264", rate=int(vs.time_base.denominator/vs.time_base.numerator))
output.streams[0].extradata = copy.deepcopy(vs.extradata)
output.streams[0].height=vs.height
output.streams[0].width=vs.width
output.streams[0].qmax = vs.qmax
output.streams[0].qmin = vs.qmin
output.streams[0].codec_context.bit_rate = vs.codec_context.bit_rate
output.streams[0].codec_context.framerate = vs.base_rate
output.streams[0].codec_context.pix_fmt = vs.codec_context.pix_fmt
# The following are only used for encoding and have no equivalent on the input stream.
output.streams[0].profile="High"
output.streams[0].codec_context.gop_size=30
output.streams[0].codec_context.max_b_frames=2
astr = chunk.streams.audio[0]
output.add_stream("aac", rate=astr.rate)
output.streams[1].extradata = copy.deepcopy(astr.extradata)
output.streams[1].bit_rate=astr.bit_rate
src = ograph.add_buffer(template=chunk.streams.video[0], time_base=chunk.streams.video[0].time_base)
asrc = ograph.add_abuffer(template=chunk.streams.audio[0], time_base=chunk.streams.audio[0].time_base)
# TODO: video fades are absolute relative to the input video; audio
# fades need to have their timestamps offset by the position in the
# final video. Clarify if this is really necessary.
frame_rate = chunk.streams.video[0].base_rate
sample_rate = chunk.streams.audio[0].rate
ifade = ograph.add("fade", args="in:0:{}".format(frame_rate))
ofade = ograph.add("fade", args="out:{}:{}".format((chunk.duration*frame_rate/1000000)-frame_rate, frame_rate))
iafade = ograph.add("afade", args="in:{}:{}".format(offset*sample_rate/1000000, sample_rate))
oafade = ograph.add("afade", args="out:{}:{}".format(((offset+chunk.duration)*sample_rate/1000000)-sample_rate, sample_rate))
if not parsed_args.nooverlay:
overlay = ograph.add_buffer(width=img.width, height=img.height, format="rgba", time_base=chunk.streams[0].time_base)
overlay_fo = ograph.add("fade", args="out:{}:{}".format(4*frame_rate, frame_rate))
overlay.link_to(overlay_fo, 0, 0)
composite = ograph.add("overlay", args="x=4:y=4")
src.link_to(composite, 0, 0)
overlay_fo.link_to(composite, 0, 1)
composite.link_to(ifade, 0, 0)
else:
src.link_to(ifade, 0, 0)
asrc.link_to(iafade, 0, 0)
ifade.link_to(ofade, 0, 0)
iafade.link_to(oafade, 0, 0)
ofade.link_to(sink, 0, 0)
oafade.link_to(asink, 0, 0)
ograph.configure()
for packet in chunk.demux():
if packet.dts is None:
continue
packet.dts += (offset * packet.time_base.denominator) / (packet.time_base.numerator * 1000000)
packet.pts += (offset * packet.time_base.denominator) / (packet.time_base.numerator * 1000000)
if packet.stream == chunk.streams.video[0]:
for ifr in packet.decode():
if not parsed_args.nooverlay:
text_frame = av.video.frame.VideoFrame(img.width, img.height, format="rgba")
text_frame.planes[0].update(img.make_blob(format="rgba"))
text_frame.pts = ifr.pts
text_frame.dts = ifr.dts
text_frame.time_base = ifr.time_base
overlay.push(text_frame)
src.push(ifr)
ofr = sink.pull()
for p in output.streams[packet.stream_index].encode(ofr):
output.mux(p)
else:
for ifr in packet.decode():
asrc.push(ifr)
ofr = asink.pull()
for p in output.streams[packet.stream_index].encode(ofr):
output.mux(p)
offset += chunk.duration
chunk.close()
output.close() output.close()
for line in summary:
summary_file.write(f"{line}\n")
summary_file.close()
def _add_chunk(self, v, output, overlay):
    """Transcode one per-map video file into the combined output.

    Decodes the single video and single audio stream of ``v``, runs each
    through a per-chunk filter graph (fade-in/out, optional title-card
    overlay), offsets packet timestamps by the running ``self._offset``
    (microseconds), and muxes the encoded result into ``output``.

    :param v: path of the input chunk (one ``..._mapNN.mp4`` file).
    :param output: the already-open ``av`` output container; its streams
        are created lazily from the first chunk seen.
    :param overlay: truthy to render the map-name title card.
    :return: the map name text used for the overlay ("" when disabled),
        which the caller records in the chapter summary.
    :raises Exception: if the chunk does not have exactly one video and
        one audio stream.
    """
    chunk = av.open(v)
    if not (len(chunk.streams.video) == 1
            and len(chunk.streams.audio) == 1):
        raise Exception(
            f"irregular chunk {v}: streams {chunk.streams} "
            + f"(expected 1 video & 1 audio)"
        )
    ograph = av.filter.Graph()
    sink = ograph.add("buffersink")
    asink = ograph.add("abuffersink")
    text = ""
    if overlay:
        # Render the map-name title card into an RGBA image sized to the
        # video frame; it is trimmed to the text plus borders below.
        img = wand.image.Image(
            height=chunk.streams[0].height,
            width=chunk.streams[0].width
        )
        # Map number comes from the filename suffix "...mapNN.mp4".
        mapstring = v.name[-6:-4]
        text = self._config["map_names"][f"map{mapstring}"]
        dcc.text.draw_text(img, f"MAP{mapstring}: {text}", font_size=120)
        img.trim(reset_coords=True)
        img.border("graya(25%, 25%)", 10, 10)
        img.border(dcc.config.TEXT_STROKE_COLOR, 16, 16)
        # for this to work... the image needs to have a width that's a
        # multiple of 8. dude whyyyyyyy
        padfactor = 8
        img.border("transparent", padfactor, 0)
        img.crop(
            width=img.width - img.width % padfactor,
            height=img.height
        )
    if len(output.streams.get()) == 0:
        # First chunk: create the output streams.
        # We can't use the input stream as a template here; it doesn't
        # have everything needed to do encoding and will fail
        # mysteriously later.
        vs = chunk.streams.video[0]
        vr = int(vs.time_base.denominator/vs.time_base.numerator)
        ovs = output.add_stream("h264", rate=vr)
        ovs.extradata = copy.deepcopy(vs.extradata)
        ovs.height = vs.height
        ovs.width = vs.width
        ovs.qmax = vs.qmax
        ovs.qmin = vs.qmin
        ovs.codec_context.bit_rate = vs.codec_context.bit_rate
        ovs.codec_context.framerate = vs.base_rate
        ovs.codec_context.pix_fmt = vs.codec_context.pix_fmt
        # The following are only used for encoding and have no equivalent
        # on the input stream.
        ovs.profile = "High"
        ovs.codec_context.gop_size = 30
        ovs.codec_context.max_b_frames = 2
        astr = chunk.streams.audio[0]
        oas = output.add_stream("aac", rate=astr.rate)
        oas.extradata = copy.deepcopy(astr.extradata)
        oas.bit_rate = astr.bit_rate
    src = ograph.add_buffer(
        template=chunk.streams.video[0],
        time_base=chunk.streams.video[0].time_base
    )
    asrc = ograph.add_abuffer(
        template=chunk.streams.audio[0],
        time_base=chunk.streams.audio[0].time_base
    )
    # TODO: video fades are absolute relative to the input video; audio
    # fades need to have their timestamps offset by the position in the
    # final video. Clarify if this is really necessary.
    frame_rate = chunk.streams.video[0].base_rate
    sample_rate = chunk.streams.audio[0].rate
    # Fade args are "in|out:start:duration" in frames (fade) or samples
    # (afade); chunk.duration and self._offset are in microseconds.
    ifade = ograph.add("fade", args="in:0:{}".format(frame_rate))
    ofade_start = (chunk.duration * frame_rate / 1000000) - frame_rate
    ofade = ograph.add("fade", args=f"out:{ofade_start}:{frame_rate}")
    iafade_start = self._offset * sample_rate / 1000000
    iafade = ograph.add("afade", args=f"in:{iafade_start}:{sample_rate}")
    oafade_start = (
        (self._offset + chunk.duration) * sample_rate / 1000000
        - sample_rate
    )
    oafade = ograph.add("afade", args=f"out:{oafade_start}:{sample_rate}")
    if overlay:
        # NOTE(review): this rebinds the boolean parameter `overlay` to
        # the buffer source node; later `if overlay:` checks still work
        # because the node is truthy, but the shadowing is subtle.
        overlay = ograph.add_buffer(
            width=img.width, height=img.height,
            format="rgba", time_base=chunk.streams.video[0].time_base
        )
        # Fade the title card out after ~4 seconds.
        overlay_fo = ograph.add(
            "fade", args=f"out:{4 * frame_rate}:{frame_rate}"
        )
        overlay.link_to(overlay_fo, 0, 0)
        composite = ograph.add("overlay", args="x=4:y=4")
        src.link_to(composite, 0, 0)
        overlay_fo.link_to(composite, 0, 1)
        composite.link_to(ifade, 0, 0)
    else:
        src.link_to(ifade, 0, 0)
    asrc.link_to(iafade, 0, 0)
    ifade.link_to(ofade, 0, 0)
    iafade.link_to(oafade, 0, 0)
    ofade.link_to(sink, 0, 0)
    oafade.link_to(asink, 0, 0)
    ograph.configure()
    for packet in chunk.demux():
        # Flush packets carry no timestamp; skip them.
        if packet.dts is None:
            continue
        # Shift this packet into the combined timeline: offset is in
        # microseconds, converted into the packet's own time_base units.
        pof = (
            (self._offset * packet.time_base.denominator)
            / (packet.time_base.numerator * 1000000)
        )
        packet.dts += pof
        packet.pts += pof
        if packet.stream == chunk.streams.video[0]:
            for ifr in packet.decode():
                if overlay:
                    overlay.push(self._make_text_frame(img, ifr))
                src.push(ifr)
                ofr = sink.pull()
                for p in output.streams[packet.stream_index].encode(ofr):
                    output.mux(p)
        else:
            for ifr in packet.decode():
                asrc.push(ifr)
                ofr = asink.pull()
                for p in output.streams[packet.stream_index].encode(ofr):
                    output.mux(p)
    self._offset += chunk.duration
    chunk.close()
    return text
def _make_text_frame(self, img, ifr):
    """Build an RGBA overlay frame from *img*, timestamped like *ifr*.

    Each frame pushed into the graph needs memory it owns, so the
    rendered image blob is copied into a fresh VideoFrame every call.
    """
    frame = av.video.frame.VideoFrame(img.width, img.height, format="rgba")
    frame.planes[0].update(img.make_blob(format="rgba"))
    # Mirror the decoded frame's timing so the overlay stays in sync.
    frame.time_base = ifr.time_base
    frame.pts = ifr.pts
    frame.dts = ifr.dts
    return frame

View file

@ -6,57 +6,62 @@ import tomlkit
from cliff.command import Command from cliff.command import Command
THUMB_WIDTH=1280 THUMB_WIDTH = 1280
THUMB_HEIGHT=720 THUMB_HEIGHT = 720
FONT="League-Spartan-Bold" FONT = "League-Spartan-Bold"
TEXT_FILL_COLOR="white" TEXT_FILL_COLOR = "white"
TEXT_STROKE_COLOR="srgb(176,0,0)" TEXT_STROKE_COLOR = "srgb(176,0,0)"
MIRROR="https://youfailit.net/pub/idgames" # NYC MIRROR = "https://youfailit.net/pub/idgames" # NYC
class Base(Command): class Base(Command):
def get_parser(self, prog_name): def get_parser(self, prog_name):
parser = super().get_parser(prog_name) parser = super().get_parser(prog_name)
parser.add_argument('--doom', default=pathlib.Path.home().joinpath("doom")) parser.add_argument(
parser.add_argument('--config-name', default='config.toml') '--doom', default=pathlib.Path.home().joinpath("doom"))
return parser parser.add_argument('--config-name', default='config.toml')
return parser
def init_base(self, parsed_args): def init_base(self, parsed_args):
self._doom = pathlib.Path(parsed_args.doom) self._doom = pathlib.Path(parsed_args.doom)
self._config_name = parsed_args.config_name self._config_name = parsed_args.config_name
self._config = tomlkit.toml_file.TOMLFile(self.doom.joinpath(self.config_name)).read() self._config = tomlkit.toml_file.TOMLFile(
self._dsda = self._config.get("dsda") self.doom.joinpath(self.config_name)).read()
if self.dsda is None: self._dsda = self._config.get("dsda")
raise Exception(f"required key 'dsda' not set in config {self.doom.joinpath(self.config_name)}.") if self.dsda is None:
for d in ("iwads", "pwads", "demos", "fabricate"): raise Exception(
self._init_path(d) "required key 'dsda' not set in config "
+ f"{self.doom.joinpath(self.config_name)}.")
for d in ("iwads", "pwads", "demos", "fabricate"):
self._init_path(d)
def run(self, parsed_args): def run(self, parsed_args):
self.init_base(parsed_args) self.init_base(parsed_args)
self.take_action(parsed_args) self.take_action(parsed_args)
def _init_path(self, what): def _init_path(self, what):
setattr(self, f"_{what}", self.doom.joinpath(self._config.get(what, what))) setattr(
setattr(type(self), what, property(lambda self: getattr(self, f"_{what}"))) self, f"_{what}", self.doom.joinpath(self._config.get(what, what)))
setattr(
type(self), what, property(lambda self: getattr(self, f"_{what}")))
@property @property
def doom(self): def doom(self):
return self._doom return self._doom
@property @property
def config_name(self): def config_name(self):
return self._config_name return self._config_name
@property @property
def dsda(self): def dsda(self):
return self._doom.joinpath(self._dsda) return self._doom.joinpath(self._dsda)
def iwad_path(self, wad):
iwad = self.iwads.joinpath(self._config.get("default_iwad"))
iwadpath = self.pwads.joinpath(wad).joinpath("iwad")
if iwadpath.exists():
with io.open(iwadpath) as f:
iwad = self.iwads.joinpath(f.read().strip() + ".WAD")
return iwad
def iwad_path(self, wad):
    """Return the IWAD path to use for pwad *wad*.

    Defaults to the configured "default_iwad" under ``self.iwads``. If
    the pwad's directory contains a file named "iwad", its stripped
    contents name the IWAD stem instead (".WAD" is appended).
    """
    iwad = self.iwads.joinpath(self._config.get("default_iwad"))
    iwadpath = self.pwads.joinpath(wad).joinpath("iwad")
    if iwadpath.exists():
        with io.open(iwadpath) as f:
            iwad = self.iwads.joinpath(f.read().strip() + ".WAD")
    return iwad

View file

@ -6,51 +6,59 @@ import shutil
import subprocess import subprocess
import zipfile import zipfile
class DSDA(dcc.doom_base.WadMap): class DSDA(dcc.doom_base.WadMap):
def get_parser(self, prog_name): def get_parser(self, prog_name):
parser = super().get_parser(prog_name) parser = super().get_parser(prog_name)
parser.add_argument("-s", "--single", action="store_true") parser.add_argument("-s", "--single", action="store_true")
parser.add_argument("-a", "--abbreviation") parser.add_argument("-a", "--abbreviation")
return parser return parser
def take_action(self, parsed_args): def take_action(self, parsed_args):
dip = self.demo_in_path() dip = self.demo_in_path()
dtp = self.dsda_text_path() dtp = self.dsda_text_path()
if not dtp.exists(): if not dtp.exists():
command = [self.dsda] command = [self.dsda]
if shutil.which("xvfb-run") is not None: if shutil.which("xvfb-run") is not None:
command = ["xvfb-run"] + command command = ["xvfb-run"] + command
# TODO: negative tics should seek from the end, but this doesn't seem to work. # TODO: negative tics should seek from the end, but this doesn't
subprocess.run(command + self.dsda_preamble() + # seem to work.
["-fastdemo", dip, "-nosound", "-skiptic", "999999999", "-export_text_file"]) subprocess.run(
editor = "nano" command + self.dsda_preamble() + [
if "EDITOR" in os.environ: "-fastdemo", dip, "-nosound", "-skiptic",
editor = os.environ["EDITOR"] "999999999", "-export_text_file"
subprocess.run([editor, dtp]) ]
if parsed_args.abbreviation: )
fh1 = parsed_args.abbreviation editor = "nano"
if not parsed_args.single: if "EDITOR" in os.environ:
fh1 += self.map editor = os.environ["EDITOR"]
else: subprocess.run([editor, dtp])
fh1 = self.wad[0:2] + self.map if parsed_args.abbreviation:
if parsed_args.single: fh1 = parsed_args.abbreviation
fh1 = self.wad[0:min(len(self.wad), 4)] if not parsed_args.single:
fh2 = "" fh1 += self.map
with open(dtp, mode="r") as f: else:
for line in f: fh1 = self.wad[0:2] + self.map
if line[0:4] == "Time": if parsed_args.single:
m = re.search("[^0-9]*([0-9]*):([0-9]*).[0-9]*", line) fh1 = self.wad[0:min(len(self.wad), 4)]
if m is None: fh2 = ""
continue with open(dtp, mode="r") as f:
fh2 = m[1]+m[2] for line in f:
if len(fh2)%2==1: if line[0:4] == "Time":
fh2 = "0" + fh2 m = re.search("[^0-9]*([0-9]*):([0-9]*).[0-9]*", line)
break if m is None:
if not fh2: continue
sys.exit("Failed to match any line in {} against Time regex.".format(dtp)) fh2 = m[1]+m[2]
if len(fh2) % 2 == 1:
fh2 = "0" + fh2
break
if not fh2:
sys.exit(f"Failed to match any line in {dtp} against Time regex.")
# TODO: demo names other than uv-max. # TODO: demo names other than uv-max.
fnf = fh1 + "-" + fh2 + ".zip" fnf = fh1 + "-" + fh2 + ".zip"
with zipfile.ZipFile(self.demos.joinpath(self.wad).joinpath(fnf), mode="w") as zf: with zipfile.ZipFile(
zf.write(dip, arcname=dip.name) self.demos.joinpath(self.wad).joinpath(fnf), mode="w"
zf.write(dtp, arcname=dtp.name) ) as zf:
zf.write(dip, arcname=dip.name)
zf.write(dtp, arcname=dtp.name)

View file

@ -2,18 +2,27 @@ import dcc.doom_base
import dcc.config import dcc.config
import subprocess import subprocess
class Eureka(dcc.doom_base.WadMap):
def take_action(self, parsed_args):
iwad = self.iwad_path(parsed_args.wad)
pwadpath = self.pwads.joinpath(parsed_args.wad)
mw = list(pwadpath.glob('*{}*.wad'.format(parsed_args.wad), case_sensitive=False))
if len(mw) != 1:
raise Exception("Unable to guess at main pwad for wad {}.".format(parsed_args.wad))
complevel = self.complevel()
port = "vanilla"
if complevel == "9":
port = "boom"
if complevel == "11" or complevel == "21":
port = "mbf"
subprocess.run(["eureka"] + ["-iwad", iwad] + ["-w", parsed_args.map] + ["-p", port] + [mw[0]]) class Eureka(dcc.doom_base.WadMap):
def take_action(self, parsed_args):
iwad = self.iwad_path(parsed_args.wad)
pwadpath = self.pwads.joinpath(parsed_args.wad)
mw = list(pwadpath.glob(
'*{}*.wad'.format(parsed_args.wad), case_sensitive=False
))
if len(mw) != 1:
raise Exception(
f"Unable to guess at main pwad for wad {parsed_args.wad}."
)
complevel = self.complevel()
port = "vanilla"
if complevel == "9":
port = "boom"
if complevel == "11" or complevel == "21":
port = "mbf"
subprocess.run(
["eureka"] + ["-iwad", iwad] + ["-w", parsed_args.map]
+ ["-p", port] + [mw[0]]
)

View file

@ -4,27 +4,43 @@ import numpy as np
import wand.color import wand.color
import wand.image import wand.image
class Extract(dcc.config.base): class Extract(dcc.config.base):
def get_parser(self, prog_name): def get_parser(self, prog_name):
parser = super().get_parser(prog_name) parser = super().get_parser(prog_name)
parser.add_argument('wad') parser.add_argument('wad')
parser.add_argument('lump') parser.add_argument('lump')
return parser return parser
def take_action(self, parsed_args): def take_action(self, parsed_args):
wads = sorted(self.pwads.joinpath(self.wad).glob('*.wad', case_sensitive=False), reverse=True) wads = sorted(
self.pwads.joinpath(self.wad).glob('*.wad', case_sensitive=False),
reverse=True
)
for w in wads: for w in wads:
try: try:
# TODO: handle anything other than graphics. # TODO: handle anything other than graphics.
wad = omg.WadIO(w) wad = omg.WadIO(w)
gl = omg.Graphic(wad.read(parsed_args.lump)) gl = omg.Graphic(wad.read(parsed_args.lump))
# With no arguments, convert() changes a paletted image to an RGB one. # With no arguments, convert() changes a paletted image to an
with wand.image.Image.from_array(np.array(gl.to_Image().convert())) as img: # RGB one.
img.transparent_color(wand.color.Color("#ff00ff"), 0.0) with wand.image.Image.from_array(
img.save(filename=self.output.joinpath(parsed_args.wad).joinpath(parsed_args.lump + ".png")) np.array(gl.to_Image().convert())
return ) as img:
except Exception as e: img.transparent_color(wand.color.Color("#ff00ff"), 0.0)
print("Wad {} likely has no lump {} (exception {}).".format(w, parsed_args.lump, e)) img.save(
filename=self.output.joinpath(parsed_args.wad)
.joinpath(parsed_args.lump + ".png")
)
return
except Exception as e:
print(
f"Wad {w} likely has no lump {parsed_args.lump}"
+ f"(exception {e})."
)
print("Lump {} not found in any wad in {}".format(parsed_args.lump, parsed_args.wad)) print(
"Lump {parsed_args.lump} not found in any wad in"
+ f"{parsed_args.wad}"
)

View file

@ -5,23 +5,26 @@ import shutil
import subprocess import subprocess
import tempfile import tempfile
class Fabricate(dcc.doom_base.WadMap):
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.add_argument("--fg", action="store_true")
return parser
def take_action(self, parsed_args): class Fabricate(dcc.doom_base.WadMap):
with tempfile.TemporaryDirectory() as td: def get_parser(self, prog_name):
with contextlib.chdir(td): parser = super().get_parser(prog_name)
command = [self.dsda] parser.add_argument("--fg", action="store_true")
if not parsed_args.fg and shutil.which("xvfb-run") is not None: return parser
command = ["xvfb-run"] + command
options = [] def take_action(self, parsed_args):
for k,v in self._config.get("fabricate_options", {}).items(): with tempfile.TemporaryDirectory() as td:
list.append(options, f"{k}={v}") with contextlib.chdir(td):
if len(options) > 0: command = [self.dsda]
options = ["-assign", ",".join(options)] if not parsed_args.fg and shutil.which("xvfb-run") is not None:
subprocess.run(command + self.dsda_preamble() + options + command = ["xvfb-run"] + command
["-timedemo", self.demo_in_path()] + options = []
["-viddump", self.video_path()]) for k, v in self._config.get("fabricate_options", {}).items():
list.append(options, f"{k}={v}")
if len(options) > 0:
options = ["-assign", ",".join(options)]
subprocess.run(
command + self.dsda_preamble() + options
+ ["-timedemo", self.demo_in_path()]
+ ["-viddump", self.video_path()]
)