--- /dev/null
+#!/bin/bash
+# Set or clear a file's executable permission to match its content: if
+# file(1) classifies the content as "executable", turn the bit on,
+# otherwise turn it off.  Delegates to a separate "setexec" helper that
+# takes '+' or '-' and a path.
+if file "$1" | grep -q executable; then
+ setexec + "$1"
+else
+ setexec - "$1"
+fi
--- /dev/null
+#!/usr/bin/env perl
+
+# Continuous mirroring script around inotifywait and rsync, as suggested by
+# Buck Huppmann. Supports local and remote pushing.
+# EXPERIMENTAL! THERE IS ABSOLUTELY NO WARRANTY!
+# -- Matt McCutchen <hashproduct@gmail.com>
+# See: http://www.kepreon.com/~matt/utils/#continusync
+
+# Usage:
+# continusync path/to/srcdir/ path/to/destdir/
+# continusync path/to/srcdir/ [user@]host:path/to/destdir/
+
+# It seems to work, but it runs rsync once per event, which is ridiculous.
+# TODO: Event batching!!!
+# TODO: Do the recursive deletion in perl instead of calling rm(1).
+
+use warnings;
+use strict;
+
+# Configuration. TODO: Add options for these.
+
+# Let the rsyncs we invoke piggyback on the main ssh connection.
+# For this to work, you have to set a ControlPath in your ~/.ssh/config ;
+# see the ssh_config(5) man page.
+our @rshArgs = ('ssh', '-o', 'ControlMaster auto');
+
+# Command used to start the server half of this script on the remote host.
+our $csPath = 'continusync';
+
+# Base rsync invocation shared by all transfers.
+# Don't put -r or --delete here.
+# cp2 :)
+our @rsyncArgs = ('rsync', '-lE', '--chmod=ugo=rwX', '-i');
+
+use IPC::Open2;
+use IO::Handle;
+
+# readFully(fh, length) -> data
+# Read exactly `length' bytes from fh and return them.  Returns undef on
+# clean EOF (stream closed at a message boundary); dies on a read error.
+sub readFully(*$) {
+ my ($fh, $bytesLeft) = @_;
+ my ($buf, $off, $rv) = ('', 0);
+ while ($bytesLeft > 0) {
+ $rv = sysread($fh, $buf, $bytesLeft, $off);
+ # sysread returns undef on error and 0 on EOF; test definedness first
+ # so an error doesn't trip an uninitialized-value warning in the
+ # EOF comparison below.
+ die "Read error: $!" unless defined($rv);
+ return undef if $rv == 0; # HMMM: May lose partial read
+ $bytesLeft -= $rv;
+ $off += $rv;
+ }
+ return $buf;
+}
+
+# writeFully(fh, data)
+# Write all of `data' to fh, looping over partial writes; dies on error.
+sub writeFully(*$) {
+ my ($fh, $buf) = @_;
+ my ($bytesLeft, $off, $rv) = (length($buf), 0);
+ while ($bytesLeft > 0) {
+ $rv = syswrite($fh, $buf, $bytesLeft, $off);
+ # syswrite returns undef on error; check definedness explicitly so the
+ # failure path doesn't raise an uninitialized-value warning, and
+ # include errno in the message.
+ die "Write error: $!" unless defined($rv) && $rv > 0;
+ $bytesLeft -= $rv;
+ $off += $rv;
+ }
+}
+
+# readMsg(fh) -> (type, body)
+# Read one framed message: an 8-byte header of two big-endian 32-bit
+# integers (type, body length) followed by the body.  Returns
+# (undef, undef) on clean EOF.
+sub readMsg(*) {
+ my ($fh) = @_;
+ my $head = readFully($fh, 8);
+ return (undef, undef) unless defined($head);
+ my ($type, $bodyLen) = unpack('NN', $head);
+ return ($type, readFully($fh, $bodyLen));
+}
+
+# writeMsg(fh, type, body)
+# Counterpart of readMsg: 'NN/a*' packs the type, then the body length,
+# then the body itself.
+sub writeMsg(*$$) {
+ my ($fh, $type, $body) = @_;
+ writeFully($fh, pack('NN/a*', $type, $body));
+}
+
+# Message types for the client<->server protocol.
+#sub MSG_EXIT { 0; }
+sub MSG_REMOTE_PATH { 1; } # client -> server: body is the dest path
+sub MSG_PERFORMED { 2; } # server -> client: requested operation is done
+sub MSG_RENAME { 3; } # client -> server: body packs (src, dest)
+sub MSG_DELREC { 4; } # client -> server: body is a path to delete recursively
+sub MSG_DELETED { 5; } # server -> client: body is one path rm removed
+
+# doServer(dest)
+# Server side: chdir into the destination directory and service rename and
+# recursive-delete requests from the client on STDIN/STDOUT until EOF.
+# (File content updates arrive via rsync, not over this channel.)
+sub doServer($) {
+ my ($dest) = @_;
+
+ chdir($dest);
+
+ my ($type, $body);
+ # The comma operator runs readMsg for its side effect, then tests the
+ # assigned type; the loop ends on EOF (type undef).
+ while (($type, $body) = readMsg(STDIN), defined($type)) {
+ if ($type == MSG_RENAME) {
+ my ($src, $dest) = unpack('N/a*N/a*', $body);
+ rename($src, $dest);
+ writeMsg(STDOUT, MSG_PERFORMED, '');
+ } elsif ($type == MSG_DELREC) {
+ my $victim = $body;
+ my ($rmPid, $fromRm);
+ # Use verbose rm and scrape the quoted path out of each "removed
+ # `path'" line so every deletion can be echoed back to the client.
+ $rmPid = open($fromRm, '-|', 'rm', '-rf', '-v', $victim);
+ my $rmLine;
+ while (defined($rmLine = <$fromRm>)) {
+ chomp($rmLine);
+ if ($rmLine =~ /^[^`]*`(.*)'[^']*$/) {
+ writeMsg(STDOUT, MSG_DELETED, $1);
+ }
+ }
+ close($fromRm);
+ waitpid($rmPid, 0);
+ writeMsg(STDOUT, MSG_PERFORMED, '');
+ }
+ }
+}
+
+# The stuff below applies only to the client.
+
+our ($src, $dest);
+# Open handle on a local destination dir, kept so $dest can be rewritten as
+# /proc/self/fd/N (still valid after we chdir into the source).
+our $localDestFH;
+
+our ($fromServer, $toServer, $serverPid);
+our ($fromInwt, $inwtPid);
+
+# SIGINT handler: close the server connection, reap the server process,
+# stop inotifywait, and exit.
+sub clientQuit() {
+ print "Caught a signal. Shutting down.\n";
+
+ #print STDOUT "serverPid is $serverPid\n";
+ close($fromServer);
+ close($toServer);
+ waitpid($serverPid, 0);
+
+ #print STDOUT "inwtPid is $inwtPid\n";
+ kill(2, $inwtPid);
+ close($fromInwt);
+ waitpid($inwtPid, 0);
+
+ exit(0);
+}
+
+# doRsync(isRecursive, isDelete, paths...)
+# Push the given paths (relative to the source dir, which is our cwd) to
+# $dest via rsync, feeding the path list NUL-terminated on its stdin with
+# --files-from=- --from0.  -r/-d and --del are chosen per call.
+sub doRsync($$@) {
+ my ($isRecursive, $isDelete, @paths) = @_;
+
+ my ($rsyncPid, $toRsync);
+ $rsyncPid = open($toRsync, '|-', @rsyncArgs,
+ ($isRecursive ? '-r' : '-d'), ($isDelete ? '--del' : ()),
+ '--no-implied-dirs', '-t', '--from0', '--files-from=-', '.', $dest);
+ foreach my $p (@paths) {
+ print $toRsync $p, "\0";
+ }
+ close($toRsync);
+ waitpid($rsyncPid, 0);
+}
+
+# doRename(src, dest)
+# Ask the server to perform a rename and wait for its acknowledgement.
+sub doRename($$) {
+ my ($src, $dest) = @_;
+ writeMsg($toServer, MSG_RENAME, pack('N/a*N/a*', $src, $dest));
+ readMsg($fromServer); # MSG_PERFORMED
+ print "*movefrom $src\n",
+ "*moveto $dest\n";
+}
+
+# doDelete(path)
+# Ask the server to recursively delete path; echo each MSG_DELETED the
+# server reports back.
+sub doDelete($) {
+ my ($path) = @_;
+ writeMsg($toServer, MSG_DELREC, $path);
+ my ($type, $body);
+ # NOTE(review): if the server connection hits EOF here, $type is undef
+ # and the == comparison emits a warning, though the loop still ends.
+ while (($type, $body) = readMsg($fromServer), $type == MSG_DELETED) {
+ print "*deleting $body\n";
+ }
+ # Also reads the final MSG_PERFORMED.
+}
+
+# move_self so we can reliably detect moves out
+our @interestingEvents = ('modify', 'attrib', 'move', 'move_self', 'create', 'delete');
+
+# doClient(src, dest)
+# Client side: start a server (over ssh for host:path dests, forked locally
+# otherwise), do an initial full rsync, then translate inotifywait events
+# into incremental rsyncs, renames, and deletes.  Runs until SIGINT.
+sub doClient($$) {
+ ($src, $dest) = @_;
+
+ print "Continusync starting up.\n",
+ "This software is EXPERIMENTAL. There is ABSOLUTELY NO WARRANTY.\n";
+
+ # Get a server process.
+ # Echoes of rsync...
+ if ($dest =~ /^([^:]*):(.*)$/) {
+ # Invoke over remote shell
+ my ($uhost, $rdest) = ($1, $2);
+ $serverPid = open2($fromServer, $toServer, @rshArgs, $uhost, $csPath, '--server');
+ # Pass path on stdin to stop the shell from messing with it.
+ # Echoes of rsync daemon protocol...
+ writeMsg($toServer, MSG_REMOTE_PATH, $rdest);
+ } else {
+ # Fork locally, with a pipe in each direction.
+ my ($fromClient, $toClient);
+ pipe($fromServer, $toClient);
+ pipe($fromClient, $toServer);
+ $serverPid = fork();
+ if ($serverPid == 0) {
+ # Child server
+ close($fromServer);
+ close($toServer);
+ open(STDIN, "<&", $fromClient);
+ open(STDOUT, ">&", $toClient);
+ doServer($dest);
+ exit(0);
+ }
+ # Parent client
+ close($fromClient);
+ close($toClient);
+ # Get a dest path that we can pass to rsync even after we chdir into the source.
+ {
+ # Raise the close-on-exec threshold so this fd survives into the
+ # rsync children we spawn.
+ local $^F = 100000;
+ open($localDestFH, '<', $dest);
+ }
+ $dest = "/proc/self/fd/" . fileno($localDestFH);
+ }
+
+ chdir($src);
+
+ # Get inotifywait.
+ $inwtPid = open($fromInwt, '-|');
+ if ($inwtPid == 0) {
+ # Parent wants all our output on the single filehandle $fromInwt.
+ open(STDERR, ">&", STDOUT);
+ # The format yields three lines per event: events, watched dir, name.
+ my @args = ('inotifywait', '-r', '-m', '--format', "%e\n%w\n%f", map(('-e', $_), @interestingEvents), '.');
+ exec(@args);
+ }
+
+ <$fromInwt>; # `Setting up watches'
+ <$fromInwt>; # `Watches established'
+ $SIG{INT} = \&clientQuit;
+ print "Continuously mirroring. Give me a SIGINT when you want me to quit.\n";
+
+ # Now we can do the initial copy without danger of losing events.
+ doRsync(1, 1, '.');
+
+ # Consecutive MOVED_FROM and MOVED_TO events constitute an internal
+ # move. A move-out followed by a move-in gives an intervening
+ # MOVED_SELF, so we aren't fooled.
+ my $movedFrom = undef;
+
+ for (;;) {
+ # Read one event: event names, watched dir, filename.
+ my ($e, $w, $f);
+ chomp($e = <$fromInwt>);
+ chomp($w = <$fromInwt>);
+ chomp($f = <$fromInwt>);
+ my $path = $w . $f;
+ $path =~ s,^\./(.),$1,; # Remove initial ./ if it isn't all
+ my $isDir = ($e =~ s/,ISDIR$//);
+ #print "Got event: ($e,$isDir,$w,$f)\n";
+
+ # A pending MOVED_FROM is resolved by the very next event: MOVED_TO
+ # completes an internal rename; anything else means a move out.
+ if (defined($movedFrom)) {
+ if ($e eq 'MOVED_TO') {
+ # Complete the move.
+ doRename($movedFrom, $path);
+ next;
+ } else {
+ # Moved out.
+ doDelete($movedFrom);
+ }
+ $movedFrom = undef;
+ }
+
+ if ($e eq 'MODIFY') {
+ doRsync(0, 0, $path);
+ } elsif ($e eq 'ATTRIB') {
+ doRsync(0, 0, $path);
+ } elsif ($e eq 'MOVED_FROM') {
+ $movedFrom = $path;
+ } elsif ($e eq 'MOVED_TO') {
+ # Moved in.
+ # Must be recursive in case it was an entire directory.
+ doRsync(1, 0, $path);
+ } elsif ($e eq 'CREATE') {
+ doRsync(0, 0, $path);
+ } elsif ($e eq 'DELETE') {
+ doDelete($path);
+ }
+ }
+ # not reached
+}
+
+# Entry point: with --server, run the server half (the dest path arrives as
+# the first message on stdin); otherwise run the client with src and dest
+# from the command line.  Previously an empty @ARGV produced uninitialized-
+# value warnings and a client run with undef paths; now it dies with usage.
+if (@ARGV && $ARGV[0] eq '--server') {
+ #STDOUT->autoflush(1);
+ my ($type, $dest) = readMsg(STDIN);
+ doServer($dest);
+} elsif (@ARGV == 2) {
+ doClient($ARGV[0], $ARGV[1]);
+} else {
+ die "usage: continusync SRC/ DEST/\n   or: continusync SRC/ [USER\@]HOST:DEST/\n";
+}
+
--- /dev/null
+#!/bin/bash
+# cp2: copy with rsync (-r recursive, -l symlinks as symlinks, -t times,
+# -E executability) while normalizing permissions to ugo=rwX.
+rsync -rltE --chmod=ugo=rwX "$@"
--- /dev/null
+#!/bin/bash
+# dnf-repoquery-by-srpm [dnf-options...] srpm-name...
+#
+# List the NEVRA of every package built from one of the given source RPMs
+# (given by full name as in the SOURCERPM tag, e.g., glibc-2.31-4.fc32.src.rpm).
+#
+# This script's option parser is crude and assumes that every argument that
+# doesn't begin with a dash is a source RPM name, so you must use the
+# "--opt=val" form of dnf options, not "--opt val". Also, if you customize
+# the query format, it must not contain an embedded newline or \x01.
+#
+# Ideally "dnf repoquery" would support arbitrary query tags like rpm
+# (i.e., "rpm -qa SOURCERPM=...") and we wouldn't need this script.
+#
+# ~ Matt 2020-09-02
+
+set -e
+# NOTE(review): set -x traces every command to stderr; this looks like
+# leftover debugging -- confirm whether it should stay enabled.
+set -x
+set -o pipefail
+
+dnf_options=()
+orig_queryformat="%{name}-%{evr}.%{arch}"
+grep_options=()
+for arg in "$@"; do
+ case "$arg" in
+ (--qf=*)
+ orig_queryformat="${arg#--qf=}";;
+ (--queryformat=*)
+ orig_queryformat="${arg#--queryformat=}";;
+ (-*)
+ dnf_options+=("$arg");;
+ (*)
+ # Hope . is the only character special in a basic regular expression that
+ # occurs in package filenames.
+ # Match "\x01<srpm-name>" anchored at end of line.
+ grep_options+=(-e $'\x01'"${arg//./\.}\$");;
+ esac
+done
+
+# Append the source RPM after a \x01 separator so we can grep on it and
+# then strip it back off with sed.
+real_queryformat="$orig_queryformat"$'\x01'"%{sourcerpm}"
+
+dnf repoquery --queryformat="$real_queryformat" "${dnf_options[@]}" \
+ | { grep "${grep_options[@]}" || true; } \
+ | sed -e $'s,\x01.*$,,'
--- /dev/null
+#!/bin/bash
+# Runs an isolated Firefox session in its own profile. - Matt 2007-12-02
+
+#set -x
+
+# Mozilla data dir: where Firefox profiles and profiles.ini live.
+MDD=~/.mozilla/firefox
+
+# Regenerate profiles.ini from the profile directories present (named
+# <hash>.<name>).  Writes to a temp file first, then renames into place.
+function write_profiles_ini {
+ (
+ echo '[General]'
+ echo 'StartWithLastProfile=1'
+ echo
+ i=0
+ for dir in *.*; do
+ if [ -d "$dir" ]; then
+ name="${dir#*.}"
+ echo "[Profile$i]"
+ echo "Name=$name"
+ echo "IsRelative=1"
+ echo "Path=$dir"
+ echo
+ let i=$i+1
+ fi
+ done
+ ) >profiles.ini.tmp
+ mv profiles.ini.tmp profiles.ini
+}
+
+# Clone the master profile into a fresh uniquely-named session profile,
+# register it, and print its name.
+function new_profile {
+ name="isolated-$$-$(date +%s)"
+ cp2 00000000.master/ "00000000.$name/"
+ write_profiles_ini
+ echo "$name"
+}
+
+# liquidate_profile <name>: diff the session profile against the master,
+# keep only the changed files in changes_<name>, delete the profile, and
+# show the user what changed.
+function liquidate_profile {
+ # Previously this relied on the caller's global $name even though the
+ # name is passed as an argument (run_isolated calls it with "$name");
+ # bind it explicitly so the function is self-contained.
+ name="$1"
+ dir="00000000.$name"
+ cdir="changes_$name"
+ # --compare-dest skips files identical to the master profile, so $cdir
+ # receives only the files this session added or changed.
+ rsync -rl --compare-dest=../00000000.master --checksum "$dir/" "$cdir/"
+ rm -rf "$dir"
+ write_profiles_ini
+ find "$cdir" -type d -empty -delete
+ if [ -e "$cdir" ]; then
+ find_out="$(find "$cdir" -type f -print)"
+ zenity --info --text="This session changed the following files; you may wish to review the changes for application to the master profile:
+
+$find_out"
+ fi
+}
+
+# run_isolated <firefox-binary> [firefox-args...]
+# Create a throwaway profile, run Firefox in it (without joining an
+# existing instance), then fold the changes back via liquidate_profile.
+function run_isolated {
+ ff="$1"
+ shift
+ name="$(cd "$MDD"; new_profile)"
+ zenity --info --text="Beginning isolated Firefox session $name"
+ "$ff" -no-remote -P "$name" "$@"
+ (cd "$MDD"; liquidate_profile "$name")
+}
+
+run_isolated /usr/bin/firefox "$@"
--- /dev/null
+#!/bin/bash
+# ftc <file>: file tree create
+# Serializes a file tree to stdout in a simple length-prefixed text format:
+#   l <len> <target>          symlink
+#   f <size> / x <size>       regular file (x = executable), content follows
+#   { ... }                   directory of "+ <namelen> <name> <entry>" records
+# The companion ftx script reverses this.
+set -e
+
+function ftc_sub {
+if [ -h "$1" ]; then
+ # The appended sentinel 'x' keeps command substitution from stripping
+ # trailing newlines in the link target; it is removed just below.
+ ltarget="$(readlink -- "$1" && echo 'x')"
+ ltarget="${ltarget%
+x}"
+ echo "l ${#ltarget} $ltarget"
+elif [ -f "$1" ]; then
+ if [ -x "$1" ]; then
+ stat --format=$'x %s' -- "$1"
+ else
+ stat --format=$'f %s' -- "$1"
+ fi
+ cat -- "$1"
+ echo
+elif [ -d "$1" ]; then
+ (
+ echo "{"
+ cd -- "$1"
+ # Include dotfiles, match nothing for an empty dir, and make sure
+ # no inherited GLOBIGNORE filters entries.
+ unset GLOBIGNORE
+ shopt -s nullglob
+ shopt -s dotglob
+ for entry in *; do
+ echo -n "+ ${#entry} $entry "
+ ftc_sub "$entry"
+ done
+ echo "}"
+ )
+else
+ echo "File of unknown type!" 1>&2
+fi
+}
+
+ftc_sub "$1"
--- /dev/null
+#!/bin/bash
+# ftx <file>: file tree extract
+# Reads the stream format produced by ftc on stdin and recreates the tree
+# at the given path.  Refuses to overwrite anything that already exists.
+set -e
+
+# Consume and discard exactly the next $1 characters of input (used for
+# the format's separator spaces/newlines).
+function skip {
+ read -n $1 trash
+}
+
+function ftx_sub {
+ if [ -h "$1" ] || [ -a "$1" ]; then
+ echo "File already exists!" 1>&2
+ return 1
+ fi
+ # One-character type tag: f/x regular file, { directory, l symlink.
+ read -n 1 type
+ case $type in
+ f|x)
+ skip 1 # space
+ read length
+ head -c "$length" >"$1"
+ if [ $type == x ]; then
+ setexec + "$1"
+ fi
+ skip 1 # newline
+ ;;
+ {)
+ skip 1 # newline
+ mkdir -- "$1"
+ (
+ cd -- "$1"
+ # Each "+ <namelen> <name> <entry>" record is one child; a bare
+ # "}" ends the directory.
+ while read -n 1 what && [ $what == '+' ]; do
+ skip 1 # space
+ read -d ' ' length
+ read -d '' -n $length name
+ skip 1 # space
+ ftx_sub "$name"
+ done
+ skip 1 # newline
+ )
+ ;;
+ l)
+ skip 1 # space
+ read -d ' ' length
+ read -d '' -n $length target
+ skip 1 # newline
+ ln -s -- "$target" "$1"
+ ;;
+ esac
+}
+
+ftx_sub "$1"
--- /dev/null
+#!/bin/bash
+# usage: gitar foo-dir >foo.gitar
+# Pack a directory into a single reproducible git repository (one synthetic
+# commit with fixed author/date), then emit the repo as a Matt-style "ftc"
+# file tree on stdout.
+# NOTE(review): the dashed git-* commands (git-init-db, git-update-index,
+# ...) are from git <= 1.5; modern git requires "git init" etc. -- confirm
+# the intended git version before reusing this.
+
+set -e
+trap 'echo "Unexpected error!
+I am leaving the .git subdirectory around so you can troubleshoot;
+delete the subdirectory before trying to gitar again." 1>&2' ERR
+cd "$1"
+
+if [ -e '.git' ]; then
+ echo 'The source directory is already a git repository!' 1>&2
+ exit 1
+fi
+
+if ! find . -type d -empty | cmp /dev/null - >/dev/null; then
+ echo 'WARNING: The source directory contains empty directories, and git will drop them.' 1>&2
+fi
+
+# Make repository.
+git-init-db >/dev/null
+
+# Make a dummy commit to hold all the files.
+function list-files-to-add {
+ find . -wholename './.git' -prune -or '(' -type f -or -type l ')' -printf '%P\n'
+}
+list-files-to-add | git-update-index --add --stdin >/dev/null
+tree=$(git-write-tree)
+# Fixed identity and timestamp so the same input always produces the same
+# commit hash.
+function clean-commit {
+ GIT_AUTHOR_NAME='reproducible' GIT_AUTHOR_EMAIL='' GIT_AUTHOR_DATE='946684801 +0000' GIT_COMMITTER_NAME='reproducible' GIT_COMMITTER_EMAIL='' GIT_COMMITTER_DATE='946684801 +0000' git-commit-tree "$@" </dev/null
+}
+clean-commit $tree >.git/refs/heads/master
+
+# Pack things up nicely.
+git-repack -a >/dev/null
+# Rename the pack files to fixed names for reproducibility.
+for i in pack idx; do
+ mv .git/objects/pack/{pack*.$i,pack.$i}
+done
+git-prune >/dev/null
+
+# Write out git repository as a Matt-style file tree.
+function write_file {
+ echo -n "+ ${#2} $2 "
+ stat --format=$'f %s' -- "$1/$2"
+ cat -- "$1/$2"
+ echo
+}
+echo '{'
+ echo '+ 4 HEAD f 23'
+ echo 'ref: refs/heads/master'
+ echo
+ echo '+ 4 refs {'
+ echo '+ 5 heads {'
+ write_file .git/refs/heads master
+ echo '}'
+ echo '}'
+ echo '+ 7 objects {'
+ echo '+ 4 pack {'
+ write_file .git/objects/pack pack.pack
+ write_file .git/objects/pack pack.idx
+ echo '}'
+ echo '}'
+echo '}'
+
+rm -rf .git
--- /dev/null
+#!/bin/bash
+# Matt's improved version of Fedora's lesspipe.sh
+#
+# To use this filter with less, define LESSOPEN:
+# export LESSOPEN="|lesspipe.sh %s"
+
+# Updated for "less" that checks our exit code ~ Matt 2010-12-28
+
+# This script is entirely driven by the file content, with type detected by
+# file(1). For reproducibility and to avoid problems with strange names and
+# even symlinks, we do not use the original name of the file for anything.
+
+set -e
+set -o pipefail
+#set -x
+
+# Attach the file to an FD. We will reopen /dev/fd/$f to get as many fresh FDs as
+# we need.
+exec {f}<"$1"
+shift
+
+# Run the given viewer command with the (decompressed, if applicable)
+# content on its stdin.  $decompressor must already be set.
+function run_stream_stdin {
+ if [ "$decompressor" == cat ]; then
+ <&$f "$@"
+ else
+ <&$f "$decompressor" | "$@"
+ fi
+}
+
+# Re-point our stdin at a regular (already-unlinked) file holding the
+# decompressed content, for viewers that need a seekable file or that read
+# their input more than once.
+function stage_to_file_stdin {
+ if [ "$decompressor" != cat ]; then
+ # There is an unavoidable race here, so I am not trying to avoid
+ # it. If we rewrite the script in Perl, we can use open(undef).
+ # Integration with in2tempfile looks too hard.
+ tmpfile="$(mktemp --tmpdir lesspipe.XXXXXXXXXX)"
+ exec {tf}>"$tmpfile"
+ rm -f "$tmpfile"
+ <&$f "$decompressor" >&$tf # Synchronous
+ # Get a new FD for reading on the tempfile. This might not be
+ # necessary if the command is just going to reopen it.
+ exec {f}>&- </dev/fd/$tf {tf}>&-
+ else
+ exec <&$f-
+ fi
+}
+
+# Note: the bash file tests (except -h) follow symlinks.
+if [ -f /dev/fd/$f ]; then
+ # Specify the input as -. This avoids irrelevant "set[ug]id" output for
+ # executables and also follows the symlink without us having to pass -L.
+ # We have to reopen stdin to not consume it.
+ content_type="$(</dev/fd/$f file -b -)"
+
+ # First pass: detect a compression wrapper.
+ case "$content_type" in
+ ('gzip compressed data'*)
+ decompressor=gunzip;;
+ ('bzip2 compressed data'*)
+ decompressor=bunzip2;;
+ ('XZ compressed data'*)
+ decompressor=unxz;;
+ (*)
+ decompressor=cat;;
+ esac
+
+ if [ "$decompressor" != cat ]; then
+ # Get the real content type. Whatever work the decompressor
+ # does before it is killed with SIGPIPE, we will duplicate later
+ # when we read the file, but there is no good alternative:
+ # - If we do a "tee" with one pipe held for later use, "tee"
+ #   will block once it fills up that pipe. So if "file" needs
+ #   more data than the pipe buffer to make its decision
+ #   (probably unlikely), we could hang.
+ # - Going ahead and dumping the decompressed data to a temporary
+ #   file is a waste of work if the content type turns out to be
+ #   one for which we have a streaming viewer, but of course we
+ #   don't know that yet.
+ content_type="$(</dev/fd/$f "$decompressor" | file -b -)" || [ $? == 141 ] # SIGPIPE
+ fi
+
+ # Second pass: pick a viewer for the real content type.
+ case "$content_type" in
+ ('troff or preprocessor input'*)
+ ## Presuming our parent is "less", set COLUMNS based on the width
+ ## of the terminal.
+ #exec {pt}>/proc/$PPID/fd/1
+ #if [ -t $pt ]; then
+ # # XXX Does this really use stderr rather than the controlling terminal?
+ # export COLUMNS="$(bash -i -c 'echo $COLUMNS' 2>&$pt)"
+ #fi
+ #exec {pt}>&-
+ # viapty is a more comprehensive solution than the above. ~ 2011-09-05
+ # Hm, by the time we hack in PAGER=cat, maybe it's not any better... ~ 2011-09-06
+ run_stream_stdin viapty man -l -;;
+ (*'tar archive'*)
+ run_stream_stdin tar -tvv;;
+ (*'cpio archive'*)
+ run_stream_stdin cpio --quiet -itv;;
+ (RPM*)
+ run_stream_stdin rpm -qivl --changelog -p -;;
+ (*image*) # FIXME: will catch disk "images" as well as graphical images
+ if type identify &>/dev/null; then
+ identify_cmd=(identify)
+ elif type gm &>/dev/null; then
+ identify_cmd=(gm identify)
+ else
+ ### Give them the binary data.
+ #echo "No identify available"
+ #echo "Install ImageMagick or GraphicsMagick to browse images"
+ identify_cmd=(false)
+ fi
+ if [ "${identify_cmd[0]}" != false ]; then
+ # Make a file so identify won't stage it again with a
+ # nonreproducible name. (Note that "identify -" will
+ # stage again even if /dev/stdin is a file!)
+ stage_to_file_stdin
+ "${identify_cmd[@]}" /dev/stdin
+ fi;;
+ ('OpenDocument Text'|_,'OpenDocument Text Template')
+ stage_to_file_stdin
+ ### CUSTOM SCRIPT -- This part cannot be upstreamed unless odt2text is.
+ odt2text /dev/stdin # based on unzip, needs a file
+ ;;
+ # This has to come after the special case for odt2text.
+ ('Zip archive data'*|_,'OpenDocument'*|_,'OpenOffice 1.x'*)
+ stage_to_file_stdin
+ zipinfo /dev/stdin # needs a file
+ ;;
+ ('PDF document'*)
+ run_stream_stdin pdftotext - -;;
+ ('PostScript document'*)
+ # If you wanted to see the PostScript source code, sorry...
+ run_stream_stdin ps2ascii;;
+ (ELF*'dynamically linked'*)
+ stage_to_file_stdin # read twice
+ objdump -xRTdgl /dev/stdin
+ # The above does not seem to show data sections. Run a separate command
+ # to show them. (Of course I would prefer a single command.)
+ objdump -s -j .rodata -j .data /dev/stdin
+ ;;
+ (ELF*)
+ stage_to_file_stdin # read twice
+ objdump -xdgl /dev/stdin
+ objdump -s -j .rodata -j .data /dev/stdin
+ ;;
+ ('SQLite 3.x database'*)
+ # TODO: Reimplement copy to a temporary directory to
+ # handle databases that use write-ahead mode, etc. I had
+ # implemented this in main VM and I forgot to copy it to
+ # main-template before shutting down. :( ~ Noted 2015-12-28
+ stage_to_file_stdin
+ # I believe that this will honor a write lock held on an
+ # uncompressed database by another instance of SQLite. I
+ # further believe that is what we want.
+ sqlite3 /dev/stdin .dump # needs a file
+ ;;
+ ('compiled Java class data'*)
+ # Agghhh... why couldn't javap just take a filename? It's clear
+ # that all it is doing is translating our argument to a file and
+ # opening that, without checking the correctness of the package
+ # or class name.
+ # XXX: Proper cleanup on failure
+ dir="$(mktemp -d --tmpdir lesspipe-javap.XXXXXXXXXX)"
+ ln -s /dev/stdin "$dir/Stdin.class"
+ run_stream_stdin javap -classpath "$dir" -private -s -c -verbose Stdin
+ rm -rf "$dir"
+ ;;
+ # Disabled for now.
+ #(*.grl)
+ # stage_to_file_stdin
+ # head -n 1 /dev/stdin | fold -w 28
+ # tail -n +2;;
+ (*) # Unknown content type.
+ if [ "$decompressor" != cat ]; then
+ # Just decompress the file.
+ <&$f "$decompressor"
+ else
+ # We cannot add value in this case.
+ # Let "less" show the original file.
+ exit 77
+ fi;;
+ esac
+
+elif [ -d /dev/fd/$f ]; then
+ # nip blank line at the end
+ # XXX: how to make sure "less" has -R enabled?
+ #ls -H -al --color=always -- /dev/fd/$f/ | head --bytes=-3
+ ### Between coreutils-7.6-9.matt1.fc12 and coreutils-8.5-7.fc14, the
+ ### output has changed to put the \n at the end, so the special hack is
+ ### no longer necessary (less ignores a final \n) and in fact corrupts
+ ### the colors. ~ Matt 2011-06-19
+ ls -H -al --color=always -- /dev/fd/$f/
+else
+ # Device/special file. Don't print anything; let less complain about it.
+ # (For now, we are not interested in content sniffing on pipes. If less
+ # gained support for calling LESSOPEN for stdin, we might reconsider.)
+ exit 77
+fi
--- /dev/null
+#!/usr/bin/perl
+# ntfsresizecopy: Copy an NTFS filesystem from one block device to another,
+# resizing it to the size of the destination device in the process. (Uses
+# ntfsprogs from http://linux-ntfs.org/doku.php?id=ntfsprogs .) This is
+# EXPERIMENTAL; after using this script, you should mount the destination
+# read-only and check that everything looks intact.
+#
+# usage: ntfsresizecopy SRC DEST
+#
+# An expanding copy is just done with ntfsclone followed by ntfsresize.
+# A shrinking copy is done by running ntfsclone and ntfsresize on devices
+# specially crafted with the Linux device-mapper (requires dmsetup and losetup);
+# you may save time by checking first that the shrinkage is possible with
+# `ntfsresize -n -s SIZE SRC'.
+#
+# The special shrinking technique should be applicable to any filesystem type
+# that has an in-place shrinking command that doesn't write outside the new
+# size. Just change the calls to ntfsclone and ntfsresize; ntfsclone can be
+# replaced by a dd of the beginning of the source for filesystems that don't
+# have a sparse clone command.
+#
+# Version 2008.06.01
+# Maintained at http://mattmccutchen.net/utils/#ntfsresizecopy .
+# -- Matt McCutchen <matt@mattmccutchen.net>
+
+use strict;
+use warnings;
+use Fcntl qw(SEEK_SET SEEK_CUR SEEK_END);
+use List::Util qw(min);
+use filetest 'access';
+$| = 1;
+
+# These are not currently used but might be useful when modifying this script
+# for a filesystem that doesn't have an ntfsclone analogue.
+#my $shownProgress = ''; # cursor at its end
+#sub showProgress($) {
+# my ($newProgress) = @_;
+# my $shrink = length($shownProgress) - length($newProgress);
+# print("\b" x length($shownProgress), $newProgress,
+# $shrink > 0 ? (" " x $shrink, "\b" x $shrink) : ());
+# $shownProgress = $newProgress;
+#}
+#sub dd(**$) {
+# my ($srcfh, $destfh, $len) = @_;
+# while ($len > 0) {
+# showProgress("$len bytes left");
+# my $chunkLen = min(1048576, $len);
+# sysread($srcfh, my $data, $chunkLen) == $chunkLen or die 'read error';
+# syswrite($destfh, $data, $chunkLen) == $chunkLen or die 'write error';
+# $len -= $chunkLen;
+# }
+# showProgress('');
+#}
+
+# deviceSize(fh) -> size in bytes
+sub deviceSize(*) {
+ # Determine the size of a device by seeking to its end. The ioctl used
+ # by `blockdev --getsize64' might be more official, but this one is
+ # easy and perhaps more portable.
+ my ($fh) = @_;
+ # sysseek returns the new position as "0 but true" at offset 0, so
+ # `or die' is safe even at the start of the device.
+ my $origPos = sysseek($fh, 0, SEEK_CUR) or die;
+ my $size = sysseek($fh, 0, SEEK_END) or die;
+ sysseek($fh, $origPos, SEEK_SET) or die;
+ return 0 + $size;
+}
+# Wrappers for dmsetup and losetup
+# dm_create(name, [start, len, type, args...], ...): create a device-mapper
+# device from the given table rows, fed to `dmsetup create' on stdin.
+sub dm_create($@) {
+ my ($name, @table) = @_;
+ open(my $toDms, '|-', 'dmsetup', 'create', $name) or die;
+ print $toDms map(join(' ', @{$_}) . "\n", @table);
+ close($toDms) or die "dmsetup create $name failed";
+}
+sub dm_remove($) {
+ my ($name) = @_;
+ # system() returns nonzero on failure, hence `and warn'.
+ system('dmsetup', 'remove', $name) and warn "dmsetup remove $name failed";
+}
+# losetup(file) -> path of a free loop device bound to file (losetup -f -s)
+sub losetup($) {
+ my ($file) = @_;
+ # NOTE(review): this open is unchecked; a fork failure is caught only
+ # indirectly by the close/defined test below.
+ open(my $fromLs, '-|', 'losetup', '-fs', $file);
+ my $dev = <$fromLs>;
+ close($fromLs) and defined($dev) or die "losetup -fs $file failed";
+ chomp($dev);
+ return $dev;
+}
+sub losetup_d($) {
+ my ($dev) = @_;
+ system('losetup', '-d', $dev) and warn "losetup -d $dev failed";
+}
+
+scalar(@ARGV) == 2 or die <<EOU;
+usage: ntfsresizecopy SRC DEST
+See the comment at the top of the script for more information.
+EOU
+my ($src, $dest) = @ARGV[0..1];
+
+open(my $srcfh, '<', $src) or die "open($src) for reading failed: $!";
+-b $srcfh or die "Source $src must be a block device.\n";
+# (stat)[6] is rdev (device identity), used below to refuse src == dest.
+my $srcRdev = (stat(_))[6];
+open(my $destfh, '+<', $dest) or die "open($dest) for reading/writing failed: $!";
+-b $destfh or die "Destination $dest must be a block device.\n";
+my $destRdev = (stat(_))[6];
+$srcRdev == $destRdev and die "Source $src and destination $dest must not be "
+ . "the same block device.\nUse ntfsresize for in-place resizing.\n";
+
+my ($srcSize, $destSize) = (deviceSize($srcfh), deviceSize($destfh));
+# Assume that, since src and dest are block devices, sizes are divisible by 512.
+my ($srcBlocks, $destBlocks) = map($_ / 512, $srcSize, $destSize);
+# Positive when the filesystem must shrink to fit the destination.
+my $shrinkBlocks = $srcBlocks - $destBlocks;
+
+print "Going to copy $src ($srcSize bytes) => $dest ($destSize bytes).\n";
+
+# Shrinking copy: needs the device-mapper tricks described in the header.
+if ($shrinkBlocks > 0) {
+
+print "\nSTEP 1: ntfsclone the beginning of the src to the dest.\n";
+# Really, clone the whole src to a magical dest consisting of the real dest
+# followed by a zero target to make up the size difference.
+# Writes outside the dest's size will be lost to the zero target, but that
+# doesn't hurt anything. And under the assumption that the shrinkage is
+# possible, ntfsclone copies at most as much data as a simple dd of the
+# beginning of the src to the dest would.
+my $mdn = "ntfsresizecopy.$$.magicdest";
+my $magicDest = "/dev/mapper/$mdn";
+# If something in the "eval" fails, still clean up as much as possible.
+eval {
+ dm_create($mdn,
+ [0, $destBlocks, 'linear', $dest, 0],
+ [$destBlocks, $shrinkBlocks, 'zero']);
+ system('ntfsclone', '--overwrite', $magicDest, $src) and die 'ntfsclone failed.';
+};
+dm_remove($mdn);
+die $@ if $@;
+
+print "\n", <<EOM;
+STEP 2: ntfsresize the dest, bringing in the end of the src.
+NOTE: Please ignore ntfsresize's remarks about data loss (the src isn't being
+written so you haven't lost anything if this fails) and about shrinking the
+device (the device is already smaller).
+EOM
+# Really, resize a magical dest consisting of the real one plus the end of the
+# src. This leaves a shrunken filesystem on the beginning of the magical dest,
+# i.e., on the real dest.
+# ntfsresize doesn't seem to write outside the new size, but we use a snapshot
+# layer to be extra sure we don't mess up the src. The snapshot layer needs a
+# COW file that is at least one page in size, even though we expect no data to
+# be written to it.
+my $cowdev;
+eval {
+ # open(..., undef) creates an anonymous temporary file; expose it to
+ # losetup via /proc/$$/fd.
+ open(my $cowfh, "+>", undef) or die 'failed to create temporary COW file';
+ truncate($cowfh, 4096) or die 'failed to expand temporary COW file';
+ $cowdev = losetup("/proc/$$/fd/" . fileno($cowfh));
+ dm_create($mdn,
+ [0, $destBlocks, 'linear', $dest, 0],
+ [$destBlocks, $shrinkBlocks, 'snapshot', $src, $cowdev, 'N', 1]);
+ open(my $toNr, '|-', 'ntfsresize', '-s', $destSize, $magicDest) or die 'ntfsresize failed.';
+ print $toNr "y\n"; # Confirm automatically because we aren't endangering the src.
+ close($toNr) or die 'ntfsresize failed.';
+};
+dm_remove($mdn);
+losetup_d($cowdev) if defined($cowdev);
+die $@ if $@;
+
+} else {
+
+# Expanding (or equal-size) copy: a plain clone followed by a grow.
+print "\nSTEP 1: ntfsclone the src to the dest.\n";
+system('ntfsclone', '--overwrite', $dest, $src) and die 'ntfsclone failed.';
+
+print "\n", <<EOM;
+STEP 2: ntfsresize the dest.
+NOTE: Please ignore ntfsresize's remarks about data loss (the src isn't being
+written so you haven't lost anything if this fails).
+EOM
+open(my $toNr, '|-', 'ntfsresize', '-s', $destSize, $dest) or die 'ntfsresize failed.';
+print $toNr "y\n"; # Confirm automatically because we aren't endangering the src.
+close($toNr) or die 'ntfsresize failed.';
+
+}
+
--- /dev/null
+#!/bin/bash
+# Well-behaved repeating TeX builder -- Matt McCutchen
+# usage: retex <cmd> <input-file-minus-.tex>
+
+set -e
+
+cmd="$1" # TeX compiler to run (e.g., pdflatex)
+in="$2" # input file name without the .tex extension
+shift 2
+
+# Run one compiler pass, answering X to any interactive prompts so a TeX
+# error cannot hang the build.
+function run {
+ # Was "$2": inside a function that refers to the function's own second
+ # positional parameter, and run is always invoked with no arguments, so
+ # the message printed a blank.  Use the script-level $cmd instead.
+ echo "[$iter] Running $cmd..."
+ yes X | "$cmd" -file-line-error-style "$in"
+}
+
+# compare: succeed only when every current output file matches its kept
+# ".keep" snapshot (a fixed point), skipping .keep*, .pdf, and .log files.
+function compare {
+ echo "Comparing files..."
+ for f in "$in"*; do
+ # ignore pdfs because they have a nonreproducible "ID" at the end
+ # and logs because they aren't read
+ if ! [ "$f" != "${f%.keep*}" ] && ! [ "$f" != "${f%.pdf}" ] \
+ && ! [ "$f" != "${f%.log}" ]; then
+ suf="${f#$in}"
+ cmp "$in$suf" "$in.keep$suf" || return $?
+ fi
+ done
+ echo "Reached a fixed point."
+}
+
+# keep: snapshot the current output files as "$in.keep*" so the next
+# compare can detect whether another pass changed anything.
+function keep {
+ echo "Keeping files..."
+ for f in "$in"*; do
+ if ! [ "$f" != "${f%.keep*}" ]; then
+ suf="${f#$in}"
+ \cp -p "$in$suf" "$in.keep$suf"
+ fi
+ done
+}
+
+# clean: remove the ".keep" snapshots once a fixed point is reached.
+function clean {
+ echo "Cleaning up kept files..."
+ rm -f "$in.keep"*
+}
+
+# fail <code>: report the compiler's exit code and propagate it.
+function fail {
+ echo "Compiler exited with code $1."
+ # Remove output files here, a la .DELETE_ON_ERROR?
+ exit $1
+}
+
+# Re-run the compiler until the outputs stop changing (cross-references
+# settle), up to $limit passes.
+iter=0
+keep
+run || fail $?
+limit=10
+while ! compare; do
+ iter=$(($iter + 1))
+ if [ $iter -ge $limit ]; then
+ echo "Did not reach a fixed point in $limit tries."
+ exit 2
+ fi
+ keep
+ run || fail $?
+done
+clean
+echo "Successful."
--- /dev/null
+#!/bin/bash
+# rpm-audit: verify that the installed RPM set exactly matches a fresh
+# dependency solution of the "wants" listed in $WANTS_FILE. ~ 2017-11-12
+
+WANTS_FILE=/usr/local/etc/rpm-wants
+
+set -e
+set -o pipefail
+# XXX: $BASH_COMMAND may be wrong with pipefail, but it's better than what we had before.
+trap 'echo "Unexpected error in command: $BASH_COMMAND"; if [ -n "$tmpdir" ] && [ -r "$tmpdir/dnf.log" ]; then cat "$tmpdir/dnf.log"; fi ' ERR
+
+if [ $EUID != 0 ]; then
+ echo >&2 'Unfortunately, rpm-audit can only run as root because it calls'
+ echo >&2 '"dnf install --assumeno".'
+ exit 1
+fi
+
+tmpdir="$(mktemp -d --tmpdir rpm-audit.XXXXXX)"
+trap 'rm -rf "$tmpdir"' EXIT
+
+# Strip comments (a # at start of line or after whitespace) and blank lines
+# from the wants file; word splitting turns it into entries.
+uncommented_entries=($(<"$WANTS_FILE" sed -re 's/(^|[^[:space:]])[[:space:]]*#.*$/\1/' | egrep -v '^$'))
+wants=()
+modules=()
+for e in "${uncommented_entries[@]}"; do
+ case "$e" in
+ (module:*) modules+=("${e#module:}");;
+ (*) wants+=("$e");;
+ esac
+done
+
+function rpmq {
+ # Set query format to match "dnf repoquery".
+ #
+ # Looks like rpm exits with the number of unmatched arguments, so we
+ # have to swallow any exit code. :( ~ Matt 2018-06-01
+ rpm -q --qf '%{NAME}-%|EPOCH?{%{EPOCH}}:{0}|:%{VERSION}-%{RELEASE}.%{ARCH}\n' "$@" || true
+}
+function strip_zero_epoch {
+ sed -e 's/-0:/-/'
+}
+function indent {
+ sed -e 's/^/    /'
+}
+function filter_packages {
+ # Exempt kernel-{core,devel,modules{,-extra}} from audit because of multiple
+ # installed versions: not worth trying to do something better.
+ grep -Ev '^(gpg-pubkey|kernel-core|kernel-devel|kernel-modules|kernel-modules-extra)-[^-]+-[^-]+$'
+}
+
+# Check 1: installed want providers vs. userinstalled
+
+# "dnf repoquery --installed --whatprovides" doesn't seem to accept multiple
+# arguments, and running it once per entry would be unacceptably slow. See how
+# long we can get away with this before looking for another solution.
+#
+# "rpm -q --whatprovides glibc.i686" does not work. Take all wants that look
+# like they have an architecture and use "rpm -q" instead. This may fail if
+# someone writes an actual provide of a package-like name with an architecture.
+# ~ Matt 2017-11-16
+pkg_arch_wants=()
+normal_wants=()
+for w in "${wants[@]}"; do
+ case "$w" in
+ (*.i686|*.x86_64)
+ pkg_arch_wants+=("$w");;
+ (*)
+ normal_wants+=("$w");;
+ esac
+done
+# Since F29, kernel packages keep getting unmarked as userinstalled. Not
+# investigating; just exclude them here. ~ Matt 2019-01-22
+{ rpmq --whatprovides "${normal_wants[@]}" && rpmq "${pkg_arch_wants[@]}"; } | strip_zero_epoch | filter_packages | sort | uniq >"$tmpdir/wants-installed"
+dnf repoquery --userinstalled | strip_zero_epoch | filter_packages | sort >"$tmpdir/userinstalled"
+
+if ! cmp -s "$tmpdir/wants-installed" "$tmpdir/userinstalled"; then
+ echo "Installed wants that are not marked as userinstalled:"
+ comm -2 -3 "$tmpdir/wants-installed" "$tmpdir/userinstalled" | indent
+ echo "Userinstalled packages that are not wants:"
+ comm -1 -3 "$tmpdir/wants-installed" "$tmpdir/userinstalled" | indent
+ echo "To correct, edit the wants file or use 'dnf mark {install|remove} PACKAGE_NAME'."
+ exit 1
+fi
+
+# Check 2: fresh solution of wants vs. installed (should catch different choice of provider, packages needing update, orphans, and problems)
+
+platform_id="$(sed -nre 's,^PLATFORM_ID="(.*)"$,\1,p' /etc/os-release)"
+
+# Solve in a scratch installroot so nothing on the real system is touched.
+SANDBOX_DNF=(dnf
+ --installroot="$tmpdir/installroot"
+ --setopt=cachedir=/var/cache/dnf # Share main cache to save time
+ --disableplugin=qubes-hooks # Qubes plugin takes unwanted actions when using an installroot
+ --releasever=/ --setopt=module_platform_id=$platform_id
+)
+
+"${SANDBOX_DNF[@]}" --assumeyes module enable "${modules[@]}" &>"$tmpdir/dnf.log"
+
+# --verbose for "Package ... will be installed" output. The human-readable list
+# of packages to install is harder to scrape because of line wrapping.
+#
+# "dnf --assumeno" exits 1. I don't see an obvious way to distinguish this from
+# real errors. However, real errors are likely to generate a diff anyway.
+{ "${SANDBOX_DNF[@]}" --verbose --assumeno install --best "${wants[@]}" 2>&1 || [ $? == 1 ]; } | tee -a "$tmpdir/dnf.log" | sed -nre 's/^---> Package ([^ ]+)\.([^ .]+) ([^ ]+) will be installed$/\1-\3.\2/p' | filter_packages | sort >"$tmpdir/solved"
+# Looks like "dnf repoquery --installed" doesn't catch extras.
+rpmq -a | filter_packages | strip_zero_epoch | sort >"$tmpdir/installed"
+
+if ! cmp -s "$tmpdir/solved" "$tmpdir/installed"; then
+ echo "Packages in fresh solution that are not installed:"
+ comm -2 -3 "$tmpdir/solved" "$tmpdir/installed" | indent
+ echo "Installed packages that are not in fresh solution:"
+ comm -1 -3 "$tmpdir/solved" "$tmpdir/installed" | indent
+ echo "To correct, install or remove packages to match the canonical solution,"
+ echo "or override the default choice of providers by adding the desired"
+ echo "providers to the wants file."
+ exit 1
+fi
+
+echo "RPM package set audit passed!"
--- /dev/null
+#!/bin/bash
+# Tool to maintain my pattern for RPM overrides, with the live path a symlink to
+# mattnew. Originally 2017-04-07. Revamped 2017-09-05 to automate the merge
+# like rpmconf-matt; I think this is as much work as I want to put in for now.
+set -e
+# "let" has stupid behavior of exiting 1. I'll use $(()) instead.
+unfinished_files=0
+# Prune /mnt to avoid error message about /mnt/root loop. There shouldn't be
+# any overrides under /mnt . ~ 2017-11-11
+for f_new in $(find / -xdev -path '/mnt' -prune -or -name '*.mattnew' -print); do
+ f_live="${f_new%.mattnew}"
+ expected_target="$(basename "$f_new")"
+ if link_target="$(readlink "$f_live")" && [ "$link_target" == "$expected_target" ]; then
+ : # good
+ else
+ #echo "$f_live"
+ echo "- Merging $f_live."
+ if rpmconf-matt-merge "$f_live.mattmerge" "$f_live.mattorig" "$f_live.mattnew" "$f_live"; then
+ # Commit the merge.
+ (
+ set -x
+ rm "$f_live.mattorig" "$f_live.mattnew"
+ mv "$f_live.mattmerge" "$f_live.mattnew"
+ mv "$f_live" "$f_live.mattorig"
+ ln -s "$expected_target" "$f_live"
+ )
+ echo "- Merged $f_live."
+ else
+ unfinished_files=$((unfinished_files+1))
+ echo "- Leaving $f_live merge unfinished."
+ fi
+ fi
+done
+if [ $unfinished_files -eq 0 ]; then
+ echo 'rpm overrides merge complete!'
+else
+ echo "No more files to consider. $unfinished_files left unfinished."
+fi
--- /dev/null
+#!/usr/bin/env python3
+# rpmconf-matt [--sync-only]
+
+# Known blockers to submission to Fedora: Basically removing assumptions specific to my setup.
+# - Decide how to name it vs. existing "rpmconf" in Fedora
+# - Make it configurable what part of the filesystem to scan. Is there a
+# default that's sufficiently safe for people to use on systems with untrusted
+# users?
+# - Should save its own files in another tree rather than blacklisting
+# directories known to cause problems if it adds its files there.
+# - Make diff program configurable / smarter default
+
+# TODO: Move the "sync" part into a dnf plugin that runs after every transaction
+# to make it more robust. Perhaps there can be an option to run the merge
+# interactively. With tighter integration, we might be able to skip scanning the
+# whole filesystem, but that would add one more kind of state that can get wrong.
+
+# Python 3 conversion, 2017-11-12
+# One could argue we should treat filenames and RPM names as bytes, but it's too
+# much of a pain to fix all the resulting fallout now, and we don't expect
+# adversarial names anyway. ~ Matt 2017-11-12
+#
+# Fedora 30 -> 32 upgrade: remove some .decode() calls (guess rpm-python changed
+# some things from bytes to str) ~ Matt 2020-06-01
+
+import collections
+import hashlib
+import os
+import pwd, grp
+import re
+import shutil
+import stat
+import subprocess
+import sys
+import tempfile
+import time
+
+import rpm
+
+def getFileDigest(path, algoHint):
+ # TODO: Get the digest algorithm from RPM??
+ # File a bug to have rpmfiDigestAlgo exposed to Python?
+ if len(algoHint) == 32: # adobe packages
+ algo = hashlib.md5
+ elif len(algoHint) == 64:
+ algo = hashlib.sha256
+ else:
+ raise ValueError('Failed to guess digest algorithm')
+ with open(path, 'rb') as f:
+ return algo(f.read()).hexdigest()
+
+def setAttributes(fe, path):
+    """Apply the ownership, permission bits, and mtime recorded in the RPM
+    file entry `fe` to the file at `path`.
+
+    Raises KeyError if the recorded user or group does not exist locally.
+    """
+    # C.f. https://github.com/rpm-software-management/rpm/blob/rpm-4.13.0-rc1/lib/fsm.c#L713
+    # If user or group is not found, we'll get a KeyError.
+    # NOTE: chown runs before chmod -- chown can clear setuid/setgid bits,
+    # so the chmod afterwards (re)establishes the full recorded mode.
+    os.chown(path, pwd.getpwnam(fe.user).pw_uid, grp.getgrnam(fe.group).gr_gid)
+    os.chmod(path, fe.mode & 0o7777)
+    # Both atime and mtime are set to the recorded mtime.
+    os.utime(path, (fe.mtime, fe.mtime))
+
+def makeConfFindCommand(expr):
+ # Make a find command intended to catch any files ever managed by RPM,
+ # without getting confused by trees such as /proc
+ # or even malicious user-created files. Unfortunately, we can't just ask RPM
+ # which files it previously managed. The following heuristic should work for
+ # ml*: root filesystem (and avoid an error on /mnt/root, which -xdev doesn't
+ # catch), and exclude world-writable dirs such as /var/tmp.
+ # TODO: Make the search criteria more general.
+ return r"find / -xdev \( -path /mnt -or -path /var/www/html -or -perm /002 \) -prune -or " + expr
+
+def doMerge(a, b, c, output):
+ # TODO: Make diff program customizable.
+ # FIXME: Stop leaving ".orig" files around. At least name them distinctively.
+ # kdiff3 wrapper to work around issue with Qt apps running as root under Qubes ~ 2015-08-20
+ args = ['rpmconf-matt-merge', output, a if os.path.lexists(a) else '', b, c]
+ subprocess.check_call(args)
+
+class NeededPackage(object):
+    """A package whose payload must be downloaded to recover pristine
+    config-file content.
+
+    Attributes:
+      nvra: name-version-release.arch string, used to locate the .rpm file.
+      paths: maps a live config path to a tuple of
+        (expected hex digest, path to install the pristine copy at).
+    """
+    def __init__(self, header):
+        # `header` is an rpm header object; keep only the nvra from it.
+        self.nvra = header.nvra
+        self.paths = dict() # live path -> (digest, path to download to)
+
+def rpmconf(syncOnly=False):
+ if os.geteuid() != 0:
+ print('This tool needs to run as root.', file=sys.stderr)
+ sys.exit(1)
+
+ # First, rename any rpmsave files and corresponding rpmbase files before we
+ # would overwrite the rpmbase files. We'll do this to any file type, even
+ # though there will only ever be base files for regular files.
+ print('Scanning for rpmsave files that need to be stamped.')
+ saveStamp = str(int(time.time()))
+ for savePath in subprocess.check_output(makeConfFindCommand("-name '*.rpmsave' -print"), shell=True).decode().splitlines():
+ livePath = savePath[:-len('.rpmsave')]
+ liveBasePath = livePath + '.rpmbase'
+ stampedSavePath = livePath + '.rpmsave-' + saveStamp
+ stampedSaveBasePath = stampedSavePath + '-base'
+ print('-- Timestamping rpmsave file for %s.' % livePath)
+ # XXX: Make sure we are not clobbering existing files?
+ os.rename(savePath, stampedSavePath)
+ if os.path.lexists(liveBasePath):
+ os.rename(liveBasePath, stampedSaveBasePath)
+
+ filesToMerge = collections.defaultdict(lambda: [[], False]) # live path -> (list of rpmsave stamps, bool if rpmnew)
+
+ print('Scanning for config files that need base files created.')
+ filesDone = {} # live path -> (nevra, digest)
+ needPackages = {} # nevra -> NeededPackage; no more defaultdict because NeededPackage needs header
+ ts = rpm.ts()
+ mi = ts.dbMatch()
+ for header in mi:
+ nevra = header.nevra
+ for fe in rpm.files(header):
+ # Only installed config files.
+ if fe.state != rpm.RPMFILE_STATE_NORMAL: continue
+ if not (fe.fflags & rpm.RPMFILE_CONFIG): continue
+ # For a ghost, we have no base content to write. Probably best to let this be a two-way merge if the file becomes non-ghost later.
+ if fe.fflags & rpm.RPMFILE_GHOST: continue
+ # For now, we only handle regular files. Conflicts on config symlinks seem to be rare.
+ if not stat.S_ISREG(fe.mode): continue
+ if fe.caps != '': raise NotImplementedError('File capabilities are not implemented: %s' % fe.name)
+
+ # Extension point directories whose readers can't handle additional *.rpm* files.
+ # /etc/skel/ is not actually causing a problem but leads to ugly persistent state.
+ # TODO: Find a better workaround.
+ if re.search('^(/etc/skel/|/etc/rpm/macros|/etc/logrotate.d/|/etc/grub.d/)', fe.name):
+ continue
+
+ # We need this check to avoid thrashing a conflicted base file.
+ if fe.name in filesDone:
+ (oldNevra, oldDigest) = filesDone[fe.name]
+ if fe.digest != oldDigest:
+ print('Conflict at %s: have %s from %s, ignoring %s from %s' %
+ (fe.name, oldDigest, oldNevra, fe.digest, nevra),
+ file=sys.stderr)
+ continue
+ filesDone[fe.name] = (nevra, fe.digest)
+
+ path_new = fe.name + '.rpmnew'
+ download_path = None
+ if os.path.lexists(path_new):
+ filesToMerge[fe.name][1] = True
+ # The live config file is not based on the current DB entry.
+ # Hopefully we already have a base for the live config file; if not, there's nothing we can do about it now.
+ # We do want to make sure the rpmnew file is correct.
+ if getFileDigest(path_new, fe.digest) != fe.digest:
+ download_path = path_new
+ else:
+ path_base = fe.name + '.rpmbase'
+ if not (os.path.lexists(path_base) and getFileDigest(path_base, fe.digest) == fe.digest):
+ if (os.path.lexists(fe.name) and getFileDigest(fe.name, fe.digest) == fe.digest):
+ # The live file has the original content.
+ # Copy the content and set the original attributes manually.
+ path_tmp = fe.name + '.rpmbase-tmp'
+ shutil.copyfile(fe.name, path_tmp)
+ setAttributes(fe, path_tmp)
+ os.rename(path_tmp, path_base)
+ print('- %s: Copied %s from %s.' % (nevra, path_base, fe.name))
+ else:
+ download_path = path_base
+ if download_path:
+ if nevra not in needPackages:
+ needPackages[nevra] = NeededPackage(header)
+ needPackages[nevra].paths[fe.name] = (fe.digest, download_path)
+ print('- %s: Need to download %s.' % (nevra, download_path))
+
+ if needPackages:
+ print('Downloading %d packages.' % len(needPackages))
+ packages_tmpdir = tempfile.mkdtemp(prefix='rpmconf-packages')
+ # Make sure the cpio archive is covered by a valid signature
+ # before we use it. Since dnf-download-signed currently doesn't
+ # check that the package is the one we asked for, this only
+ # ensures that the cpio archive is safe to extract. Then we
+ # check the digest on each needed file before using it. We're
+ # still correct if an attacker substitutes a different signed
+ # package in which the files we need have the same content.
+ # ~ Matt 2019-05-18
+ #
+ # Ideally, we'd only require a signature if the package came
+ # from a repository with gpgcheck=1. Right now, I use no
+ # unsigned packages. If I build my own packages again, I can
+ # either sign them or just fix them manually if they reach this
+ # code.
+ # ~ Matt 2017-11-11
+ subprocess.check_call(['dnf-download-signed'] + list(needPackages), cwd=packages_tmpdir)
+ for nevra, neededPkg in needPackages.items():
+ packagePath = '%s/%s.rpm' % (packages_tmpdir, neededPkg.nvra)
+ extract_tmpdir = tempfile.mkdtemp(prefix='rpmconf-extract-%s' % nevra)
+ cpioNeedPaths = ['.' + p for p in neededPkg.paths] # go figure
+ subprocess.check_call(['/bin/bash', '-c', 'p="$1"; shift; rpm2cpio "$p" | cpio --extract --quiet --preserve-modification-time --make-directories "$@"', '--', packagePath] + cpioNeedPaths, cwd=extract_tmpdir)
+ print('- Extracted %s.' % nevra)
+ for livePath, (needDigest, downloadPath) in neededPkg.paths.items():
+ tmpPath = extract_tmpdir + livePath
+ tmpDigest = getFileDigest(tmpPath, needDigest)
+ if tmpDigest != needDigest:
+ print('%s: got digest %s, wanted %s' % (livePath, tmpDigest, needDigest), file=sys.stderr)
+ continue
+ # This is easiest in case it is cross-filesystem, etc. mv should preserve all attributes.
+ subprocess.check_call(['mv', '-f', tmpPath, downloadPath])
+ print('-- Installed %s.' % downloadPath)
+ shutil.rmtree(extract_tmpdir)
+ shutil.rmtree(packages_tmpdir)
+
+ print('Scanning for obsolete rpmnew files.')
+ for newPath in subprocess.check_output(makeConfFindCommand("-type f -name '*.rpmnew' -print"), shell=True).decode().splitlines():
+ livePath = newPath[:-len('.rpmnew')]
+ if livePath not in filesToMerge: # only rpmnew files will be recorded in filesToMerge yet
+ print('-- Deleting %s. UNTESTED' % newPath)
+ #os.unlink(newPath)
+
+ # Remove rpmbase files for config files that are no longer managed, to not leave cruft.
+ # This intentionally does not remove rpmsave-base files. ~ 2014-07-03
+ # Note: If the config file had been modified, RPM would move it to
+ # rpmsave on package removal (whether or not it was noreplace), so we'd
+ # stamp the base file before we get here. ~ 2017-11-12
+ print('Scanning for obsolete rpmbase files.')
+ for basePath in subprocess.check_output(makeConfFindCommand("-type f -name '*.rpmbase' -print"), shell=True).decode().splitlines():
+ livePath = basePath[:-len('.rpmbase')]
+ if livePath not in filesDone:
+ print('-- Deleting %s.' % basePath)
+ os.unlink(basePath)
+
+ # "sync vs. merge" terminology is inspired by Perforce. We'll want a
+ # better term for "sync" before releasing this to the public.
+ if syncOnly:
+ print('rpmconf sync complete.')
+ return
+ else:
+ print('rpmconf sync complete. You can interrupt if you don\'t wish to merge now.')
+
+ print('Scanning for rpmsave files.')
+ for savePath in subprocess.check_output(makeConfFindCommand("-type f -name '*.rpmsave-*' -print"), shell=True).decode().splitlines():
+ m = re.search('^(.*)\.rpmsave-(\d+)$', savePath)
+ if not m: continue
+ (livePath, stamp) = (m.group(1), int(m.group(2)))
+ filesToMerge[livePath][0].append(stamp)
+
+ # Nested function for the ability to return from a nested loop...
+ def mergeFile(livePath):
+ print('- Merging %s.' % livePath)
+ (saveStamps, haveRpmnew) = filesToMerge[livePath]
+ saveStamps.sort() # mutates the original, that's OK
+ # TODO: If a package was uninstalled, we could have an rpmsave and rpmsave-base with no live.
+ # We want to alert the user that the configuration change is no longer having an effect. How?
+ # Currently kdiff3 comes up with a bunch of error dialogs and I have to manually intervene.
+ for i in range(len(saveStamps)):
+ c_output = '%s.rpmsave-%d' % (livePath, saveStamps[i+1]) if i+1 < len(saveStamps) else livePath
+ b = '%s.rpmsave-%d' % (livePath, saveStamps[i])
+ a = b + '-base'
+ try:
+ doMerge(a, b, c_output, c_output)
+ except subprocess.CalledProcessError:
+ print('- Leaving %s merge unfinished.' % livePath)
+ return 1
+ os.unlink(b)
+ if os.path.lexists(a): os.unlink(a)
+ if haveRpmnew:
+ try:
+ doMerge(livePath + '.rpmbase', livePath, livePath + '.rpmnew', livePath)
+ except subprocess.CalledProcessError:
+ print('- Leaving %s merge unfinished.' % livePath)
+ return 1
+ os.rename(livePath + '.rpmnew', livePath + '.rpmbase')
+ print('- Merged %s.' % livePath)
+ return 0
+
+ unfinishedFiles = 0
+ for livePath in filesToMerge:
+ unfinishedFiles += mergeFile(livePath)
+
+ if unfinishedFiles == 0:
+ print('rpmconf merge complete!')
+ else:
+ print('No more files to consider. %d files left unfinished.' % unfinishedFiles)
+
+if __name__ == '__main__':
+ # TODO: Adopt a real option-parsing library.
+ args = sys.argv[1:]
+ if args == []:
+ rpmconf()
+ elif args == ['--sync-only']:
+ rpmconf(syncOnly=True)
+ else:
+ print('Unrecognized arguments.', file=sys.stderr)
+ sys.exit(1)
--- /dev/null
+#!/usr/bin/perl
+# setexec: turns a file's executability on or off, same semantics as rsync's --executability
+# usage: setexec { + | - | --reference=<file> } <file> [...]
+
+use strict;
+use warnings;
+
+my $usage = "usage: setexec { + | - | --reference=<file> } <file> [...]\n";
+
+# BUGFIX: running with no arguments used to match/compare an undef $cmd
+# (uninitialized-value warnings) before dying; now it fails cleanly.
+my $cmd = shift @ARGV;
+die $usage unless defined $cmd;
+
+if ($cmd =~ /^--reference=(.*)$/) {
+    # Inherit executability (+ or -) from the reference file.
+    my $ref_file = $1;
+    my @stbuf = stat($ref_file);
+    die "setexec: stat($ref_file): $!\n" unless @stbuf;
+    my $ref_perms = $stbuf[2] & 07777;
+    $cmd = (($ref_perms & 0111) == 0) ? '-' : '+';
+} elsif ($cmd ne '+' && $cmd ne '-') {
+    die $usage;
+}
+
+# docmd(cmd, perms): return the new permission bits.
+# '+': turn on an execute bit wherever the corresponding read bit is set
+# (the rule rsync's --executability applies). '-': strip all execute bits.
+sub docmd {
+    my ($mode, $perms) = @_;
+    return $perms | (($perms & 0444) >> 2) if $mode eq '+';
+    return $perms & ~0111;
+}
+
+FILE: foreach my $file (@ARGV) {
+    my @stbuf = stat($file);
+    unless (@stbuf) {
+        warn "setexec: stat($file): $!, skipping file\n";
+        next FILE;
+    }
+    my $perms = docmd($cmd, $stbuf[2] & 07777);
+    chmod($perms, $file) or warn "setexec: chmod($perms, $file): $!, skipping file\n";
+}
--- /dev/null
+#!/bin/bash
+# stat2: Display just about everything about a file except its data -- Matt McCutchen
+
+indent=$'|\t'
+while [ "$#" != "0" ] ; do
+ echo "File $1"
+ echo "+-- Basic i-node information from \`stat':"
+ stat -- "$1" | sed -e "s/^/$indent/"
+ echo "+-- File version/generation and attributes from \`lsattr':"
+ lsattr -d -v -- "$1" | (read version attributes filenameignored && echo "$indent$version $attributes")
+ if [ ! -h "$1" ] ; then
+ echo "+-- POSIX ACLs from \`getfacl':"
+ getfacl --absolute-names --tabular -- "$1" | tail -n +2 | head -n -1 | sed -e "s/^/$indent/"
+ fi
+ echo "+-- Extended attributes in all namespaces from \`getfattr':"
+ getfattr --absolute-names -h -d -m '' -- "$1" | tail -n +2 | head -n -1 | sed -e "s/^/$indent/"
+ echo "'---------------------------------------"
+ shift
+done
--- /dev/null
+#!/usr/bin/perl
+
+# GNU Stow - manage the installation of multiple software packages
+# Copyright (C) 1993, 1994, 1995, 1996 by Bob Glickstein
+# Copyright (C) 2000,2001 Guillaume Morin
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+# $Id: stow.in,v 1.8 2002/01/05 11:27:01 gmorin Exp $
+# $Source: /cvsroot/stow/stow/stow.in,v $
+# $Date: 2002/01/05 11:27:01 $
+# $Author: gmorin $
+
+# Add ".dontfold" support. A ".dontfold" file inside a package prevents its
+# containing directory from being folded even if no other package contributes
+# to that directory. Note that the ancestor directories up to the package root
+# should have ".dontfold" files too or they might get folded. One must ensure
+# that each directory in the target gets a ".dontfold" from at most one package,
+# or stow will report conflicts.
+# - Matt 2009-10-19
+
+# Add support for poststow scripts. - Matt 2009-10-20
+
+require 5.005;
+use POSIX;
+use File::Basename;
+
+# Program name for messages = basename of $0.
+$ProgramName = $0;
+$ProgramName =~ s,.*/,,;
+
+$Version = '1.3.3';
+
+# Global option flags, filled in by the hand-rolled parser below.
+$Conflicts = 0;      # -c: only report conflicts (implies -n)
+$Delete = 0;         # -D: unstow instead of stow
+$NotReally = 0;      # -n: dry run
+$Verbose = 0;        # -v: verbosity level 0..3
+$ReportHelp = 0;
+$Stow = undef;       # -d: stow directory (default: cwd)
+$Target = undef;     # -t: target directory (default: parent of stow dir)
+$Restow = 0;         # -R: unstow, then stow
+$PoststowOnly = 0;   # --poststow-only: just run poststow scripts
+
+
+# FIXME: use Getopt::Long
+# NOTE: this parser leans on $' (postmatch): after each regex match it holds
+# the remainder of the option text. The nested (a(b(c)?)?)? regexes accept
+# any unambiguous abbreviation of a long option name.
+while (@ARGV && ($_ = $ARGV[0]) && /^-/) {
+    $opt = $';     # option text after the leading '-'
+    shift;
+    last if /^--$/;
+
+    if ($opt =~ /^-/) {
+        # Long option (--foo); $' now holds the text after the second '-'.
+        $opt = $';
+        if ($opt =~ /^no?$/i) {
+            $NotReally = 1;
+        } elsif ($opt =~ /^c(o(n(f(l(i(c(ts?)?)?)?)?)?)?)?$/i) {
+            $Conflicts = 1;
+            $NotReally = 1;
+        } elsif ($opt =~ /^dir?/i) {
+            $remainder = $';
+            if ($remainder =~ /^=/) {
+                $Stow = $'; # the stuff after the =
+            } else {
+                $Stow = shift;
+            }
+        } elsif ($opt =~ /^t(a(r(g(et?)?)?)?)?/i) {
+            $remainder = $';
+            if ($remainder =~ /^=/) {
+                $Target = $'; # the stuff after the =
+            } else {
+                $Target = shift;
+            }
+        } elsif ($opt =~ /^verb(o(se?)?)?/i) {
+            $remainder = $';
+            if ($remainder =~ /^=(\d+)/) {
+                $Verbose = $1;
+            } else {
+                ++$Verbose;
+            }
+        } elsif ($opt =~ /^de(l(e(te?)?)?)?$/i) {
+            $Delete = 1;
+        } elsif ($opt =~ /^r(e(s(t(ow?)?)?)?)?$/i) {
+            $Restow = 1;
+        } elsif ($opt =~ /^p(o(s(t(s(t(o(w(-(o(n(ly?)?)?)?)?)?)?)?)?)?)?)?$/i) {
+            $PoststowOnly = 1;
+        } elsif ($opt =~ /^vers(i(on?)?)?$/i) {
+            &version();
+        } else {
+            &usage(($opt =~ /^h(e(lp?)?)?$/) ? undef :
+                   "unknown or ambiguous option: $opt");
+        }
+    } else {
+        # Bundled short options (-nvR). -d/-t may take the rest of the
+        # bundle as their argument, else the next word.
+        @opts = split(//, $opt);
+        while ($_ = shift(@opts)) {
+            if ($_ eq 'n') {
+                $NotReally = 1;
+            } elsif ($_ eq 'c') {
+                $Conflicts = 1;
+                $NotReally = 1;
+            } elsif ($_ eq 'd') {
+                $Stow = (join('', @opts) || shift);
+                @opts = ();
+            } elsif ($_ eq 't') {
+                $Target = (join('', @opts) || shift);
+                @opts = ();
+            } elsif ($_ eq 'v') {
+                ++$Verbose;
+            } elsif ($_ eq 'D') {
+                $Delete = 1;
+            } elsif ($_ eq 'R') {
+                $Restow = 1;
+            } elsif ($_ eq 'V') {
+                &version();
+            } else {
+                &usage(($_ eq 'h') ? undef : "unknown option: $_");
+            }
+        }
+    }
+}
+
+# --poststow-only takes no package arguments; every other mode needs some.
+if ($PoststowOnly) {
+    &usage("--poststow-only should be used by itself") if @ARGV;
+} else {
+    &usage("No packages named") unless @ARGV;
+}
+
+# Changing dirs helps a lot when soft links are used
+$current_dir = &getcwd;
+if ($Stow) {
+    # BUGFIX: the old message here said "target tree", but this chdir is into
+    # the *stow* tree; the wrong name made failures confusing to diagnose.
+    chdir($Stow) || die "Cannot chdir to stow tree $Stow ($!)\n";
+}
+
+# This prevents problems if $Target was supplied as a relative path
+$Stow = &getcwd;
+
+chdir($current_dir) || die "Your directory does not seem to exist anymore ($!)\n";
+
+# Default target: parent of the stow dir, but only when the stow dir is
+# literally named "stow" (safety check against trashing the wrong parent).
+unless ($Target) {
+    if (basename($Stow) eq 'stow') {
+        $Target = &parent($Stow);
+    } else {
+        die "Not using the parent of the stow directory as the target because the basename of\n"
+            . "the stow directory is not \`stow'. This is a safety feature in case you\n"
+            . "mistakenly run this program in the target directory, to avoid trashing its\n"
+            . "parent. If you are sure you have the right stow directory, specify the target\n"
+            . "with the --target option to bypass this check.\n";
+    }
+}
+
+chdir($Target) || die "Cannot chdir to target tree $Target ($!)\n";
+$Target = &getcwd;
+
+# Package names are plain directory entries directly under the stow dir.
+foreach $package (@ARGV) {
+    $package =~ s,/+$,,; # delete trailing slashes
+    if ($package =~ m,/,) {
+        die "$ProgramName: slashes not permitted in package names\n";
+    }
+}
+
+# -D and -R unstow first; @Collections is read dynamically by Unstow.
+if (!$PoststowOnly && ($Delete || $Restow)) {
+    @Collections = @ARGV;
+    &Unstow('', &RelativePath($Target, $Stow));
+}
+
+# Plain stow and the second half of -R.
+if (!$PoststowOnly && (!$Delete || $Restow)) {
+    foreach $Collection (@ARGV) {
+        warn "Stowing package $Collection...\n" if $Verbose;
+        &StowContents($Collection, &RelativePath($Target, $Stow));
+    }
+}
+
+# Poststow scripts run in every mode except dry runs.
+if (!$NotReally) {
+    &RunPoststowScripts();
+}
+
+# Return the longest common leading directory of two paths
+# (e.g. '/usr/local/bin' and '/usr/local/lib' -> '/usr/local').
+sub CommonParent {
+    my ($dir1, $dir2) = @_;
+    my @parts1 = split(/\/+/, $dir1);
+    my @parts2 = split(/\/+/, $dir2);
+    my $common = '';
+
+    # Accumulate leading components while the two paths agree.
+    while (@parts1 && @parts2) {
+        my $head = shift(@parts1);
+        last unless $head eq shift(@parts2);
+        $common .= "$head/";
+    }
+    chop($common);   # drop the trailing slash
+    return $common;
+}
+
+# Find the relative path between
+# two paths given as arguments.
+
+sub RelativePath {
+    local($a, $b) = @_;
+    local($c) = &CommonParent($a, $b);
+    local(@a) = split(/\/+/, $a);
+    local(@b) = split(/\/+/, $b);
+    local(@c) = split(/\/+/, $c);
+
+    # if $c == "/something", scalar(@c) >= 2
+    # but if $c == "/", scalar(@c) == 0
+    # but we want 1
+    my $length = scalar(@c) ? scalar(@c) : 1;
+    # Drop the shared prefix from both paths...
+    splice(@a, 0, $length);
+    splice(@b, 0, $length);
+
+    # ...then climb out of what remains of $a ('..' per component) and
+    # descend into what remains of $b.
+    unshift(@b, (('..') x (@a + 0)));
+    &JoinPaths(@b);
+}
+
+# Concatenate the given paths, collapsing repeated slashes and empty
+# components. The result is absolute iff the first argument is.
+sub JoinPaths {
+    my @components;
+    my $prefix = ($_[0] =~ /^\//) ? '/' : '';
+
+    foreach my $path (@_) {
+        push @components, grep { $_ ne '' } split(/\/+/, $path);
+    }
+    return $prefix . join('/', @components);
+}
+
+# Unstow(targetdir, stow): recursively remove links into @Collections under
+# $Target/$targetdir. Returns ($pure, $othercollection): $pure is true when
+# everything left in the directory belongs to one single other package
+# ($othercollection), i.e. the directory is a candidate for folding.
+sub Unstow {
+    local($targetdir, $stow) = @_;
+    local(@contents);
+    local($content);
+    local($linktarget, $stowmember, $collection);
+    local(@stowmember);
+    local($pure, $othercollection) = (1, '');
+    local($subpure, $subother);
+    local($empty) = (1);
+    local(@puresubdirs);
+
+    # Never touch the stow dir itself or trees opted out via a .stow marker.
+    return (0, '') if (&JoinPaths($Target, $targetdir) eq $Stow);
+    return (0, '') if (-e &JoinPaths($Target, $targetdir, '.stow'));
+    warn sprintf("Unstowing in %s\n", &JoinPaths($Target, $targetdir))
+        if ($Verbose > 1);
+    if (!opendir(DIR, &JoinPaths($Target, $targetdir))) {
+        # BUGFIX: the old message interpolated $dir, which is not defined in
+        # this sub (it printed an empty name). Also return here instead of
+        # falling through to readdir() on a handle that never opened; the
+        # result (0, '') matches what the fall-through ultimately produced.
+        warn "Warning: $ProgramName: Cannot read directory \""
+            . &JoinPaths($Target, $targetdir)
+            . "\" ($!). Stow might leave some links. If you think, it does. Rerun Stow with appropriate rights.\n";
+        return (0, '');
+    }
+    @contents = readdir(DIR);
+    closedir(DIR);
+    foreach $content (@contents) {
+        next if (($content eq '.') || ($content eq '..'));
+        $empty = 0;
+        if (-l &JoinPaths($Target, $targetdir, $content)) {
+            ($linktarget = readlink(&JoinPaths($Target,
+                                               $targetdir,
+                                               $content)))
+                || die sprintf("%s: Cannot read link %s (%s)\n",
+                               $ProgramName,
+                               &JoinPaths($Target, $targetdir, $content),
+                               $!);
+            if ($stowmember = &FindStowMember(&JoinPaths($Target,
+                                                         $targetdir),
+                                              $linktarget)) {
+                @stowmember = split(/\/+/, $stowmember);
+                $collection = shift(@stowmember);
+                # Links into a package being unstowed are removed; anything
+                # else determines whether the directory stays "pure".
+                if (grep(($collection eq $_), @Collections)) {
+                    &DoUnlink(&JoinPaths($Target, $targetdir, $content));
+                } elsif ($pure) {
+                    if ($content eq '.dontfold') {
+                        $pure = 0;
+                    } elsif ($othercollection) {
+                        $pure = 0 if ($collection ne $othercollection);
+                    } else {
+                        $othercollection = $collection;
+                    }
+                }
+            } else {
+                $pure = 0;
+            }
+        } elsif (-d &JoinPaths($Target, $targetdir, $content)) {
+            # Recurse; pure subdirectories are remembered for coalescing.
+            ($subpure, $subother) = &Unstow(&JoinPaths($targetdir, $content),
+                                            &JoinPaths('..', $stow));
+            if ($subpure) {
+                push(@puresubdirs, "$content/$subother");
+            }
+            if ($pure) {
+                if ($subpure) {
+                    if ($othercollection) {
+                        if ($subother) {
+                            if ($othercollection ne $subother) {
+                                $pure = 0;
+                            }
+                        }
+                    } elsif ($subother) {
+                        $othercollection = $subother;
+                    }
+                } else {
+                    $pure = 0;
+                }
+            }
+        } else {
+            $pure = 0;
+        }
+    }
+    # This directory was an initially empty directory therefore
+    # We do not remove it.
+    $pure = 0 if $empty;
+    # Fold pure subtrees unless this whole directory will be folded by our
+    # caller anyway (top-level $targetdir '' is always coalesced here).
+    if ((!$pure || !$targetdir) && @puresubdirs) {
+        &CoalesceTrees($targetdir, $stow, @puresubdirs);
+    }
+    ($pure, $othercollection);
+}
+
+# CoalesceTrees(parent, stow, list of "tree/collection" strings): fold each
+# listed pure subdirectory: empty it, rmdir it, and -- when it belonged to a
+# single remaining collection -- replace it with one symlink into that
+# collection's package.
+sub CoalesceTrees {
+    local($parent, $stow, @trees) = @_;
+    local($tree, $collection, $x);
+
+    foreach $x (@trees) {
+        # Each entry encodes "tree/collection"; collection may be empty,
+        # in which case the directory is just removed, not re-linked.
+        ($tree, $collection) = ($x =~ /^(.*)\/(.*)/);
+        &EmptyTree(&JoinPaths($Target, $parent, $tree));
+        &DoRmdir(&JoinPaths($Target, $parent, $tree));
+        if ($collection) {
+            &DoLink(&JoinPaths($stow, $collection, $parent, $tree),
+                    &JoinPaths($Target, $parent, $tree));
+        }
+    }
+}
+
+# EmptyTree(dir): recursively delete everything inside $dir (symlinks and
+# plain files unlinked, subdirectories emptied then rmdir'ed). $dir itself
+# is left in place for the caller to remove.
+sub EmptyTree {
+    local($dir) = @_;
+    local(@contents);
+    local($content);
+
+    opendir(DIR, $dir)
+        || die "$ProgramName: Cannot read directory \"$dir\" ($!)\n";
+    # Slurp the listing and close before recursing: DIR is a shared
+    # bareword handle and must not stay open across recursive calls.
+    @contents = readdir(DIR);
+    closedir(DIR);
+    foreach $content (@contents) {
+        next if (($content eq '.') || ($content eq '..'));
+        # Test -l before -d so a symlink to a directory is unlinked,
+        # not descended into.
+        if (-l &JoinPaths($dir, $content)) {
+            &DoUnlink(&JoinPaths($dir, $content));
+        } elsif (-d &JoinPaths($dir, $content)) {
+            &EmptyTree(&JoinPaths($dir, $content));
+            &DoRmdir(&JoinPaths($dir, $content));
+        } else {
+            &DoUnlink(&JoinPaths($dir, $content));
+        }
+    }
+}
+
+# Stow every entry of package directory $dir into the target: subdirectories
+# via StowDir (which may fold or unfold), everything else via StowNondir.
+sub StowContents {
+    my ($dir, $stow) = @_;
+
+    warn "Stowing contents of $dir\n" if ($Verbose > 1);
+    opendir(DIR, &JoinPaths($Stow, $dir))
+        || die "$ProgramName: Cannot read directory \"$dir\" ($!)\n";
+    # Read the whole listing and close before recursing: DIR is a shared
+    # bareword handle, so it must not be open across recursive calls.
+    my @entries = readdir(DIR);
+    closedir(DIR);
+    foreach my $entry (@entries) {
+        next if $entry =~ /^\.\.?$/;    # skip '.' and '..'
+        if (-d &JoinPaths($Stow, $dir, $entry)) {
+            &StowDir(&JoinPaths($dir, $entry), $stow);
+        } else {
+            &StowNondir(&JoinPaths($dir, $entry), $stow);
+        }
+    }
+}
+
+# StowDir(dir, stow): stow package directory $dir ("<package>/<subpath>")
+# into the target, "unfolding" an existing single-package symlink into a
+# real directory when a second package starts contributing to it.
+sub StowDir {
+    local($dir, $stow) = @_;
+    local(@dir) = split(/\/+/, $dir);
+    local($collection) = shift(@dir);
+    # NOTE: local(), not my() -- $subdir is dynamically scoped and has
+    # historically been read by StowNewDir via that dynamic scope.
+    local($subdir) = join('/', @dir);
+    local($linktarget, $stowsubdir);
+
+    warn "Stowing directory $dir\n" if ($Verbose > 1);
+    if (-l &JoinPaths($Target, $subdir)) {
+        # Target entry is already a symlink: acceptable only if it points
+        # at a member of the stow tree.
+        ($linktarget = readlink(&JoinPaths($Target, $subdir)))
+            || die sprintf("%s: Could not read link %s (%s)\n",
+                           $ProgramName,
+                           &JoinPaths($Target, $subdir),
+                           $!);
+        ($stowsubdir =
+         &FindStowMember(sprintf('%s/%s', $Target,
+                                 join('/', @dir[0..($#dir - 1)])),
+                         $linktarget))
+            || (&Conflict($dir, $subdir), return);
+        if (-e &JoinPaths($Stow, $stowsubdir)) {
+            if ($stowsubdir eq $dir) {
+                # Already stowed exactly as requested; nothing to do.
+                warn sprintf("%s already points to %s\n",
+                             &JoinPaths($Target, $subdir),
+                             &JoinPaths($Stow, $dir))
+                    if ($Verbose > 2);
+                return;
+            }
+            if (-d &JoinPaths($Stow, $stowsubdir)) {
+                # Unfold: replace the other package's directory symlink with
+                # a real directory and stow both packages' contents into it.
+                &DoUnlink(&JoinPaths($Target, $subdir));
+                &DoMkdir(&JoinPaths($Target, $subdir));
+                &StowContents($stowsubdir, &JoinPaths('..', $stow));
+                &StowContents($dir, &JoinPaths('..', $stow));
+            } else {
+                (&Conflict($dir, $subdir), return);
+            }
+        } else {
+            # Dangling link into the stow tree: take it over.
+            &DoUnlink(&JoinPaths($Target, $subdir));
+            &StowNewDir($dir, $stow);
+        }
+    } elsif (-e &JoinPaths($Target, $subdir)) {
+        if (-d &JoinPaths($Target, $subdir)) {
+            # Real directory already exists: merge our contents into it.
+            &StowContents($dir, &JoinPaths('..', $stow));
+        } else {
+            &Conflict($dir, $subdir);
+        }
+    } else {
+        &StowNewDir($dir, $stow);
+    }
+}
+
+# StowNewDir(dir, stow): create the target entry for package directory $dir
+# ("<package>/<subpath>") from scratch. A ".dontfold" marker inside the
+# package directory forces a real directory (contents stowed individually)
+# instead of one folded symlink.
+sub StowNewDir {
+    local($dir, $stow) = @_;
+    # BUGFIX: this sub used to read $subdir from the caller's (StowDir's)
+    # dynamically-scoped local(), which silently broke if it was reached any
+    # other way. Compute the target-relative path explicitly: $dir minus its
+    # leading package-name component (the same value StowDir supplied).
+    local(@dir) = split(/\/+/, $dir);
+    shift(@dir);    # drop the package (collection) name
+    local($subdir) = join('/', @dir);
+
+    if (-e &JoinPaths($Stow, $dir, '.dontfold')) {
+        &DoMkdir(&JoinPaths($Target, $subdir));
+        &StowContents($dir, &JoinPaths('..', $stow));
+    } else {
+        &DoLink(&JoinPaths($stow, $dir),
+                &JoinPaths($Target, $subdir));
+    }
+}
+
+# StowNondir(file, stow): stow a non-directory package member by symlinking
+# $Target/<subpath> to it. An existing non-stow file, or a live link to a
+# different stow member, is reported as a conflict.
+sub StowNondir {
+    local($file, $stow) = @_;
+    local(@file) = split(/\/+/, $file);
+    local($collection) = shift(@file);
+    local($subfile) = join('/', @file);
+    local($linktarget, $stowsubfile);
+
+    if (-l &JoinPaths($Target, $subfile)) {
+        # Existing symlink: acceptable only if it points into the stow tree.
+        ($linktarget = readlink(&JoinPaths($Target, $subfile)))
+            || die sprintf("%s: Could not read link %s (%s)\n",
+                           $ProgramName,
+                           &JoinPaths($Target, $subfile),
+                           $!);
+        ($stowsubfile =
+         &FindStowMember(sprintf('%s/%s', $Target,
+                                 join('/', @file[0..($#file - 1)])),
+                         $linktarget))
+            || (&Conflict($file, $subfile), return);
+        if (-e &JoinPaths($Stow, $stowsubfile)) {
+            # Link to an existing stow member: must be exactly ours.
+            (&Conflict($file, $subfile), return)
+                unless ($stowsubfile eq $file);
+            warn sprintf("%s already points to %s\n",
+                         &JoinPaths($Target, $subfile),
+                         &JoinPaths($Stow, $file))
+                if ($Verbose > 2);
+        } else {
+            # Dangling link into the stow tree: repoint it at our member.
+            &DoUnlink(&JoinPaths($Target, $subfile));
+            &DoLink(&JoinPaths($stow, $file),
+                    &JoinPaths($Target, $subfile));
+        }
+    } elsif (-e &JoinPaths($Target, $subfile)) {
+        &Conflict($file, $subfile);
+    } else {
+        &DoLink(&JoinPaths($stow, $file),
+                &JoinPaths($Target, $subfile));
+    }
+}
+
+# Unlink $file, honoring dry-run mode; logs the action when verbose.
+sub DoUnlink {
+    my ($file) = @_;
+
+    warn "UNLINK $file\n" if $Verbose;
+    return if $NotReally;
+    unlink($file) or die "$ProgramName: Could not unlink $file ($!)\n";
+}
+
+# Remove directory $dir, honoring dry-run mode; logs the action when verbose.
+sub DoRmdir {
+    my ($dir) = @_;
+
+    warn "RMDIR $dir\n" if $Verbose;
+    return if $NotReally;
+    rmdir($dir) or die "$ProgramName: Could not rmdir $dir ($!)\n";
+}
+
+# Create symlink $name pointing at $target, honoring dry-run mode;
+# logs the action when verbose.
+sub DoLink {
+    my ($target, $name) = @_;
+
+    warn "LINK $name to $target\n" if $Verbose;
+    return if $NotReally;
+    symlink($target, $name)
+        or die "$ProgramName: Could not symlink $name to $target ($!)\n";
+}
+
+# Create directory $dir (mode 0777 before umask), honoring dry-run mode;
+# logs the action when verbose.
+sub DoMkdir {
+    my ($dir) = @_;
+
+    warn "MKDIR $dir\n" if $Verbose;
+    return if $NotReally;
+    mkdir($dir, 0777)
+        or die "$ProgramName: Could not make directory $dir ($!)\n";
+}
+
+# Report a stow/target conflict: a warning in -c (conflicts) mode,
+# fatal otherwise.
+sub Conflict {
+    my ($pkg_path, $target_path) = @_;
+    my $msg = sprintf("CONFLICT: %s vs. %s\n",
+                      &JoinPaths($Stow, $pkg_path),
+                      &JoinPaths($Target, $target_path));
+
+    if ($Conflicts) {
+        warn $msg;
+    } else {
+        die "$ProgramName: $msg";
+    }
+}
+
+# FindStowMember(start, path): resolve the (possibly relative) symlink text
+# $path against directory $start; if the result lies inside the stow tree,
+# return its stow-relative name ("package/subpath"), else return ''.
+sub FindStowMember {
+    local($start, $path) = @_;
+    local(@x) = split(/\/+/, $start);
+    local(@path) = split(/\/+/, $path);
+    local($x);
+    local(@d) = split(/\/+/, $Stow);
+
+    # Resolve the link text component by component, starting from $start.
+    while (@path) {
+        $x = shift(@path);
+        if ($x eq '..') {
+            pop(@x);
+            return '' unless @x;   # climbed above the root: not a member
+        } elsif ($x) {
+            push(@x, $x);
+        }
+    }
+    # Strip the stow-dir components off the front; any mismatch or a
+    # leftover stow component means the link is not a stow member.
+    while (@x && @d) {
+        if (($x = shift(@x)) ne shift(@d)) {
+            return '';
+        }
+    }
+    return '' if @d;
+    join('/', @x);
+}
+
+# Run every script in $Target/poststow.d (dot-entries skipped), passing the
+# target directory as the sole argument. A missing directory is not an error.
+sub RunPoststowScripts {
+    local(@scripts);
+    local($script);
+    local($psd) = &JoinPaths($Target, 'poststow.d');
+
+    if (opendir(PSD, $psd)) {
+        # good
+    } elsif ($! == ENOENT) {
+        # BUGFIX: compare errno numerically (ENOENT from POSIX, already
+        # imported at the top of the file) instead of matching the English
+        # strerror text, which failed under non-English locales.
+        # it doesn't exist, skip this step
+        warn "Poststow script directory does not exist\n" if $Verbose;
+        return;
+    } else {
+        die "$ProgramName: Cannot read poststow script directory ($!)\n";
+    }
+    warn "Running poststow scripts...\n" if $Verbose;
+    @scripts = readdir(PSD); # XXX Should we define the order?
+    closedir(PSD);
+    foreach $script (@scripts) {
+        next if ($script =~ m/^\./); # catches '.', '..', '.dontfold', and maybe others
+        warn "Running poststow script '$script'...\n" if $Verbose;
+        # A failing script is reported but does not abort the run.
+        system(&JoinPaths($psd, $script), $Target) == 0 or
+            warn "Poststow script '$script' failed\n";
+    }
+}
+
# Return the parent directory of the path formed by joining all
# arguments with '/'.  Returns '' when there is no parent component
# (e.g. a single bare name).
sub parent {
  local(@components) = split(/\/+/, join('/', @_));
  pop(@components);	# drop the last path component
  return join('/', @components);
}
+
# Print a usage summary and terminate.  When called with an error
# message $msg, the message is reported first and the exit status is
# 1; with no message (plain -h/--help) the exit status is 0.
sub usage {
  local($msg) = shift;

  # Fix: diagnostics belong on STDERR, per GNU convention, so that
  # piped/redirected output (e.g. "stow --help | pager") stays clean.
  print STDERR "$ProgramName: $msg\n" if $msg;
  print "$ProgramName (GNU Stow) version $Version\n\n";
  print "Usage: $ProgramName [OPTION ...] PACKAGE ...\n";
  print <<EOT;
  -n, --no              Do not actually make changes
  -c, --conflicts       Scan for conflicts, implies -n
  -d DIR, --dir=DIR     Set stow dir to DIR (default is current dir)
  -t DIR, --target=DIR  Set target to DIR (default is parent of stow dir)
  -v, --verbose[=N]     Increase verboseness (levels are 0,1,2,3;
                        -v or --verbose adds 1; --verbose=N sets level)
  -D, --delete          Unstow instead of stow
  -R, --restow          Restow (like stow -D followed by stow)
  --poststow-only       Run poststow scripts without (un)stowing any packages
  -V, --version         Show Stow version number
  -h, --help            Show this help
EOT
  exit($msg ? 1 : 0);
}
+
# Print the version banner (-V/--version) and exit successfully.
sub version {
  printf("%s (GNU Stow) version %s\n", $ProgramName, $Version);
  exit(0);
}
+
+# Local variables:
+# mode: perl
+# End:
--- /dev/null
#!/bin/bash
# system-update [--no-best]
#
# Update the system using Matt's RPM tools.
# https://mattmccutchen.net/utils/#rpm

# Parse arguments: only --no-best is recognized; anything else is an error.
best=true
while [ "$#" -gt 0 ]; do
  case "$1" in
    --no-best)
      best=false
      ;;
    *)
      echo >&2 "Unsupported argument $1"
      exit 1
      ;;
  esac
  shift
done

# Obviously, if we get many more variations, we'll factor this out...
if $best; then
  dnf distro-sync --best && dnf autoremove && rpm-audit && rpmconf-matt && rpm-overrides-matt
else
  # No point in trying to audit
  dnf distro-sync && dnf autoremove && rpmconf-matt && rpm-overrides-matt
fi
--- /dev/null
+#!/bin/bash
+# usage: ungitar foo-dir <foo.gitar
+
+set -e
+trap "echo 'Unexpected error!' 1>&2" ERR
+if ! [ -e "$1" ]; then
+ mkdir "$1"
+fi
+cd "$1"
+
+if [ -e '.git' ]; then
+ echo 'The destination directory is already a git repository!' 1>&2
+ exit 1
+fi
+trap "rm -rf .git" EXIT
+
+ftx .git
+git-read-tree master
+git-checkout-index --all --force