#!/bin/bash
# I, Ian Kelling, follow the GNU license recommendations at
# https://www.gnu.org/licenses/license-recommendations.en.html. They
# recommend that small programs, < 300 lines, be licensed under the
# Apache License 2.0. This file contains or is part of one or more small
# programs. If a small program grows beyond 300 lines, I plan to switch
# its license to GPL.
# Copyright 2024 Ian Kelling
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this gets sourced. shebang is just for file mode detection
# * settings
# Pick the bash history file for this session.
# LESSHISTFILE=- acts as a marker for an incognito-style session:
# disable history entirely and make `c` a plain cd wrapper.
if [[ $LESSHISTFILE == - ]]; then
HISTFILE=
c() { cd "$@"; }
elif [[ $HISTFILE ]]; then
# use an alternate history file when we are streaming.
if [[ -e $HOME/.iank-stream-on ]]; then
HISTFILE=/a/bin/data/stream_hist
else
HISTFILE=$HOME/.bh
fi
fi
# history personal
# Switch the running shell to the personal history file: wipe in-memory
# history, repoint HISTFILE, reload from disk.
hip() {
history -c
HISTFILE=$HOME/.bh
history -r
}
# history for streaming
# Switch the running shell to the streaming history file (so private
# commands don't end up on stream, and vice versa).
his() {
history -c
HISTFILE=/a/bin/data/stream_hist
history -r
}
# PATH setup via the path-add helper (dedupes / positions entries).
source /a/bin/distro-setup/path-add-function
path-add /a/exe
# add this with absolute paths as needed for better security
#path-add --end /path/to/node_modules/.bin
## for yarn, etc
#path-add --end /usr/lib/node_modules/corepack/shims/
# pip3 --user things go here:
path-add --end ~/.local/bin
path-add --ifexists --end /a/work/libremanage
path-add --ifexists --end /a/opt/adt-bundle*/tools /a/opt/adt-bundle*/platform-tools
# NOTE(review): this path ends in "-3.10." which looks truncated — confirm
# the intended directory name (harmless either way due to --ifexists).
path-add --ifexists --end /a/opt/scancode-toolkit-3.10.
path-add --ifexists --end /p/bin
# Per-host workarounds.
case $HOSTNAME in
sy|bo)
# https://askubuntu.com/questions/1254544/vlc-crashes-when-opening-any-file-ubuntu-20-04
if grep -qE '^VERSION_CODENAME="(nabia|focal)"' /etc/os-release &>/dev/null; then
export MESA_LOADER_DRIVER_OVERRIDE=i965
fi
;;
esac
export WCDHOME=/a
# Root uses a dedicated ssh config for the sl (ssh wrapper) machinery.
case $EUID in
0)
# shellcheck disable=SC2034 # used in brc
SL_SSH_ARGS="-F $HOME/.ssh/confighome"
;;
esac
# * include files
# generated instead of dynamic for the benefit of shellcheck
#for x in /a/bin/distro-functions/src/* /a/bin/!(githtml)/*-function?(s); do echo source $x ; done
source /a/bin/distro-functions/src/identify-distros
source /a/bin/log-quiet/logq-function
# for x in /a/bin/bash_unpublished/source-!(.#*); do echo source $x; done
source /a/bin/bash_unpublished/source-semi-priv
source /a/bin/bash_unpublished/source-state
# MAIL_HOST comes from the sourced state files above.
if [[ $HOSTNAME == "$MAIL_HOST" ]]; then
export MAIL_HOST_P=t
else
export NOT_MAIL_HOST_P=t
fi
source /a/bin/ds/beet-data
# * functions
# Combine several pulseaudio sources into one virtual "iancombine"
# source via a null sink + loopbacks + remap. Args are source indexes;
# with no args, lists sources and reads indexes interactively.
# NOTE(review): m and e are helper functions defined elsewhere
# (presumably run-verbose and echo); s and l leak into global scope.
multimic() {
local i
local -a sources
m pactl unload-module module-loopback
m pactl unload-module module-null-sink
m pactl unload-module module-remap-source
IFS=" " read -r -a sources <<<"$(pacmd list-sources | sed -rn 's/.*name: <([^>]+).*/\1/p')"
if (( ! $# )); then
i=0
for s in ${sources[@]}; do
e $i $s
i=$(( i+1 ))
done
read -r l
set -- $l
fi
m pactl load-module module-null-sink sink_name=ianinput sink_properties=device.description=ianinputs
for i; do
m pactl load-module module-loopback source=${sources[i]} sink_dont_move=true sink=ianinput
done
pactl load-module module-remap-source source_name=iancombine master=ianinput.monitor source_properties=device.description=iancombine
}
# h ssh test
# For testing restrictive ssh: connect with only the restricted /q/root/h
# key, using a copy of ~/.ssh/config that has IdentityFile lines removed.
# fix: d was leaking into the global scope; $d/config was unquoted.
hstest() {
  local d
  install-my-scripts
  d=$(mktemp -d)
  sed '/^ *IdentityFile/d' ~/.ssh/config >"$d/config"
  s command ssh -F "$d/config" -i /q/root/h "$@"
}
# h rsync test
# For testing restrictive rsync: same idea as hstest but wrapping rsync's
# transport ssh command.
# fix: d was leaking into the global scope; $d/config was unquoted.
hrtest() {
  local d
  install-my-scripts
  d=$(mktemp -d)
  sed '/^ *IdentityFile/d' ~/.ssh/config >"$d/config"
  s rsync -e "ssh -F $d/config -i /q/root/h" "$@"
}
# rsync as root and avoid the default restrictive h key & config.
rootrsync() {
  local -a rsh
  rsh=(ssh -F /root/.ssh/confighome)
  s rsync -e "${rsh[*]}" "$@"
}
# Grab a screenshot from host bow's display, copy it locally, and view it.
zcheck() {
ssh bow DISPLAY=:0 scrot /tmp/oegu.jpg
scp bow:/tmp/oegu.jpg /t
ssh bow rm /tmp/oegu.jpg
feh /t/oegu.jpg
}
# Poll ziva-screen forever, every 15 seconds (ctrl-c to stop).
zmon() {
while true; do
ziva-screen
sleep 15
done
}
# sl hook: push the trisquel 8/9 emacs config tree to the remote when the
# local init.el is newer.
# $1: output of sle's --sl-test-cmd, "<8|9>0Etiona<remote mtime>"
# $2: remote host
# fix: remote, dir and ltime were missing from the local list and leaked
# into the calling shell.
slemacs() {
  local arg rtime v remote dir ltime
  arg="$1"
  remote="$2"
  if [[ $arg == [89]0Etiona* ]]; then
    v=${arg::1}
    rtime=${arg#*Etiona} # remote time
    if [[ ! $rtime ]]; then
      # remote file did not exist; force a sync
      rtime=0
    fi
    dir=/a/opt/emacs-trisquel${v}-nox/.iank
    ltime=$(stat -c%Y $dir/e/e/.emacs.d/init.el)
    if (( ltime > rtime )); then
      m rsync -rptL --delete --filter=". /b/ds/sl/rsync-filter" $dir "$remote":/home/iank
    fi
  fi
}
sle() { # sl emacs
# ssh wrapper (sl) that also syncs emacs config via the slemacs hook.
# The test-cmd emits "<VERSION><mtime>" which slemacs parses.
local f=/home/iank/.emacs.d/init.el
sl --sl-test-cmd "sed -rn '/^VERSION=/{s/^.*=//;s/[^[:alnum:]]//gp}' /etc/os-release; test -e $f && stat -c%Y $f" --sl-test-hook slemacs "$@"
}
# give sle the same completion as ssh
ccomp ssh sle
# Run this manually after .emacs.d changes. Otherwise, to check if
# files changed with find takes 90ms. sl normally only adds 25ms. We
# could cut it down to 10ms if we put things on a btrfs filesystem and
# looked for changes there, or used some inotify thing, but that seems
# like too much work.
egh() { # emacs gnuhope
# push both trisquel emacs config trees to lists2d.fsf.org
RSYNC_RSH=ssh m rsync -rptL --delete --filter=". /b/ds/sl/rsync-filter" /a/opt/emacs-trisquel9-nox/.iank lists2d.fsf.org:.ianktrisquel_9
RSYNC_RSH=ssh m rsync -rptL --delete --filter=". /b/ds/sl/rsync-filter" /a/opt/emacs-trisquel8-nox/.iank lists2d.fsf.org:/home/iank
}
# Push emacs config to host kw (locally if we are kw, else over ssh
# after a backup via bbk). The heredoc is run by $shell on the target.
ekw() {
local shell="bash -s"
if [[ $HOSTNAME != kw ]]; then
shell="ssh kw.office.fsf.org"
bbk -m /a -t kw
fi
$shell <<'EOF'
sudo mkdir /root/.ianktrisquel_9
sudo rsync -rptL --delete --filter=". /b/ds/sl/rsync-filter" /a/opt/emacs-trisquel9-nox/.iank /root/.ianktrisquel_9
rsync -rptL --delete --filter=". /b/ds/sl/rsync-filter" /a/opt/emacs-trisquel8-nox/.iank /home/iank
EOF
}
# Remove all docker-created iptables rules and chains: turn each -A rule
# into a -D delete and each -N chain into a -X delete, in both the filter
# and nat tables. $l is intentionally unquoted so the rule words split.
rm-docker-iptables() {
s iptables -S | gr docker | gr -- -A | sed 's/-A/-D/'| while read -r l; do sudo iptables $l; done
s iptables -S -t nat | gr docker | gr -- -A | sed 's/-A/-D/'| while read -r l; do sudo iptables -t nat $l; done
s iptables -S | gr docker | gr -- -N | sed 's/-N/-X/'| while read -r l; do sudo iptables $l; done
s iptables -S -t nat | gr docker | gr -- -N | sed 's/-N/-X/'| while read -r l; do sudo iptables -t nat $l; done
}
# usage mkschroot [-] distro codename packages
# - means no piping in of sources.list
#
# note some useful post mkschroot i've used in the past
# tu /nocow/schroot/flidas/etc/sudoers <> $f
# fi
# su iank
# wget https://aur.archlinux.org/cgit/aur.git/snapshot/anbox-image-gapps.tar.gz
# tar xzf anbox-image-gapps.tar.gz
# cd anbox-image-gapps
# makepkg -s
}
# clock back in to timetrack from last entry
tback() {
  local sql
  sql="update entries set end = NULL where id = (select max(id) from entries);"
  sqlite3 /p/.timetrap.db "$sql"
}
# sshfs example:
# s sshfs bu@$host:/bu/home/md /bu/mnt -o reconnect,ServerAliveInterval=20,ServerAliveCountMax=30 -o allow_other
# Disable exim gateway mail delay for all messages by writing "all" to
# the no-delay id list; keep the file owned by iank when run as root.
edelayoff() {
  printf '%s\n' all >/var/spool/exim4/gw/.no-delay-eximids
  if (( EUID == 0 )); then
    chown iank:iank /var/spool/exim4/gw/.no-delay-eximids
  fi
}
# Re-enable exim gateway mail delay by truncating the no-delay id list
# (leaving a single blank line, as `echo >` does); keep it owned by iank
# when run as root.
edelayon() {
  printf '\n' >/var/spool/exim4/gw/.no-delay-eximids
  if (( EUID == 0 )); then
    chown iank:iank /var/spool/exim4/gw/.no-delay-eximids
  fi
}
# Flush the whole exim queue, temporarily disabling the delay mechanism
# if it isn't disabled already, and restoring it afterward.
# fix: tmpstr and delayon are scalar strings, not arrays — they were
# wrongly included in the `local -a` declaration.
eqgo() {
  local delayon tmpstr
  local -a array
  delayon=true
  if grep -qFx all /var/spool/exim4/gw/.no-delay-eximids; then
    delayon=false
  fi
  if $delayon; then
    echo all >/var/spool/exim4/gw/.no-delay-eximids
  fi
  # every queued message id
  tmpstr=$(exiqgrep -i -r.\*)
  mapfile -t array <<<"$tmpstr"
  enn -M "${array[@]}"
  if $delayon; then
    echo >/var/spool/exim4/gw/.no-delay-eximids
  fi
}
# Deliver just the oldest queued exim message, adding its id to the
# no-delay list first.
eqgo1() {
local eid
eid="$(exipick -i -r.\*|h1)"
# NOTE(review): sed here prints to stdout but does not modify the file
# (no -i); looks like it was meant to append $eid to the no-delay list —
# confirm intent.
sed -n "/^all$/p;\$a $eid" /var/spool/exim4/gw/.no-delay-eximids
enn -M "$eid"
}
# exim -M (in namespace and without delay)
# Append each given message id to the no-delay list, then deliver them.
ennm() {
  local msg_id
  for msg_id; do
    printf "%s\n" "$msg_id" >>/var/spool/exim4/gw/.no-delay-eximids
  done
  enn -M "$@"
}
# Wrapper for the gnulib release-upload helper.
gnupload(){
/a/f/gnulib/build-aux/gnupload "$@"
}
# Remove abrowser/firefox compatibility.ini files (forces the browser to
# re-check extension compatibility). ngset/ngreset toggle nullglob so an
# unmatched glob yields an empty array.
# fix: quoted the array expansion and added -- so odd filenames survive.
abrowserrmcompat() {
  local f
  ngset
  f=(/p/c/firefox*/compatibility.ini)
  if (( ${#f[@]} )); then
    rm -- "${f[@]}"
  fi
  ngreset
}
# checkrestart with the local blacklist, paged/verbose.
checkre() {
s checkrestart -b /a/bin/ds/checkrestart-blacklist -pv
}
# Sync the blocked email-domain list from ansible to the brains wiki.
cp-blocked-domains-to-brains() {
cp /a/f/ans/roles/exim/files/mx/simple/etc/exim4/bad-sender_domains /a/f/brains/sysadmin/kb/blocked_email_domains.mdwn
}
# Sync the blocked email-domain list from the brains wiki to ansible.
cp-blocked-domains-to-ansible() {
cp /a/f/brains/sysadmin/kb/blocked_email_domains.mdwn /a/f/ans/roles/exim/files/mx/simple/etc/exim4/bad-sender_domains
}
# Print all daylert maildir messages (hrcat is an external helper).
daycat() {
ngset
hrcat /m/md/daylert/{cur,new}/*
ngreset
}
# Delete all daylert maildir messages.
# fix: added the missing ngreset — without it the nullglob state toggled
# by ngset leaked into the rest of the session (daycat/acat/aclear all
# pair ngset with ngreset).
dayclear() {
  ngset
  rm -f /m/md/daylert/{cur,new}/*
  ngreset
}
# Print all local alert messages, then those on bk.b8.nz.
acat() {
ngset
hrcat /m/md/alerts/{cur,new}/*
ngreset
hr; echo bk; hr
ssh bk.b8.nz "shopt -s nullglob; hrcat /m/md/INBOX/new/* /m/md/INBOX/cur/*"
}
# Delete all alert messages locally and on bk.b8.nz, then refresh the
# status display.
aclear() {
ngset
rm -f /m/md/alerts/{cur,new}/*
ngreset
ssh bk.b8.nz "shopt -s nullglob; rm -f /m/md/INBOX/new/* /m/md/INBOX/cur/*"
system-status _
}
# List pending cron-error / sysd-mail-once alert files.
alerts() {
find /var/local/cron-errors /home/iank/cron-errors /sysd-mail-once-state -type f
}
ralerts() { # remote alerts
# Check every host's alert files (locally when we are that host).
local ret shell
# this list is duplicated in check-remote-mailqs
for h in bk je li frodo x3wg kdwg sywg; do
echo $h:
shell="ssh $h"
if [[ $HOSTNAME == "${h%wg}" ]]; then
shell=
fi
ret=0
$shell find /var/local/cron-errors /home/iank/cron-errors /sysd-mail-once-state -type f || ret=$?
if (( ret )); then
echo ret:$ret
fi
done
}
# Run the xans ansible playbook against $1 (default: this host).
ap() {
# pushd in case current directory has an ansible.cfg file
pushd /a/xans >/dev/null
ansible-playbook -v -i ${1:- $(hostname -f)}, site.yml
popd >/dev/null
}
# Run the work ansible adhoc playbook, timed.
aw() {
pushd /a/work/ans >/dev/null
time ansible-playbook -i inventory adhoc.yml "$@"
popd >/dev/null
}
# Run the distro-setup ansible playbook.
ad() {
pushd /a/bin/distro-setup/a >/dev/null
ansible-playbook site.yml "$@"
popd >/dev/null
}
# Launch android studio in the background with a libGL workaround.
astudio() {
# googling android emulator libGL error: failed to load driver: r600
# lead to http://stackoverflow.com/a/36625175/14456
export ANDROID_EMULATOR_USE_SYSTEM_LIBS=1
# NOTE(review): the trailing "r" runs a helper function after
# backgrounding studio (presumably a prompt redraw) — confirm.
/a/opt/android-studio/bin/studio.sh "$@" & r
}
# Convert brains file path to url and vice versa
# usage: brains [URL_OR_PATH]
brains() {
_iki-convert brains.fsf.org/wiki "$@"
}
# Same conversion for the gluestick wiki.
glue() {
_iki-convert gluestick.office.fsf.org "$@"
}
# usage: see above
# Convert between an ikiwiki URL and its local repo path, in whichever
# direction the input's form implies.
# $1: url prefix, eg brains.fsf.org/wiki. Remaining args (or prompted
# input) are the url or path to convert. j and fp are helpers defined
# elsewhere (j presumably also copies to clipboard; fp resolves a path).
# fix: removed the duplicate "url" and the unused "dir" from the locals.
_iki-convert() {
  local url url_prefix path input repo_dir url_dir name
  url_prefix="$1"
  name="${url_prefix%%.*}"
  repo_dir="/f/$name"
  shift
  if [[ $1 ]]; then
    input="$*"
  else
    read -r -p "enter path or url"$'\n' input
  fi
  case $input in
    http*)
      path="$repo_dir/${input##http*://"$url_prefix"/}"
      # for files like x.jpg, we dont need to convert the extension.
      if [[ $path == */ ]]; then
        path=${path%/}.mdwn
        # brains adds trailing slash, but without trailing is still
        # valid. We can't be totally sure whether to add mdwn, but we
        # can guess based on the existence of the file. We can't be sure
        # because it could be a file like x.jpg, that we just don't have
        # in our local repo.
      elif [[ ! -f $path && -e $path.mdwn ]]; then
        path=${path}.mdwn
      fi
      j printf "%s\n" "$path"
      ;;
    *)
      path=$(fp "$input")
      url_dir=$(echo "$path" | sed -r "s,^(/a)?$repo_dir/,,")
      url="https://$url_prefix/$url_dir"
      if [[ $url == *.mdwn ]]; then
        url="${url%.mdwn}/"
      fi
      j echo "$url"
      ;;
  esac
}
# Generate beet smartplaylists for navidrome.
# for going in the reverse direction, run
# /b/ds/navidrome-playlist-export
# Rewrites each generated playlist to point at the transcoded files
# under /i/converted (flac entries become mp3).
beetsmartplaylists() {
install -m 0700 -d /tmp/ianbeetstmp
beet splupdate
# kill off any playlists we deleted. they will still need manual
# killing from a navidrome client.
rm -rf /i/converted/beetsmartplaylists
mkdir -p /i/converted/beetsmartplaylists
for f in /tmp/ianbeetstmp/*; do
sed 's,^/i/m,/i/converted,;s,\.flac$,.mp3,' "$f" >"/i/converted/beetsmartplaylists/${f##*/}"
rm "$f"
done
rmdir /tmp/ianbeetstmp
}
# internal function for beetrating, in case we need to ssh
# Reads "RATING PATH" lines on stdin and pushes each rating into the
# navidrome sqlite db, updating an existing annotation row or inserting
# a new one.
# fixes: corrected the "beetraing" typo in the error message and routed
# it to stderr; old_rating was missing from the local list.
beetrating-stdin() {
  local tmp rating path cpath sqlpath userid old_rating
  # plucked this from the db. im the only user.
  userid=23cc2eb9-e35e-4811-a0f0-d5f0dd6eb634
  while read -r rating path; do
    cpath="/i/converted${path#/i/m}" # converted path
    case $cpath in
      *.flac)
        cpath="${cpath%.*}.mp3"
        ;;
    esac
    if [[ ! -e $cpath ]]; then
      echo "beetrating: error: this should not happen, path does not exist: $cpath" >&2
      return 1
    fi
    # escape single quotes for sql string literal
    sqlpath="${cpath//\'/\'\'}"
    old_rating=$(sqlite3 /i/navidrome/navidrome.db "select rating from annotation inner join media_file on item_id = id where path = '$sqlpath' and item_type = 'media_file';")
    if [[ $old_rating ]]; then
      if [[ $old_rating != "$rating" ]]; then
        echo "setting rating $old_rating -> $rating $cpath"
        # https://stackoverflow.com/a/50317320
        # we got a timeout error once. arbitrarily chose 15 seconds.
        sqlite3 /i/navidrome/navidrome.db ".timeout 15000" "
update annotation set rating = $rating
where item_id in (
select media_file.id from annotation inner join media_file on annotation.item_id = media_file.id
where media_file.path = '$sqlpath' and annotation.item_type = 'media_file' );"
      fi
    else
      echo "setting rating $rating $cpath"
      # /a/opt/navidrome/persistence/sql_annotations.go v0.48.0
      # https://www.sqlite.org/lang_insert.html
      sqlite3 /i/navidrome/navidrome.db ".timeout 15000" "insert into annotation select '$(uuidgen)', '$userid', id, 'media_file', 0, NULL, $rating, 0, NULL from media_file where path = '$sqlpath';"
    fi
  done
}
# Export beets ratings into navidrome
beetrating() {
local ssh_prefix
# On kd (where the library lives) ssh_prefix stays empty, so the
# unquoted expansion below runs beetrating-stdin locally.
if [[ $HOSTNAME != kd ]]; then
ssh_prefix="ssh b8.nz"
fi
# shellcheck disable=SC2016 # obvious reason
beet ls -f '$rating $path' $nav_convert_query | $ssh_prefix beetrating-stdin
}
# Do transcoding and hardlinking of audio files for navidrome.
beetconvert() {
local tmpf
tmpf="$(mktemp)"
# a bunch of effort to ignore output we dont care about...
# (format_item is neutered in a temp copy of the config so the noise
# becomes a fixed string we can grep away)
sed 's/^format_item:.*/format_item: ignore_this/' ~/.config/beets/config.yaml >$tmpf
beet -c $tmpf convert -y $nav_convert_query > >(grep -vFx 'ignore_this' ||:) 2> >(grep -v '^convert: Skipping' ||:)
rm "$tmpf"
}
# This deletes files in the converted directory which should no longer
# be there due to a rename of the unconverted file.
beetconvert-rm-extras() {
local l tmpf
local -A paths
tmpf="$(mktemp)"
# shellcheck disable=SC2016 # obvious reason
beet ls -f '$path' $nav_convert_query >"$tmpf"
## begin removal of files that are leftover from previous conversion,
# eg, previously rated > 1, now rated 1.
while read -r l; do
convertedpath="/i/converted${l#/i/m}"
case $convertedpath in
*.flac) convertedpath="${convertedpath%.flac}.mp3" ;;
esac
paths[$convertedpath]=t
done <"$tmpf"
# NOTE(review): with find's operator precedence, -print binds inside the
# parens before the -name tests, so this prints all regular files, not
# just *.mp3/*.m4a — see the author's note below; confirm intent.
find /i/converted -path /i/converted/beetsmartplaylists -prune -o \( -type f -print \) -name '*.mp3' -o -name '*.m4a' >"$tmpf"
while read -r l; do
if [[ ! ${paths[$l]} ]]; then
rm -v "$l"
fi
# note: the pruning is duplicative of filtering on name, but whatever.
done <"$tmpf"
rm "$tmpf"
}
# NOTE(review): this function body appears truncated/garbled — $tmpf is
# used without ever being assigned, `cat <$tmpf` discards nothing to
# stdout, and the tail (beet modify / beetag-nostatus) reads like it was
# spliced in from another function. Recover the original from VCS.
beets-gen-playlists() {
local i str
local -a query_array query_str
for i in "${!bpla[@]}"; do
query_str=()
eval "query_array=(${bpla[$i]})"
for str in "${query_array[@]}"; do
query_str+=("\"$str\"")
done
cat <$tmpf
beet -c $tmpf modify -y "$@" > >(grep -vFx -e 'ignore_this' -e 'Modifying 1 items.' ||:)
rm "$tmpf"
beetag-nostatus 1
}
# Silently kill the most recent background job and reap it, ignoring
# "no such job" errors.
kill-bg-quiet() {
  # https://stackoverflow.com/a/5722874
  kill %% 2>/dev/null || :
  wait %% 2>/dev/null || :
}
# debug variables
# Print NAME=VALUE for each variable name given, space separated, then
# a newline. Uses indirect expansion to fetch each value.
dv() {
  for arg; do
    printf '%s=%s ' "$arg" "${!arg}"
  done
  printf '\n'
}
# Must be called from beetag for variables to be setup
# Prints the button->tag help grid in col_total columns.
# NOTE(review): this body appears garbled — the for loop at the bottom
# is cut off mid-expression ("i/dev/null") and what follows looks like
# the start of an mpvrpc function that lost its header. Recover the
# originals from VCS.
beetag-help() {
local -i i j col_total row col button_total row_total remainder_cols remainder_term
col_total=4
button_total=${#button_map[@]}
row_total=$(( button_total / col_total ))
remainder_cols=$(( button_total % col_total ))
# for debugging
#dv button_total row_total remainder_cols
beetag-nostatus
# - 3 is just a constant that helps things work in practice.
if [[ $LINES ]] && (( LINES - 3 < scrolled )); then
hr
for (( i=0; i/dev/null; then
printf "%s\n" "$*" | socat - /tmp/mpvsock >/dev/null ||:
fi
}
# meant to be called from beetag
# o for get output
# Send a JSON IPC command to mpv's socket and print the reply.
mpvrpco() {
# note: testing for background jobs will output nothing if we are in a pipeline
printf "%s\n" "$*" | socat - /tmp/mpvsock ||:
}
# meant to be called from beetag
# Print mpv's playback position as an integer percentage, eg "42%".
mpvrpc-percent-pos() {
mpvrpco '{ "command": ["get_property", "percent-pos"] }' | jq .data | sed 's/\..*/%/' 2>/dev/null ||:
}
# run if not running.
#
# Note: this does not work with shell scripts as they are normally
# invoked, because the ps output has the interpreter at the start.
# A workaround is to invoke the command in that format, or we could
# do various other workarounds.
#
# background, this relies on how ps converts newlines in arguments to spaces, and
# assumes we won't be searching for a command with spaces in its arguments
rinr() {
# shellcheck disable=SC2009 # pgrep has no fixed string option, plus see above.
# run only when no existing process's args exactly match "$*" (141 =
# SIGPIPE from grep exiting early, treated as "not found").
if ps h -o args -C "${1##*/}" | grep -Fxqv "$*" &>/dev/null || [[ $? == 141 ]]; then
"$@"
fi
}
# variation of above: run or wait if running
rowir() {
local pid
# find a process whose args exactly match "$*"; the sed puts the pid on
# the line before the args so grep -B1 captures it.
pid=$(ps h -o 'pid,args' -C "${1##*/}" | sed -r 's/^[[:space:]]*([0-9]+)[[:space:]](.*)/\1\n\2/' | grep -B1 -Fx "$*" | head -n1 ||: )
if [[ $pid ]]; then
# https://unix.stackexchange.com/questions/427115/listen-for-exit-of-process-given-pid
tail --pid="$pid" -f /dev/null
else
"$@"
fi
}
# Load $1 into the idle mpv instance; when the music library is remote,
# first rsync the file to the local cache and prefetch $2 in the
# background.
# fixes: "nextpath" was declared twice in the local list and
# finalnextpath was missing from it (it leaked into the global scope).
mpvrpc-loadfile() {
  local path nextpath cachedir finalpath finalnextpath count
  cachedir=$HOME/.iank-music-cache
  path="$1"
  nextpath="$2"
  # note: logic duplicated in beetpull
  local remote_p=true
  if [[ $HOSTNAME == kd ]]; then
    remote_p=false
  fi
  if $remote_p; then
    finalpath="$cachedir${path#/i/m}"
    rowir rsync --partial -a --inplace --mkpath "b8.nz:$path" "$finalpath"
    finalnextpath="$cachedir${nextpath#/i/m}"
    # NOTE(review): pgrep -a emits "PID CMDLINE" lines, not a count, so
    # [[ $count == [01] ]] below only matches when... confirm this gate
    # behaves as intended (pgrep -c would give an actual count).
    count=$(pgrep -a -f "^rsync --partial -a --inplace --mkpath $cachedir" || [[ $? == 1 ]] )
    # allow us to start 2 rsyncs in the background
    if [[ $count == [01] ]]; then
      rinr rsync --partial -a --inplace --mkpath "b8.nz:$nextpath" "$finalnextpath" &
    fi
  else
    finalpath="$path"
  fi
  mpvrpc '{ "command": ["loadfile", "'"$finalpath"'"] }'
}
# tag with beets.
# usage: beetag [-r] [-s] QUERY
# it lists the query, reads an input char for tagging one by one.
#
# note, you may want to change the play command for doing rapid taging
# by immediately jumping forward into the song. this is set in the beets
# config yaml.
#
# (available buttons: ` \ ) ] [ and non-printing chars, see
# https://stackoverflow.com/questions/10679188/casing-arrow-keys-in-bash
#
#
# note: after foregrounding the player, must quit it to get back. can't ctrl-c.
#
# keys I dont need help to remember:
# 1-5 rate
# q quit
# ret next
#
# todo: enter should also unpause
# NOTE(review): head_start, line_no_path and ret are assigned below but
# missing from the local lists; they leak into the calling shell.
beetag() {
local last_genre_i fstring tag id char new_item char_i genre tag remove doplay i j random path
local do_rare_genres read_wait line lsout tmp ls_line skip_lookback
local escape_char escaped_input expected_input skip_input_regex right_pad erasable_line seek_sec
local pl_state_path pl_state_dir pl_state_file tmpstr
local new_random pl_seed_path seed_num seed_file fmt first_play repeat1
local -a buttons button_map ids tags tmp_tags initial_ls ls_lines paths
local -A button_i
local -i i j volume scrolled id_count line_int skip_start pre_j_count head_count skip_lookback
local -i overflow_lines overflow
first_play=true
erasable_line=false
escape_char=$(printf "\u1b")
scrolled=999 # more than any $LINES
### begin arg processing ###
random=false
repeat1=false
new_random=false
case $1 in
-r)
random=true
shift
;;
-s)
random=false
shift
;;
-x)
new_random=true
shift
;;
esac
if (( ! $# )); then
echo beetag: error expected a query arg >&2
return 1
fi
### end arg processing ###
# note: I used to do beetpull here, but mpv + ssfs on slowish
# connection leads to bad/buggy result.
do_rare_genres=false
volume=70
read_wait=2
doplay=true
last_genre_i=$(( ${#common_genres[@]} - 1 ))
buttons=( {a..p} {r..w} {6..8} , . / - "=")
button_map=(${common_genres[@]} ${pl_tags[@]})
fstring=
for tag in "${pl_tags[@]}"; do
fstring+="%ifdef{$tag,$tag }"
done
for (( i=0; i<${#buttons[@]}; i++ )); do
button_i[${buttons[i]}]=$i
done
# note: this structure of files is rather haphazard.
seed_num=1 # later we might want a few
seed_file=seed$seed_num
if $random; then
pl_state_file=$seed_num
else
pl_state_file=sorted
fi
pl_state_dir=/b/data/pl-state
if [[ $playlist ]]; then
pl_state_dir=$pl_state_dir/$playlist
else
pl_state_dir=$pl_state_dir/nopl
fi
pl_state_path=$pl_state_dir/$pl_state_file
pl_seed_path=$pl_state_dir/$seed_file
if $new_random || [[ ! -r $pl_seed_path ]]; then
mkdir -p $pl_state_dir
{ base64 < /dev/urandom | head -c 200 ||:; echo; } > $pl_seed_path
fi
# PijokVipiotOzeph is just a random string for a delimiter
# shellcheck disable=SC2016 # false positive
fmt='%ifdef{rating,$rating }'"$fstring"'$genre | $title - $artist - $album $length $id PijokVipiotOzeph $path'
# shellcheck disable=SC2016 # obvious reason
tmpstr=$(beet ls -f "$fmt" "$@" | { if $random; then sort -R --random-source=$pl_seed_path; else cat; fi; } )
mapfile -t initial_ls <<<"$tmpstr"
if [[ ! ${initial_ls[0]} ]]; then
echo "beetag: error: no result from beet ls $*"
return 1
fi
id_count=${#initial_ls[@]}
for line in "${initial_ls[@]}"; do
path="${line#*PijokVipiotOzeph }"
# https://github.com/koalaman/shellcheck/issues/2171
# shellcheck disable=SC2190 # bug in shellcheck, looking at paths from an earlier function
paths+=("$path")
line_no_path="${line% PijokVipiotOzeph*}"
id="${line_no_path##* }"
ids+=("$id")
right_pad="${line_no_path%% |*}"
ls_line="$(printf %-11s "$right_pad")${line_no_path#"$right_pad"}"
ls_lines+=("$ls_line")
i=$(( i+1 ))
done
# j is the index of the current track
j=0
if [[ $playlist ]]; then
if [[ -r $pl_state_path ]]; then
j=$(cat $pl_state_path)
fi
fi
# i only care to see a smallish portion of the list when starting.
head_count=$(( LINES - 20 ))
head_start=$(( j - head_count / 2 ))
if (( head_start < 0 )); then
head_start=0
fi
for (( i=head_start; i < head_count && i < id_count; i++ )); do
ls_line="${ls_lines[$i]}"
if (( i == j )); then
echo "* $ls_line"
else
echo "$ls_line"
fi
done
if $doplay; then
#{ mpv --profile=a --volume=$volume --idle 2>&1 & } 2>/dev/null
mpv --profile=a --volume=$volume --idle &
# if we dont sleep, can expect an error like this:
# socat[1103381] E connect(5, AF=1 "/tmp/mpvsock", 14): Connection refused
sleep .1
fi
# main loop: one iteration per track; inner loop reads key commands.
while true; do
id=${ids[j]}
path="${paths[$j]}"
lsout="${ls_lines[j]}"
tags=( ${lsout%%,*} )
beetag-help
printf "██ %s\n" "$lsout"
beetag-nostatus 1
if $doplay; then
# https://stackoverflow.com/a/7687716
# note: duplicated down below
#
# notes on old method of invoking mpv each time:
# https://superuser.com/questions/305933/preventing-bash-from-displaying-done-when-a-background-command-finishes-execut
# we can't disown or run in a subshell or set +m because all that
# disabled job control from working properly in ways we want.
# todo: figure out some kind of answer to this. I think the solution
# is that we are waiting in 2 second intervals and checking if the
# background job exists. Instead, we should make mpv just idle
# when it is done with a song and then send it a command to play a new track.
#{ mpv --profile=a --volume=$volume "$path" 2>&1 & } 2>/dev/null
# old
#{ beet play "--args=--volume=$volume" "id:$id" 2>&1 & } 2>/dev/null
# on slow systems, we may need to wait like .3 seconds before mpv
# is ready. so impatiently check until it is ready
if $first_play; then
first_play=false
for (( i=0; i<20; i++ )); do
if [[ $(mpvrpco '{ "command": ["get_property", "idle-active"] }' 2>/dev/null | jq .data) == true ]]; then
mpvrpc-loadfile "$path" 2>/dev/null
break
fi
sleep .1
done
else
mpvrpc-loadfile "$path"
fi
erasable_line=false
fi
while true; do
char=
if $doplay; then
ret=0
read -rsN1 -t $read_wait char || ret=$?
read_wait=2
# Automatically skip to the next song if this one ends, unless
# we turn off the autoplay.
if (( ret == 142 )) || [[ ! $char ]]; then
if jobs -p | grep -q . &>/dev/null && \
[[ $(mpvrpco '{ "command": ["get_property", "idle-active"] }' | jq .data) == false ]]; then
continue
else
break
fi
fi
else
read -rsN1 char
fi
beetag-help
if [[ $char == $'\n' ]]; then
break
fi
case $char in
";")
j=$(( j - 2 ))
break
;;
"'")
if $doplay; then
echo "play toggled off"
doplay=false
else
doplay=true
mpvrpc-loadfile "$path"
erasable_line=false
fi
beetag-nostatus 1
continue
;;
_)
m beet rm --delete --force "id:$id"
beetag-nostatus 4 # guessing. dont want to test atm
break
;;
[1-5])
beetmq "id:$id" rating=$char
continue
;;
9)
volume=$(( volume - 5 ))
if (( volume < 0 )); then
volume=0
fi
;;&
0)
volume+=5
if (( volume > 130 )); then
volume=130
fi
;;&
0|9)
mpvrpc '{ "command": ["set_property", "volume", '$volume'] }'
beetag-status
echo volume=$volume
continue
;;
']')
if $repeat1; then
repeat1=false
else
repeat1=true
fi
echo repeat1=$repeat1
continue
;;
q)
kill-bg-quiet
return
;;
y)
if $do_rare_genres; then
do_rare_genres=false
button_map=(${common_genres[@]} ${pl_tags[@]})
last_genre_i=$(( ${#rare_genres[@]} - 1 ))
else
do_rare_genres=true
button_map=(${rare_genres[@]} ${pl_tags[@]})
last_genre_i=$(( ${#rare_genres[@]} - 1 ))
fi
local -A button_i
for (( i=0; i<${#buttons[@]}; i++ )); do
button_i[${buttons[i]}]=$i
done
for (( i=0; i<${#button_map[@]}; i++ )); do
echo ${buttons[i]} ${button_map[i]}
done
continue
;;
z)
beetag-nostatus 3
# if we ctrl-z, it will put the whole function into sleep. so
# basically, we can't return from a foregrounded mpv like we
# would like to without some strange mechanism I can't think
# of. So, instead, detect ctrl-c and wait a while for prompt
# input. One idea would be to use a music player like mpd where
# we can send it messages.
if ! fg; then
read_wait=10
fi
continue
;;
#
" ")
# output time if we aren't already paused
if [[ $(mpvrpco '{ "command": ["get_property", "pause"] }' | jq .data) == false ]]; then
# minutes/seconds
#date -d @"$(mpvrpco '{ "command": ["get_property", "playback-time"] }' | jq .data)" +%M:%S ||:
beetag-status
mpvrpc-percent-pos
fi
# originally found this solution, which worked fine.
#kill -STOP %% &>/dev/null
#
mpvrpc '{ "command": ["cycle", "pause"] }'
continue
;;
"$escape_char")
expected_input=true
read -rsn2 escaped_input
skip_input_regex="^[0-9]+$"
case $escaped_input in
# up char: show all the songs, use less
'[A')
skip_start=0
skip_lookback=5
if (( j - skip_lookback > skip_start )); then
skip_start=$(( j - skip_lookback ))
fi
beetag-nostatus $(( id_count - skip_start - 1 ))
{
line_int=0
for (( i=skip_start; i < id_count; i++ )); do
if (( i == j )); then
echo " * ${ls_lines[i]}"
continue
fi
echo "$line_int | ${ls_lines[i]}"
line_int+=1
done
} | less -F
;;
# down char
'[B')
# skip forward, but show the last few songs anyways.
skip_start=0
skip_lookback=3
if (( j - skip_lookback > skip_start )); then
skip_start=$(( j - skip_lookback ))
fi
beetag-nostatus $(( id_count - skip_start - 1 ))
line_int=0
overflow_lines=$LINES
for (( i=skip_start; i < overflow_lines - 1 && i < id_count; i++ )); do
ls_line="${ls_lines[i]}"
overflow=$(( ${#ls_line} / ( COLUMNS - 1 ) ))
overflow_lines=$(( overflow_lines - overflow ))
if (( i == j )); then
echo " * $ls_line"
continue
fi
echo "$line_int | $ls_line"
line_int+=1
done
;;
# left key
'[D')
seek_sec=-8
;;&
# right key
'[C')
seek_sec=8
;;&
'[C'|'[D')
beetag-status
mpvrpc-percent-pos
erasable_line=true
mpvrpc '{ "command": ["seek", "'$seek_sec'"] }'
continue
;;
*)
expected_input=false
;;
esac
if $expected_input; then
read -r skip_input
case $skip_input in
q)
kill-bg-quiet
return
;;
esac
if [[ $skip_input =~ $skip_input_regex ]]; then
pre_j_count=$(( j - skip_start ))
j=$(( j + skip_input - pre_j_count ))
if (( skip_input < pre_j_count )); then
j=$(( j - 1 ))
fi
fi
break
fi
;;
esac
char_i=${button_i[$char]}
new_item=${button_map[$char_i]}
if [[ ! $char_i || ! $new_item ]]; then
echo "error: no mapping of input: $char found, try again"
continue
fi
if (( char_i <= last_genre_i )); then
m beetmq "id:$id" genre=$new_item
else
remove=false
tmp_tags=()
for tag in ${tags[@]}; do
if [[ $new_item == "$tag" ]]; then
remove=true
else
tmp_tags+=("$tag")
fi
done
if $remove; then
# NOTE(review): this looks like it was meant to be
# tags=("${tmp_tags[@]}") — as written the freshly built
# tmp_tags (with $new_item filtered out) is discarded. Confirm.
tags=("${tags[@]}")
m beetmq "id:$id" "$new_item!"
else
tags+=("$new_item")
m beetmq "id:$id" $new_item=t
fi
fi
done
if ! $repeat1; then
if (( j < id_count - 1 )); then
j+=1
else
j=0
fi
fi
if [[ $playlist ]]; then
echo $j >$pl_state_path
fi
done
}
# usage: FILE|ALBUM_DIR [GENRE]
# Import a file or album dir into beets (tagged totag:t), run the
# interactive beetag pass over it, then clear the totag flag.
# fixes: the nonexistent-path error previously printed but fell through
# to the import anyway — now it returns 1 (and prints to stderr). The
# extra beet options are passed as an array instead of relying on
# unquoted word splitting.
beetadd() {
  local import_path
  local -a extra_args
  import_path="$1"
  if [[ ! -e $import_path ]]; then
    echo "beetadd error: path does not exist" >&2
    return 1
  fi
  extra_args=()
  if [[ $2 ]]; then
    extra_args+=(--set "genre=$2")
  fi
  if [[ -f $import_path ]]; then
    # single file rather than an album directory
    extra_args+=(-s)
  fi
  beet import --set totag=t "${extra_args[@]}" "$import_path"
  beetag totag:t
  beet modify -y totag:t "totag!"
}
# update navidrome music data after doing beets tagging
beet2nav() {
m beetpull
m beetconvert
m beetrating
# this function would naturally just be part of beetconvert,
# but we want beetrating to happen sooner so that our ssh auth dialog
# happens earlier. Currently 17 seconds for that.
m beetconvert-rm-extras
m beetsmartplaylists
}
# pull in beets library locally
# Mount b8.nz:/i at /i via sshfs unless we are kd (where it is local) or
# an identical sshfs is already running.
beetpull() {
local sshfs_host sshfs_cmd
sshfs_host=b8.nz
if [[ $HOSTNAME == kd ]]; then
return 0
fi
if [[ ! -e /i ]]; then
s mkdir /i
s chown iank:iank /i
fi
sshfs_cmd="sshfs -o ServerAliveInterval=15,reconnect $sshfs_host:/i /i"
if ! pgrep -f "^$sshfs_cmd$" >/dev/null; then
m $sshfs_cmd
fi
}
# remove all playlists in navidrome, for when I make big
# playlist name changes and just want to scrap everything.
# Reads ids straight from the navidrome db, then deletes each through
# the subsonic API. The s= salt and t= token are a fixed pair for
# $navidrome_pw (sourced elsewhere).
nav-rm-plists() {
local tmpf id
tmpf=$(mktemp)
if [[ $HOSTNAME != kd ]]; then
echo "error: run on kd"
return 1
fi
sqlite3 /i/navidrome/navidrome.db "select id from playlist" >$tmpf
while read -r id; do
curl --http1.1 --user "iank:$navidrome_pw" "https://b8.nz/rest/deletePlaylist.view?u=iank&s=sb219dvv7egnoe4i47k75cli0m&t=1c8f5575cd0fdf03deb971187c9c88b1&v=1.2.0&c=DSub&id=$id"
done <$tmpf
rm $tmpf
}
# escape regex.
#
# This is not perfect but generally good enough. It escapes all
# metachars listed man 3 pcrepattern. Most metachars are neutralized by
# wrapping in [...]; ^ gets a backslash since [^] would invert a class.
er() {
  local raw="$*"
  sed 's/[]\\^$.[|()?*+{}]/[&]/g; s/\^/\\^/g' <<<"$raw"
}
# usage beegenre QUERY
#
# beet set genre for QUERY based on existing artist most used genre on
#
# inverse of query for each artist found in QUERY. If query starts with
# "artist:" it is used as the artist instead of each artist in QUERY.
#
# fixes: the confirm branch applied ${genre[i]} — indexing the scalar
# $genre, which is empty past i=0 — instead of the collected ${genres[i]};
# char and i were missing from the local list.
beegenre() {
  local count artist artregex genre singleartist tmpf tmpf2 char i
  local -a artists genres
  singleartist=false
  case $1 in
    artist:*)
      singleartist=true
      artist="$1"
      shift
      ;;
  esac
  tmpf=$(mktemp)
  tmpf2=$(mktemp)
  if $singleartist; then
    # most common genre among this artist's tracks outside the query
    # shellcheck disable=SC2016 # obvious reason
    beet ls -f '$genre' "$artist" "${@/#/^}" | sort | uniq -c | sort -n | tail -n1 >$tmpf
    read -r count genre <$tmpf ||:
    beet modify "$artist" "$@" genre=$genre
  else
    # shellcheck disable=SC2016 # obvious reason
    beet ls -f '$artist' "$@" | sort -u >$tmpf
    while read -r artist; do
      artregex=$(er "$artist")
      # shellcheck disable=SC2016 # obvious reason
      beet ls -f '$genre' "artist::^$artregex$" "${@/#/^}" | sort | uniq -c | sort -n | tail -n1 >$tmpf2
      read -r count genre <$tmpf2 || continue
      if [[ $count ]]; then
        artists+=("$artregex")
        genres+=("$genre")
        echo "beet modify -y $* \"artist::^$artist$\" genre=$genre # $count"
      fi
    done <$tmpf
    read -r -N 1 -s -p "Y/n " char
    case $char in
      [Yy$'\n'])
        for (( i=0; i<${#artists[@]}; i++ )); do
          beet modify -y "$@" "artist::^${artists[i]}$" genre=${genres[i]}
        done
        ;;
    esac
  fi
  rm $tmpf
}
# prettify the date
# Turn a btrbk timestamp like 20240503T102030-0400 into an ISO-ish
# string and reformat it with date(1); extra args pass through to date.
btrbk-date() {
  local stamp="$1"
  shift
  local rearranged
  rearranged=$(sed -r 's/(.{4})(..)(.{5})(..)(.*)/\1-\2-\3:\4:\5/' <<<"$stamp")
  date +%F_%T%:::z -d "$rearranged" "$@"
}
# Inverse of btrbk-date: turn a pretty 2024-05-03_10:20:30-04 timestamp
# (from $1 or stdin) back into btrbk's 20240503T102030-0400 form.
btrbk-undate() {
  # fudCaHougfirp is a random string
  local input
  if [[ $1 ]]; then
    input="$1"
  else
    input=$(cat)
  fi
  # protect the -04/-05 zone suffix while stripping : and -
  sed -r 's/-0([45])( |$)/fudCaHougfirp0\100/;s/_/T/;s/[:-]//g;s/fudCaHougfirp/-/' <<<"$input"
}
# Filter: for each stdin line containing a btrbk timestamp
# (YYYYMMDDTHHMMSS-0400/-0500), replace it with the pretty btrbk-date
# form; pass other lines through unchanged.
# fix: pre, post and mid were missing from the local list and leaked
# into the calling shell.
btrbk-date-sed() {
  local line pre post mid
  while read -r line; do
    if [[ $line == *20[0-9][0-9][0-9][0-9][0-9][0-9]T[0-9][0-9][0-9][0-9][0-9][0-9]-0[45]00* ]]; then
      pre="${line%%20[0-9][0-9][0-9][0-9][0-9][0-9]T[0-9][0-9][0-9][0-9][0-9][0-9]-0[45]00*}"
      post="${line##*20[0-9][0-9][0-9][0-9][0-9][0-9]T[0-9][0-9][0-9][0-9][0-9][0-9]-0[45]00}"
      # 22 = length of the timestamp itself
      mid="${line:${#pre}:22}"
      echo "$pre$(btrbk-date "$mid")$post"
    else
      echo "$line"
    fi
  done
}
# journal for the btrbk-related units (jr is a journalctl wrapper).
jrbtrbk() {
jr -u btrbk-run -u btrbk -u switch-mail-host "$@"
}
# internal function
# For each snapshot name given, figure out which host's btrbk log
# claims it, warn on conflicts, and print "host snapshot" prettified.
# Relies on $remote/$alt set by btrbk-host-debug and /tmp/b/s/*.log.
btrbk-host-debug-show-host() {
for f; do
snaphost=
for host in $remote $alt local; do
if line=$(grep -P "\S*$f" /tmp/b/s/$host.log); then
if [[ $snaphost ]]; then
e error: snaphost=$snaphost, host=$host line="$line"
fi
if [[ $line == ssh* ]]; then
tmp="${line#ssh://}"
snaphost="${tmp%%/*}"
else
snaphost=$host
fi
fi
done
echo $snaphost $f | btrbk-date-sed
done
}
# If we get a btrfs receive error like this:
# ERROR: ... clone: did not find source subvol
# running this command will help track down the problem.
# Alter remote= and alt=. When I used it, remote is
# the host having the error when I push a snapshot.
# Alt is just the other host that takes snapshots
# besides the local host.
btrbk-host-debug() {
remote=b8.nz
alt=sywg.b8.nz
mkdir -p /tmp/b/s
# collect each host's btrbk logs and extract snapshot-success lines
for host in $remote $alt; do
h=$(ssh $host hostname)
rsync -a /var/log/btrbk $host:/var/log/btrbk /var/log/btrbk/$h
grr '\bsnapshot success' /var/log/btrbk/$h >/tmp/b/$h.log
## this takes a while, we only want to do it on 1st run
# if [[ -s /tmp/b/$host.log ]]; then continue; fi
# ssh $host journalctl -u btrbk-run -u btrbk -u switch-mail-host >/tmp/b/$host.log
done
gr '\bsnapshot success' /var/log/btrbk/*.log >/tmp/b/local.log
cd /tmp/b
for f in *.log; do
gr '\bsnapshot success' $f >s/$f
done
cd /mnt/root/btrbk
# compare local q.* snapshots with the remote's set
localq=(q.*)
declare -A localq_a
for f in "${localq[@]}"; do
localq_a[$f]=t
done
remoteq=()
for f in $(ssh $remote "cd /mnt/root/btrbk; echo q.*"); do
if [[ ! ${localq_a[$f]} ]]; then
remoteq+=($f)
fi
done
btrbk-host-debug-show-host "${localq[@]}"
if (( ${#remoteq[@]} >= 1 )); then
echo "=== $remote only ===="
btrbk-host-debug-show-host ${remoteq[@]}
fi
}
# note, to check for glue records
# First, find some the .org nameservers:
# dig +trace iankelling.org
# then, query one:
# dig ns1.iankelling.org @b0.org.afilias-nst.org.
# Now, compare for a domain that does have glue records setup (note the A
# and AAAA records in ADDITIONAL SECTION, those are glue records like the
# one I'm asking for):
# $ dig ns1.gnu.org @b0.org.afilias-nst.org.
bbk() { # btrbk wrapper
  # Stop the btrbk timer, run btrbk-run with the given args, restart
  # the timer only on success.
  # fix: ret was initialized but never set, so the "timer not restarted
  # due to failure" branch was unreachable — capture btrbk-run's status.
  local ret=0
  c /
  local active=true
  systemctl is-active btrbk.timer || active=false
  if $active; then
    ser stop btrbk.timer
  fi
  btrbk_is_active=$(systemctl is-active btrbk.service ||:)
  case $btrbk_is_active in
    inactive|failed) : ;;
    *)
      echo "bbk: error: systemctl is-active btrbk.service output: $btrbk_is_active"
      if $active; then ser start btrbk.timer; fi
      return 1
      ;;
  esac
  # todo: consider changing this to srun and having the args come
  # from a file like /etc/default/btrbk, like is done in exim
  s jdo btrbk-run "$@" || ret=$?
  if $active; then
    if (( ret )); then
      echo bbk: WARNING: btrbk.timer not restarted due to failure
    else
      ser start btrbk.timer
    fi
  fi
  return $ret
}
# fai-monitor with a gui copy of the stream (pee tees to both).
faimon() {
fai-monitor | pee cat "fai-monitor-gui -"
}
# bfg repo-cleaner jar wrapper.
bfg() { java -jar /a/opt/bfg-1.12.14.jar "$@"; }
# Large on-screen clock.
bigclock() {
xclock -digital -update 1 -face 'arial black-80:bold'
}
# nnn file manager, showing hidden files.
nnn() { /a/opt/nnn -H "$@"; }
locat() { # log-once cat
  # Show pending cron-error / sysd-mail-once alert files: just the head
  # of the single file, or heads of all (head prints per-file headers).
  # fix: quoted the array expansions so filenames with spaces or glob
  # chars can't be re-split/re-globbed.
  local files
  ngset
  files=(/var/local/cron-errors/* /home/iank/cron-errors/* /sysd-mail-once-state/*)
  case ${#files[@]} in
    0) : ;;
    1)
      echo "${files[0]}"
      head "${files[0]}"
      ;;
    *)
      head "${files[@]}"
      ;;
  esac
  ngreset
}
# Reattach (forcibly) or create a screen session.
scr() {
screen -RD "$@"
}
# usage: first get an adb shell on the phone.
#
# just followed instructions in readme at
# https://github.com/Yuubi-san/ceb-tools
# tried to use ceb2txt but it failed because of schema
# slightly different than what it expected.
cheogram-get-logs() {
#adb shell rm -r /storage/emulated/0/Download/Cheogram/Backup
read -r -p "do cheogram backup on phone, do not enable extra cheogram data. press any key when done"
cd /p/cheogram
rm -rf Backup b
adb pull /storage/emulated/0/Download/Cheogram/Backup
# NOTE(review): the next two lines appear truncated/garbled — $q and
# $days are never set here, and the sqlite3 invocation is cut off
# mid-expression (compare mycheologs below). Recover from VCS.
sqlite3 b $(( (EPOCHSECONDS - days * 60 * 60 * 24) * 1000 ))
order by timeSent;"
sqlite3 /p/cheogram/b ".mode tabs" "$q" | less
}
mycheologs() {
# Show my recent messages to the fsf office MUC from the local cheogram
# sqlite db (see cheogram-get-logs). $1 = days to look back, default 16.
local days q z sign hours mins zone_offset
days=${1:-16}
# timezone compared to utc. note: this takes the current offset, so if daylight savings change
# happened in the looking back period, this won't account for it.
# fix: the old parse, sed 's/[^1-9-]*//g', deleted every 0 digit, so eg
# +1000 became 1 hour and any zone with minutes (eg +0530) was wrong.
# Parse the %z string (eg -0500) into seconds properly instead.
z=$(date +%z)
sign=${z:0:1}
hours=${z:1:2}
mins=${z:3:2}
zone_offset=$(( 10#$hours * 3600 + 10#$mins * 60 ))
if [[ $sign == - ]]; then
zone_offset=$(( -zone_offset ))
fi
case $zone_offset in
-*) : ;;
# add a leading "+ " so it splices into the sql below as addition
*) zone_offset="+ $zone_offset"
esac
echo zone_offset=$zone_offset
q="
select
datetime(substr(timeSent,0,11) $zone_offset, 'unixepoch'),
body
from messages
where timeSent > $(( (EPOCHSECONDS - days * 60 * 60 * 24) * 1000 ))
and counterpart = 'office@conference.fsf.org/iank'
order by timeSent;"
sqlite3 /p/cheogram/b ".mode tabs" "$q" | sed 's/ /./' | less
}
# version of jdo for my non-root user
jdo() {
# usage: jdo COMMAND [ARGS...]
# Run COMMAND as a transient systemd system unit named after the
# command, tailing its journal output to the terminal while it runs.
# Returns the command's exit status.
#
# comparison of alternative logging methods:
#
# systemd-run command (what this function does)
#
# If there is a user prompt, the program will detect that it is not
# connected to a terminal and act in a non-interactive way, skipping
# the prompt. This has the benefit that you know exactly how the
# program will act if you want to move it into a service that runs
# automatically.
#
# If run with sudo and command is a shell script which does a sleep,
# it can (sometimes?) output some extra whitespace in front of
# messages, more for each subsequent message. This can be avoided by
# becoming root first.
#
# It logs the command's pid and exit code, which is nice.
#
#
### command |& ts | tee file.log
#
# If there is a user prompt, like "read -p prompt var", it will hang
# without outputting the prompt.
#
# I've had a few times where ts had an error and I wasn't totally sure
# if it was really the command or ts having the problem.
#
# Sometimes some output will get hidden until you hit enter.
#
#
### command |& pee cat logger
#
# This seems to work. I need to test more.
#
#
### command |& logger -s
#
# User prompts get confusingly prefixed to earlier output, and all log
# entries get prefixed with annoying priority level.
#
#
### systemd-cat
#
# Had a few problems. One major one is that it exited in the middle of
# a command on systemctl daemon-reload
#
# Related commands which can log a whole session: script, sudo, screen
local cmd cmd_name jr_pid ret
ret=0
cmd="$1"
shift
cmd_name=${cmd##*/}
# resolve to an absolute path, since systemd-run execs it directly
if [[ $cmd != /* ]]; then
cmd=$(type -P "$cmd")
fi
#note date format for since is date '+%F %T'
# -q = quiet
# background journal tail of the unit so we see live output
journalctl --since=now -qn2 -f -u "$cmd_name" &
jr_pid=$!
# note, we could have a version that does system --user, but if for example
# it does sudo ssh, that will leave a process around that we can't kill
# and it will leave the unit hanging around in a failed state needing manual
# killing of the process.
s systemd-run --uid "$(id -u)" --gid "$(id -g)" \
-E SSH_AUTH_SOCK=/run/openssh_agent \
--unit "$cmd_name" --wait --collect "$cmd" "$@" || ret=$?
# The sleep lets the journal output its last line
# before the prompt comes up.
sleep .5
kill $jr_pid &>/dev/null ||:
unset jr_pid
# reap/foreground any leftover job; errors ignored if there is none
fg &>/dev/null ||:
# this avoids any err-catch
(( ret == 0 )) || return $ret
}
# service run, and watch the output
srun() {
# Start systemd unit $1 and tail its journal for a couple seconds so we
# see its startup output.
local unit jr_pid
ret=0
unit=$1
journalctl -qn2 -f -u $unit &
# fix: jr_pid was never assigned, so the kill below operated on an
# empty/stale value (possibly left over from a previous jdo call) and
# the journalctl tail was leaked.
jr_pid=$!
systemctl start $unit
sleep 2
kill $jr_pid &>/dev/null ||:
unset jr_pid
# reap/foreground any leftover job; errors ignored if there is none
fg &>/dev/null ||:
}
sm() { # switch mail host
# Make sure the /root/.ssh/home key is loaded in root's agent, then run
# switch-mail-host via jdo. Returns switch-mail-host's exit status.
local tmp keyhash ret
ret=0
c /
# run latest
keyhash=$(s ssh-keygen -lf /root/.ssh/home | awk '{print $2}')
tmp=$(s ssh-add -l | awk '$2 == "'$keyhash'"' ||:)
if [[ ! $tmp ]]; then
s ssh-add /root/.ssh/home
fi
# fix: ret was never assigned in this function, so it returned whatever
# stale global $ret happened to hold; capture the exit status directly.
s jdo switch-mail-host "$@" || ret=$?
return $ret
}
sh2() { # switch host2
# Like sm, but runs switch-host2 after installing the latest scripts.
local tmp keyhash ret
ret=0
c /
# run latest
keyhash=$(s ssh-keygen -lf /root/.ssh/home | awk '{print $2}')
tmp=$(s ssh-add -l | awk '$2 == "'$keyhash'"')
if [[ ! $tmp ]]; then
s ssh-add /root/.ssh/home
fi
install-my-scripts
# fix: ret was never assigned in this function, so it returned whatever
# stale global $ret happened to hold; capture the exit status directly.
s jdo switch-host2 "$@" || ret=$?
return $ret
}
# shellcheck disable=SC2120
lipush() {
# Push scripts and config to the vps hosts li, je and bk, then pull
# proposed blog comments back from li. Extra args are passed to rsync.
# note, i had --delete-excluded, but that deletes all files in --exclude-from on
# the remote site, which doesn't make sense, so not sure why i had it.
local p a h ret
# excluding emacs for now
#p=(/a/opt/{emacs-debian11{,-nox},mu,emacs} /a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts})
p=(
/a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts}
/c/roles/prom_export/files/simple/usr/local/bin/fsf-install-node-exporter
/a/opt/fpaste
/p/c/user-specific/www-data/icecast-fsf{,-tech}-htpasswd
/p/c/icecast.xml
)
a="-ahviSAXPH --specials --devices --delete --relative --exclude-from=/p/c/li-rsync-excludes"
ret=0
for h in li je bk; do
# fix: propagate per-host rsync failures into the return value;
# previously only the final pull below affected $ret.
m s rsync "$@" $a ${p[@]} /p/c/machine_specific/$h root@$h.b8.nz:/ || ret=$?
## only li is debian11
#p[0]=/a/opt/emacs-trisuqel10
#p[1]=/a/opt/emacs-trisquel10-nox
done
m s rsync "$@" -ahviSAXPH root@li.b8.nz:/a/h/proposed-comments/ /a/h/proposed-comments || ret=$?
return $ret
}
bkpush() { # no emacs. for running faster.
# Push the lipush path set to host bk only.
# fix: p, a and ret were undeclared and leaked into the global shell.
local p a ret
p=(/a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts} /c/roles/prom_export/files/simple/usr/local/bin/fsf-install-node-exporter)
a="-ahviSAXPH --specials --devices --delete --relative --exclude-from=/p/c/li-rsync-excludes"
ret=0
m rsync "$@" $a ${p[@]} /p/c/machine_specific/bk root@bk.b8.nz:/ || ret=$?
return $ret
}
jepush() { # no emacs. for running faster.
# Push the lipush path set to host je only.
# fix: p, a and ret were undeclared and leaked into the global shell.
local p a ret
p=(/a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts} /c/roles/prom_export/files/simple/usr/local/bin/fsf-install-node-exporter)
a="-ahviSAXPH --specials --devices --delete --relative --exclude-from=/p/c/li-rsync-excludes"
ret=0
m rsync "$@" $a ${p[@]} /p/c/machine_specific/je root@je.b8.nz:/ || ret=$?
return $ret
}
bindpush() {
# Sign my zones, push configs to the vpses, then run dnsup on li and bk.
# fix: declare the loop variable local so it doesn't leak globally.
local h
dsign iankelling.org expertpathologyreview.com zroe.org amnimal.ninja
lipush
for h in li bk; do
m ssh iank@$h.b8.nz dnsup
done
}
bindpushb8() {
# Push configs to the vpses and reset the b8.nz zone on li and bk.
# fix: declare the loop variable local so it doesn't leak globally.
local h
lipush
for h in li bk; do
m ssh $h.b8.nz dnsb8
done
}
# Relink config files and reload bind on this host.
dnsup() {
conflink -f
m ser reload named
}
dnsb8() {
# Reset the b8.nz zone to its pristine copy: stop named, wait for the
# daemon to fully exit (up to ~50s), remove the zone journal files,
# reinstall the initial zone file, and restart named.
local f=/var/lib/bind/db.b8.nz
m ser stop named
# jbk is like a temp file. dunno if removing it helps
i=0
while pgrep '^named$' &>/dev/null; do
sleep .5
if (( ++i > 100 )); then
echo "dnsb8: error: timeout waiting for named to exit"
return 1
fi
done
m sudo rm -fv $f.jnl $f.signed.jnl $f.jbk
m sudo install -m 644 -o bind -g bind /p/c/machine_specific/vps/bind-initial/db.b8.nz $f
m ser restart named
}
dnsecgen() {
# Generate a ZSK and KSK for zone $1 and print the DS records to hand
# to the registrar.
# keys generated like this
# because of https://ftp.isc.org/isc/dnssec-guide/dnssec-guide.pdf
# https://blog.apnic.net/2019/05/23/how-to-deploying-dnssec-with-bind-and-ubuntu-server/
# key length is longer than that guide because
# we are using those at fsf and when old key lengths
# become insecure, I want some extra time to update.
# dnsecgen (in brc2)
local zone keyfile
zone=$1
dnssec-keygen -a RSASHA256 -b 2048 $zone
dnssec-keygen -f KSK -a RSASHA256 -b 4096 $zone
for keyfile in K"$zone".*.key; do
# eg Kb8.nz.+008+47995.key tag=47995
# in dnsimple, you add the long string from this.
# in gandi, you add the long string from the .key file,
# then see that the digest matches the ds.
echo "tag is the number after DS"
dnssec-dsfromkey -a SHA-256 $keyfile
done
# For b8.nz, we let bind read the keys and sign, and
# right now they have root ownership, so let them
# get group read.
chmod g+r ./*.private
}
dsign() {
# create .signed file for each zone argument (accepts ZONE or db.ZONE)
# note: full paths probably not needed.
local zonearg zone dir
dir=/p/c/machine_specific/vps/filesystem/var/lib/bind
for zonearg in "$@"; do
zone=${zonearg#db.}
dnssec-signzone -S -e +31536000 -o $zone -K $dir -d $dir $dir/db.$zone
done
}
# set day start for use in other programs.
# expected to do be in a format like 830, or 800 or 1300.
ds() {
# With an argument: validate it looks like a 2-4 digit clock time and
# store it. With no argument: print the stored value.
local regex
# fix: anchor the pattern. Unanchored, any string merely containing two
# consecutive digits (eg x1234y) was accepted as valid.
regex='^[0-9]?[0-9]?[0-9][0-9]$'
if [[ $1 ]]; then
if [[ ! $1 =~ $regex ]]; then
echo "ds: error. expected \$1 to match $regex, got \$1: $1"
return 1
fi
echo $1 >/b/data/daystart
else
cat /b/data/daystart
fi
}
#### begin bitcoin related things
btc() {
# bitcoin-cli wrapper that pulls the rpc credentials out of bitcoin.conf.
local f=/etc/bitcoin/bitcoin.conf
# importprivkey will timeout if using the default of 15 mins.
# upped it to 1 hour.
bitcoin-cli -rpcclienttimeout=60000 -"$(s grep rpcuser= $f)" -"$(s grep rpcpassword= $f)" "$@"
}
btcusd() { # $1 btc in usd
# Print the current BTC spot price from coinbase; with $1, also print
# the value of $1 BTC in USD.
local spot
spot="$(curl -s https://api.coinbase.com/v2/prices/BTC-USD/spot | jq -r .data.amount)"
printf "$%s\n" "$spot"
if [[ $1 ]]; then
printf "$%.2f\n" "$(echo "scale=4; $spot * $1"| bc -l)"
fi
}
usdbtc() { # $1 usd in btc
# Print the current BTC spot price from coinbase; with $1, also convert
# $1 USD into BTC.
local spot
spot="$(curl -s https://api.coinbase.com/v2/prices/BTC-USD/spot | jq -r .data.amount)"
printf "$%s\n" "$spot"
if [[ $1 ]]; then
# 100 mil satoshi / btc. 8 digits after the 1.
printf "%.8f btc\n" "$(echo "scale=10; $1 / $spot "| bc -l)"
fi
}
satoshi() { # $1 satoshi in usd
# Print the USD value of one satoshi; with $1, also the value of $1
# satoshis.
local spot
spot="$(curl -s https://api.coinbase.com/v2/prices/BTC-USD/spot | jq -r .data.amount)"
spot=$(echo "scale=10; $spot * 0.00000001"| bc -l)
printf "$%f\n" "$spot"
if [[ $1 ]]; then
printf "$%.2f\n" "$(echo "scale=10; $spot * $1"| bc -l)"
fi
}
# Bitcoin holds open the wallet file. this causes problems for a
# secondary computer running bitcoin and receiving a backup (as of
# 2023). However, in 2024-02, I ran a backup where a receiving machine
# had the wallet enabled and there was no error, so I don't know if this
# is still an issue or likely it is an inconsistent behavior.
# Note: a pruned node won't allow for a wallet to be added, super lame
# so i'm just not running a bitcoin node for now.
# Error: Prune: last wallet synchronisation goes beyond pruned data. You
# need to -reindex (download the whole blockchain again in case of
# pruned node)
#
#### end bitcoin related things
cbfstool () { /a/opt/coreboot/build/cbfstool "$@"; }
cgpl()
{
# Copy the GPL COPYING file to each given destination, or to the
# current directory when called with no arguments.
(( $# )) || set -- .
cp /a/bin/data/COPYING "$@"
}
capache()
{
# Copy the Apache LICENSE file to each given destination, or to the
# current directory when called with no arguments.
(( $# )) || set -- .
cp /a/bin/data/LICENSE "$@"
}
# Print the standard Apache 2.0 license header used by apache-apply.
# The heredoc delimiter is quoted, so the text is emitted literally.
apache-header() {
# First paragraph is to avoid people being confused about why a
# file is apache licensed.
cat <<'EOF'
# I, Ian Kelling, follow the GNU license recommendations at
# https://www.gnu.org/licenses/license-recommendations.en.html. They
# recommend that small programs, < 300 lines, be licensed under the
# Apache License 2.0. This file contains or is part of one or more small
# programs. If a small program grows beyond 300 lines, I plan to change
# to a recommended GPL license.
# Copyright 2024 Ian Kelling
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EOF
}
# apply apache to git tracked bash files + README, except files with A?GPL3 header.
apache-apply-repo() {
local f
# fix: read filenames line by line rather than word-splitting
# $(git ls-files), which broke on filenames containing whitespace;
# also declare f local.
while IFS= read -r f; do
[[ -L $f || ! -f $f ]] && continue
if [[ $f != README ]]; then
# only files whose first line is a /bin/bash shebang
if ! grep -n '^#!/bin/bash' "$f" | grep ^1: &>/dev/null; then continue; fi
# skip files that already carry a GPL header
if head -n 10 "$f" | grep 'it under the terms of the GNU General Public License as published by' &>/dev/null; then continue; fi
fi
apache-apply "$f"
done < <(git ls-files)
}
apache-apply() {
# Insert the apache license header into each given file, just below the
# shebang when there is one. Missing files are created as executable
# bash scripts first. sponge (moreutils) writes the result back in place.
local f
for f in "$@"; do
if [[ ! -e $f ]]; then
echo '#!/bin/bash' >"$f"
chmod +x "$f"
fi
if head -n1 "$f"| grep -E '^#!/' &>/dev/null; then
# keep the shebang as line 1, header after it
{
head -n1 "$f"
apache-header
tail -n+2 "$f"
} | sponge "$f"
else
# no shebang: header goes at the very top
{
apache-header
cat "$f"
} | sponge "$f"
fi
done
}
# strip out the apache license from a file.
apache-strip() {
# For each top-level regular file beginning with a bash shebang: within
# the first 20 lines, delete the block from the "# Copyright ... Ian
# Kelling" line through "# limitations under the License." (the
# tac|sed|tac trick matches the range bottom-up), then reattach the
# rest of the file. sponge is from moreutils.
# shellcheck disable=SC2044 # meh
for f in $(find . -type f -maxdepth 1); do if head -n1 "$f"| grep -E '^#!/bin/bash\b' &>/dev/null; then { head -n 20 $f | tac | sed '/^# limitations under the License.$/,/^# Copyright.*Ian Kelling$/d' | tac; tail -n+21 $f; } |sponge $f; fi ; done
}
chro() {
# Launch chromium: the native binary when installed, otherwise from the
# bookworm schroot.
local cmd
if type -p chromium &>/dev/null; then
cmd=chromium
else
cd /
cmd="schroot -c bookworm chromium"
fi
# fix: the launch line used to live inside the else branch only, so
# when a native chromium existed we set cmd and then did nothing.
CHROMIUM_FLAGS='--enable-remote-extensions' $cmd & r
}
# do all tee.
# pipe to this, or just type like a shell
# todo: test this
dat() {
# NOTE(review): tp.b8.nz appears twice while x3.b8.nz (present in da
# below) is absent -- looks like a copy/paste slip; confirm the host list.
tee >(ssh frodo.b8.nz) >(ssh x2) >(ssh tp.b8.nz) >(ssh kw) >(ssh tp.b8.nz)
}
da() { # do all
# Run the given command over ssh on each of my machines in turn.
local target
for target in x2 kw tp.b8.nz x3.b8.nz frodo.b8.nz; do
ssh $target "$@"
done
}
debian_pick_mirror () {
# netselect-apt finds a fast mirror.
# but we need to replace the mirrors ourselves,
# because it doesnt do that. best it can do is
# output a basic sources file
# here we get the server it found, get the main server we use
# then substitute all instances of one for the other in the sources file
# and backup original to /etc/apt/sources.list-original.
# this is idempotent. the only way to identify debian sources is to
# note the original server, so we put it in a comment so we can
# identify it later.
# fix: declare url local too; it leaked into the global shell.
local file url
file=$(mktemp -d)/f # safe way to get file name without creating one
sudo netselect-apt -o "$file" || return 1
url=$(grep ^\\w $file | head -n1 | awk '{print $2}')
sudo cp -f /etc/apt/sources.list /etc/apt/sources.list-original
sudo sed -ri "/http.us.debian.org/ s@( *[^ #]+ +)[^ ]+([^#]+).*@\1$url\2# http.us.debian.org@" /etc/apt/sources.list
sudo apt-get update
}
# Compare dns answers from my two authoritative nameservers.
digme() {
digdiff @ns{1,2}.iankelling.org "$@"
}
dup() {
# Run distro-begin/distro-end and/or conflink depending on flags that
# system-status embeds into PS1: D = both, DB = begin only, DE = end
# only, CONFLINK = conflink needed. Note ;;& after the D and DE arms:
# unlike ;;, the following patterns are still tested.
local ran_d
ran_d=false
system-status _
case $PS1 in
*[\ \]]D\ *)
pushd /
/b/ds/distro-begin |& ts || return $?
/b/ds/distro-end |& ts || return $?
popd
ran_d=true
;;&
*[\ \]]DB\ *)
pushd /
/b/ds/distro-begin |& ts || return $?
popd
ran_d=true
;;
*[\ \]]DE\ *)
pushd /
/b/ds/distro-end |& ts || return $?
popd
ran_d=true
;;&
*CONFLINK*)
# distro-begin/end already run conflink, so skip it if they ran
if ! $ran_d; then
conflink
fi
;;
esac
system-status _
}
envload() { # load environment from a previous: export > file
local file=${1:-$HOME/.${USER}_env}
# un-export everything currently exported: 'export -n' keeps each
# variable's value but clears its export attribute
eval "$(export | sed 's/^declare -x/export -n/')"
while IFS= read -r line; do
# declare -x makes variables local to a function
# so rewrite each saved "declare -x VAR=..." line to "export VAR=..."
eval ${line/#declare -x/export}
done < "$file"
}
failfunc() { asdf a b c; }
failfunc2() { failfunc d e f; }
# one that comes with distros is too old for newer devices
fastboot() {
/a/opt/android-platform-tools/fastboot "$@";
}
kdecd() { /usr/lib/x86_64-linux-gnu/libexec/kdeconnectd; }
# Print the laptop battery charge percentage.
batp() {
cat /sys/class/power_supply/BAT0/capacity
}
# List of apps to install/update
# Create from existing manually installed apps by doing
# fdroidcl update
# fdroidcl search -i, then manually removing
# automatically installed/preinstalled apps
#
# # my attempt at recovering from boot loop:
# # in that case, boot to recovery (volume up, home button, power, let go of power after samsun logo)
# # then
# mount /dev/block/mmcblk0p12 /data
# cd /data
# find -iname '*appname*'
# rm -rf FOUND_DIRS
# usually good enough to just rm -rf /data/app/APPNAME
#
# currently broken:
# # causes replicant to crash
# org.quantumbadger.redreader
# org.kde.kdeconnect_tp
# not broke, but wont work without gps
#com.zoffcc.applications.zanavi
# not broke, but not using atm
#com.nutomic.syncthingandroid
# # doesn't work on replicant
#net.sourceforge.opencamera
#
# f-droid package ids consumed by fdup below.
fdroid_pkgs=(
net.mullvad.mullvadvpn
org.schabi.newpipe
io.github.subhamtyagi.lastlauncher
io.anuke.mindustry
com.biglybt.android.client
de.marmaro.krt.ffupdater
me.ccrama.redditslide
org.fedorahosted.freeotp
at.bitfire.davdroid
com.alaskalinuxuser.justnotes
com.artifex.mupdf.viewer.app
com.danielkim.soundrecorder
com.fsck.k9
com.ichi2.anki
com.jmstudios.redmoon
com.jmstudios.chibe
org.kde.kdeconnect_tp
com.notecryptpro
com.termux
cz.martykan.forecastie
de.danoeh.antennapod
de.blinkt.openvpn
de.marmaro.krt.ffupdater
eu.siacs.conversations
free.rm.skytube.oss
im.vector.alpha # riot
info.papdt.blackblub
me.tripsit.tripmobile
net.gaast.giggity
net.minetest.minetest
net.osmand.plus
org.isoron.uhabits
org.linphone
org.gnu.icecat
org.smssecure.smssecure
org.yaaic
sh.ftp.rocketninelabs.meditationassistant.opensource
)
# https://forum.xda-developers.com/android/software-hacking/wip-selinux-capable-superuser-t3216394
# for maru,
#me.phh.superuser
fdup() {
# Update fdroidcl itself, refresh the f-droid index, install any of
# $fdroid_pkgs not yet on the device, and upgrade installed apps that
# have updates available.
local -A installed updated
local p
# tried putting this in go buildscript cronjob,
# but it failed with undefined: os.UserCacheDir. I expect its due to
# an environment variable missing, but its easier just to stick it here.
# NOTE(review): modern go (1.17+) dropped 'go get -u' for installing
# binaries; this would need 'go install mvdan.cc/fdroidcl@latest'
# there -- confirm which go version this runs under.
m go get -u mvdan.cc/fdroidcl || return 1
m fdroidcl update
# upgrade the f-droid client first if it has an update
if fdroidcl search -u | grep ^org.fdroid.fdroid; then
fdroidcl install org.fdroid.fdroid
sleep 5
m fdroidcl update
fi
for p in $(fdroidcl search -i| grep -o "^\S\+"); do
installed[$p]=true
done
# updated[p]=false means an update is pending for p
for p in $(fdroidcl search -u| grep -o "^\S\+"); do
updated[$p]=false
done
for p in ${fdroid_pkgs[@]}; do
if ! ${installed[$p]:-false}; then
m fdroidcl install $p
# sleeps are just me being paranoid since replicant has a history of crashing when certain apps are installed
sleep 5
fi
done
for p in ${!installed[@]}; do
if ! ${updated[$p]:-true}; then
m fdroidcl install $p
sleep 5
fi
done
}
firefox-default-profile() {
# Make the given profiles.ini section (eg Profile0) the default firefox
# profile by moving the Default=1 line into it.
# fix: declare file local; it leaked into the global shell.
local key value section file
key=Default
value=1
section=$1
file=/p/c/subdir_files/.mozilla/firefox/profiles.ini
# remove any existing Default lines anywhere in the file
sed -ri "/^ *$key/d" "$file"
# within [section], drop stray Default lines, then append Default=1
sed -ri "/ *\[$section\]/,/^ *\[[^]]+\]/{/^\s*${key}[[:space:]=]/d};/ *\[$section\]/a $key=$value" "$file"
}
fdhome() { #firefox default home profile
firefox-default-profile Profile0
}
# firefox default work profile
fdwork() {
firefox-default-profile Profile4
}
ff() {
# Prefer firefox; fall back to the iceweasel name on old debian.
local browser=iceweasel
if type -P firefox &>/dev/null; then
browser=firefox
fi
$browser "$@"
}
# Launch firefox with the alt profile, output suppressed.
fn() {
firefox -P alt "$@" >/dev/null 2>&1
}
fsdiff () {
# Compare the current directory tree against the same paths rooted at
# $1: differing files are appended to /a/tmp/DIR-diff, files absent
# under $1 to /a/tmp/DIR-missing (printed at the end, with contents if
# the list is short). md5diff is a helper defined elsewhere.
local missing=false
local dname="${PWD##*/}"
local m="/a/tmp/$dname-missing"
local d="/a/tmp/$dname-diff"
local line
[[ -e $d ]] && rm "$d"
[[ -e $m ]] && rm "$m"
local msize=0
local fsfile
while read -r line; do
fsfile="$1${line#.}"
if [[ -e "$fsfile" ]]; then
# fix: use $d instead of respelling the same path inline, and
# declare line local (it leaked globally).
md5diff "$line" "$fsfile" && tee -a "$d" <<< "$fsfile $line"
else
missing=true
echo "$line" >> "$m"
msize=$((msize + 1))
fi
done < <(find . -type f )
if $missing; then
echo "$m"
(( msize <= 100 )) && cat $m
fi
}
fsdiff-test() {
# Build two small temp trees that differ in one file and lack another,
# then run fsdiff to eyeball its output.
local tmpd x
# expected output, with different tmp dirs
# /tmp/tmp.HDPbwMqdC9/c/d ./c/d
# /a/tmp/tmp.qLDkYxBYPM-missing
# ./b
tmpd="$(mktemp -d)"
cd "$tmpd"
echo ok > a
echo nok > b
mkdir c
echo ok > c/d
# note: x is already declared local above; this redeclaration is harmless
local x
x=$(mktemp -d)
mkdir $x/c
echo different > $x/c/d
echo ok > $x/a
fsdiff $x
rm -r "$x" "$tmpd"
}
rename-test() {
# test whether missing files were renamed, generally for use with fsdiff
# $1 = fsdiff output file, $2 = directory to compare to. pwd = fsdiff dir
# echos non-renamed files
local line candidate matched
local -a dir_sums=()
# checksum every file directly under $2; unreadable entries are skipped
for candidate in "$2"/*; do
{ dir_sums+=( "$(md5sum < "$candidate")" ) ; } 2>/dev/null
done
# a missing file whose checksum matches something in $2 was renamed;
# anything else is reported
while read -r line; do
{ missing_sum=$(md5sum < "$line") ; } 2>/dev/null
matched=false
for candidate in "${dir_sums[@]}"; do
if [[ $missing_sum == "$candidate" ]]; then
matched=true
break
fi
done
if ! $matched; then
echo "$line"
fi
done < "$1"
return 0
}
feh() {
# F = fullscren, z = random, Z = auto zoom
command feh --auto-rotate -FzZ "$@"
}
# Launch firefox with the default profile, output suppressed.
fw() {
firefox -P default "$@" >/dev/null 2>&1
}
# Set my personal email for the current git repo.
gitian() {
git config user.email ian@iankelling.org
}
# at least in flidas, things rely on gpg being gpg1
gpg() {
# Prefer gpg2 where present, otherwise run the system gpg; 'command'
# bypasses this function so we don't recurse.
local bin=gpg
if type -P gpg2 &>/dev/null; then
bin=gpg2
fi
command $bin "$@"
}
# git send-email with my fsf address as the envelope sender.
gse() {
local email=iank@fsf.org
git send-email --notes "--envelope-sender=<$email>" \
--suppress-cc=self "$@"
}
gup() { /a/f/gnulib/build-aux/gnupload "$@"; }
dejagnu() { /a/opt/dejagnu/dejagnu "$@"; }
# do git status on published repos.
hstatus() {
# For each repo linked under /a/bin/githtml, print its name and short
# git status when there are uncommitted changes.
# fix: declare x and status local; both leaked globally ('status' is an
# especially hazardous name to clobber).
local x status
c /a/bin/githtml
for x in *; do
cd "$(readlink -f $x)"/..
status=$(i status -s) || pwd
if [[ $status ]]; then
hr
echo $x
printf "%s\n" "$status"
fi
cd /a/bin/githtml
done
}
# Run skgit in each repo linked under /a/bin/githtml.
hsk() {
local x
c /a/bin/githtml
for x in *; do
cd "$(readlink -f $x)"/..
skgit
cd /a/bin/githtml
done
}
## work log
#
# note: database location is specified in ~/.timetrap.yml, currently /p/.timetrap.db
wlog() {
# Print a per-day total of timetrap work entries for the last
# $1 (default 16) days, prefixed with the weekday and date.
local day i days_back
days_back=${1:-16}
for (( i=days_back; i>=0; i-- )); do
day=$( date +%F -d @$((EPOCHSECONDS - 86400*i )) )
date "+%a %b %d" -d @$((EPOCHSECONDS - 86400*i )) | tr '\n' ' '
/a/opt/timetrap/bin/t d -ftotal -s $day -e $day all -m '^w|lunch$'
done
}
to() { t out -a "$@"; }
ti() { t in -a "$@"; }
# timetrap lunch: clock out of work at the given time, record a 45
# minute lunch entry, then clock back into work.
tl() {
local in_secs
to "$*"
t s lunch
t in -a "$*"
# underscores/dots in the time argument become spaces for date parsing
in_secs="$(date -d "${*//[_.]/ }" +%s)"
m t out -a "$(date +%F.%T -d @$(( in_secs + 60*45 )) )"
t s w
}
# help me focus. opens 2 windows.
focus() {
/p/c/proc/focus/linux-amd64/focus &
watcharb5
# kill the backgrounded focus program when watcharb5 returns
kill %%
}
# Display a list of the active window title
# i've been on with 10 second samples going back
# 5 minutes. If I've been on one window for 10 seconds
# or longer, then display the second count.
#
# Press any key to exit.
watcharb5() {
local char ret
# start a fresh arbtt capture with a clean log
killall arbtt-capture &>/dev/null ||:
rm -f ~/.arbtt/capture.log
arbtt-capture --sample-rate=10 &
while true; do
arb5
ret=0
# i first thought to sleep and capture ctrl-c, but it seems we can't
# capture control-c, unless maybe we implement the commands in a
# separate script or maybe add err-cleanup to err. Anyways, this
# method is superior because any single char exits.
# read one raw char with a 5s timeout; >128 (here 142) means timed out
read -rsN1 -t 5 char || ret=$?
if (( ret == 142 )) || [[ ! $char ]]; then
# debug
#e ret=$ret char=$char
:
else
killall arbtt-capture ||:
return 0
fi
done
}
arb5() {
# Render the last ~30 arbtt samples (active window title, and idle
# seconds when >10) bottom-up, padded so the newest line lands at the
# bottom of the terminal.
local i j l sec blanks line
local -a arbtt_lines
if [[ ! -e ~/.arbtt/capture.log ]]; then
sleep 5
fi
blanks=$(( LINES - 34 ))
for (( i=0; i < blanks; i++ )); do
echo
done
{
i=0
j=0
# https://stackoverflow.com/questions/56486272/how-to-concat-multiple-fields-to-same-line-with-jq
# fix: mapfile stores one line per array element. The previous scalar
# assignment (arbtt_lines=$(...)) collapsed the whole dump into a
# single element, so the loop below only ever parsed the first line.
mapfile -t arbtt_lines < <(arbtt-dump -l 30 -t json | \
jq -r '.[] | [ ( .inactive / 1000 | floor ) , ( .windows[] | select (.active == true) |.title) ] | @tsv' | tac)
for line in "${arbtt_lines[@]}"; do
read -r sec l <<<"$line"
if (( j >= LINES )); then
break
fi
# group separator every 6 samples (~1 minute at 10s sample rate)
if (( i % 6 == 0 && i >= 2 )); then
j=$(( j + 1 ))
echo "## $(( i / 6 + 1 )) ##"
fi
if (( sec > 10 )); then
printf "%3d %s\n" $sec "$l" | sed -r "s/^(.{$COLUMNS}).*/\1/"
else
printf " %s\n" "$l" | sed -r "s/^(.{$COLUMNS}).*/\1/"
fi
i=$(( i + 1 ))
j=$(( j + 1 ))
done
while (( j < 34 && j < LINES )); do
echo
j=$(( j + 1 ))
done
} | tac
}
arbttlog() {
# from the log, show only the currently active window, and the number of
# seconds of input inactivity.
# (the sed joins each record's two lines and trims the inactivity
# figure from milliseconds down to seconds)
arbtt-dump "$@" | grep -v '( )\|Current Desktop' | sed -rn '/^[^ ]/{N;s/^(.{21})([0-9]*)[0-9]{3}m.*\(\*/\1\2/;s/^(.{21})[0-9]*.*\(\*/\1/;s/\n//;p}' ; }
# Launch IntelliJ IDEA CE in the background (r presumably disowns/redraws).
idea() {
/a/opt/idea-IC-163.7743.44/bin/idea.sh "$@" & r
}
ilogs-local() {
# Print znc channel logs for channel $1 (plus any extra channel args),
# merged and sorted by the date prefix, for each irc network.
# NOTE(review): the backslash-escaped quotes on the sed line (and the
# \# in tmp=) hand sed a literal '"' character; this looks written to
# survive a remote shell eval (see ilogs) -- confirm it behaves when
# run directly. Also d, n, x, base, files, tmp and f all leak globally.
d=/var/lib/znc/moddata/log/iank/
for n in freenode libera; do
cd $d/$n
hr
for x in "#$1/"*; do
base=${x##*/}
files=()
for f in $@; do
tmp=\#$f/$base
if [[ -e $tmp ]]; then
files+=(\#$f/$base)
fi
done
sed \"s/^./${base%log}/\" ${files[@]}|sort -n
hr
done
done
}
# Run ilogs-local on the znc server (sl presumably ships the function
# definition along -- TODO confirm).
ilogs() {
sl root@iankelling.org ilogs-local "$@"
}
ilog-local() {
# Dump all znc logs for channel $1 on each irc network, substituting
# the date (from the filename) into each line's time prefix.
# fix: declare n and x local; they leaked into the global shell.
local d chan n x
chan="$1"
d=/var/lib/znc/moddata/log/iank/
for n in freenode libera; do
if [[ ! -d $d$n/"$chan" ]]; then
continue
fi
cd $d$n/"$chan"
hr
for x in *; do
# *** are parts and joins and such, and they make reading hard.
# I probably will want to see them sometimes, just have to
# remove that part.
echo $x; sed "s/^./${x%log}/;/\*\*\*/d" $x; hr;
done
done
}
# View znc logs for channel $1 (default #fsfsys) from the server,
# paged from the end.
ilog() {
local chan tmpf
tmpf=$(mktemp)
chan="${1:-#fsfsys}"
# use * instead of -r since that does sorted order
sl root@li.b8.nz ilog-local "$chan" > $tmpf
less +G $tmpf
rm -f $tmpf
}
# Open files/urls with the desktop handler, preferring gio, then
# gvfs-open, then xdg-open.
o() {
if type gio &> /dev/null ; then
gio open "$@"
elif type gvfs-open &> /dev/null ; then
gvfs-open "$@"
else
xdg-open "$@"
fi
# another alternative is run-mailcap
}
# ccomp: presumably copies xdg-open's completion onto o -- TODO confirm
ccomp xdg-open o
# jfilter() {
# grep -Evi -e "^(\S+\s+){4}(sudo|sshd|cron)\[\S*:" \
# -e "^(\S+\s+){4}systemd\[\S*: (starting|started) (btrfsmaintstop|dynamicipupdate|spamd dns bug fix cronjob|rss2email)\.*$"
# }
# jtail() {
# journalctl -n 10000 -f "$@" | jfilter
# }
# jr() { journalctl "$@" | jfilter | less ; }
# jrf() { journalctl -n 200 -f "$@" | jfilter; }
## old version for model01. i need to get that firmware working again.
# kff() { # keyboardio firmware flash. you must hold down the tilde key
# pushd /a/opt/Model01-Firmware
# # if we didn't want this yes hack, then remove "shell read" from
# # /a/opt/Kaleidoscope/etc/makefiles/sketch.mk
# yes $'\n' | VERBOSE=1 make flash
# popd
# }
# keyboardio Model100 firmware flash.
kff() {
pushd /a/opt/Kaleidoscope/examples/Devices/Keyboardio/Model100
make flash
popd
}
wgkey() {
# Generate a wireguard keypair as NAME-priv.key / NAME-pub.key,
# created with restrictive permissions via a temporary umask.
local name saved_umask
if (( $# != 1 )); then
e expected 1 arg >&2
return 1
fi
name=$1
saved_umask=$(umask)
umask 0077
wg genkey | tee $name-priv.key | wg pubkey > $name-pub.key
umask $saved_umask
}
# Regenerate host info and propagate it everywhere it is consumed.
host-info-all() {
host-info-update
bindpushb8
# for wireguard configs
ssh iank@li.b8.nz conflink
wrt-setup
}
## for updating host info like ip, location, update /p/c/host-info and
## host_info below. the host_info array should probably be in its own
## file that gets sourced so that it can be more easily updated.
# todo: this is so long that it becomes confusing,
# try to split it up.
#
# To make some changes take effect, run host-info-all.
host-info-update() {
# Rebuild derived host data (wireguard holes, openvpn client configs,
# ssh config, /etc/hosts, dsh groups) from the host-info database.
# NOTE(review): several spots below look corrupted/truncated -- see the
# inline notes. Recover the missing lines from version control before
# relying on this function.
local -A vpn_ips host_ips host_macs portfw_ips nonvpn_ips all_ips
local -a root_hosts nonroot_hosts
# the hosts with no mac
root_hosts=( bk je li b8.nz )
for h in ${root_hosts[@]}; do
root_hosts+=(${h}ex)
done
root_hosts+=(cmc)
while read -r ip host mac opts; do
if [[ $ip == *#* || ! $host ]]; then continue; fi
# opt parsing
vpn=false
root=false
for opt in $opts; do
case $opt in
user=root)
root=true
;;
vpn)
vpn=true
;;
esac
done
all_ips[$host]=$ip
if $vpn; then
portfw_ips[$host]=$ip
vpn_ips[$host]=$ip
else
nonvpn_ips[$host]=$ip
fi
if $root; then
# note: the reason we have b8.nz suffix here but not for non_root
# hosts is that it is for the User part, the IdentityFile part is
# redundant to *.b8.nz. Also note ${host}i, we only setup those for vpn hosts, but there is no harm in overspecifying here.
root_hosts+=($host ${host}i $host.b8.nz ${host}i.b8.nz)
# shellcheck disable=SC2004 # false positive
root_hosts_a[$host]=t # a for associative array
else
nonroot_hosts+=($host ${host}i)
fi
host_ips[$host]=$ip
if [[ $mac ]]; then
host_macs[$host]=$mac
fi
done
# NOTE(review): the while loop above has no input redirection, and the
# bare path on the next line would be executed as a command -- lines
# appear to be missing here (probably 'done < <(cat /p/c/host-info
# /p/c/cmc-firewall-data)' or similar).
/p/c/cmc-firewall-data
local host ipsuf f files
# shellcheck disable=SC2016 # shellcheck doesnt know this is sed
sedi '/edits below here are made automatically/,$d' /p/c/machine_specific/li/filesystem/etc/wireguard/wgmail.conf
for host in ${!vpn_ips[@]}; do
if [[ ${root_hosts_a[$host]} ]]; then
# root machines dont actually need vpn, but
# the classification still helps with other
# configurations.
continue
fi
ipsuf=${vpn_ips[$host]}
wghole $host $ipsuf
u /b/ds/machine_specific/li/filesystem/etc/openvpn/client-config-hole/$host <$tmpf
cedit -e work-identity /p/c/subdir_files/.ssh/config-static <$tmpf
rm -f $tmpf
# NOTE(review): the for loop above is never closed with done, and
# $tmpf is read without being assigned -- more lines appear missing.
### begin focus on hosts file update ###
#
# This started as its own function, but it actually
# needed to alter the ssh config, so combined it.
#
# background: This is finally doing dynamic ip resolution via the hosts
# file. I considered detecting where each host was dynamically or
# something, but ultimately decided to mostly avoid that, other than
# detecting the status of the current machine I'm on. I want to be able
# to move it around without having to manually type much of anything.
local -a host_domain_suffix hosts
local -A ip_to_hosts
local suf ip i host at_home suf_from_here
source /p/c/domain-info
at_home=false
# at home iff our home router's arp entry is visible
if ip n | grep -q "10.2.0.1 .* b4:75:0e:fd:06:4a"; then
at_home=true
fi
for i in ${host_domain_suffix[@]}; do
if [[ $i == *.* ]]; then
suf=$i
continue
fi
hosts+=($i)
if [[ $i == "$HOSTNAME" ]]; then
unset "portfw_ips[$i]"
continue
fi
suf_from_here=$suf
if ! $at_home && [[ $suf == .b8.nz || $suf == [wc].b8.nz ]]; then
suf_from_here=i.b8.nz
else
unset "portfw_ips[$i]"
fi
# note this might be outdated until we do a dns push
ip=$(dig +short "$i$suf_from_here" @iankelling.org | tail -n1) ||:
if [[ ! $ip ]]; then
if [[ $suf == .office.fsf.org ]]; then
suf_from_here=wg.b8.nz
ip=$(getent ahostsv4 "$i$suf_from_here" | awk '{ print $1 }' | head -n1) ||:
fi
fi
if [[ $ip ]]; then
ip_to_hosts[$ip]+=" $i"
else
echo error: failed to get ip of "$i$suf_from_here"
fi
done
for ip in "${!ip_to_hosts[@]}"; do
echo "$ip${ip_to_hosts[$ip]}"
done | s cedit -e hosts-file-up /etc/hosts
for host in ${hosts[@]}; do
echo $host
done >/p/c/subdir_files/.dsh/group/btrbk
### end focus on hosts file update ###
# note: note sure if this is a great way to check.
# todo: think about it
if $at_home; then
# possible that in the future we want to create
# a dynamic file here, and then we can move the cat
# command above out of the conditional
rsync -a /p/c/subdir_files/.ssh/config-static ~/.ssh/config
else
for host in ${!portfw_ips[@]}; do
ipsuf=${portfw_ips[$host]}
# NOTE(review): this for loop lacks its done, and the bare
# 'cat < ~/.ssh/config-dynamic' looks like a truncated heredoc that
# should append this host's port-forward stanza -- lines missing.
cat < ~/.ssh/config-dynamic
cat /p/c/subdir_files/.ssh/config-static ~/.ssh/config-dynamic >~/.ssh/config
fi
}
# usage host ipsuf [extrahost]
#
# If the keys already exist and you want new ones, remove them:
# rm /p/c/machine_specific/$host/filesystem/etc/wireguard/hole-{priv,pub}.key
#
# extrahost is a host/cidr that is allowed to go be routed through the
# vpn by this host.
wghole() {
if (( $# < 2 || $# > 3 )); then
e expected 2-3 arg of hostname, ip suffix, and extrahost >&2
return 1
fi
local host ipsuf umask_orig vpn_allowed
host=$1
ipsuf=$2
if [[ $3 ]]; then
extrahost=,$3
fi
# allow every other vpn host's tunnel address through this host
for vpn_host in ${!vpn_ips[@]}; do
if [[ $vpn_host == "$host" ]]; then
continue
fi
vpn_allowed+=",10.174.${vpn_ips[$vpn_host]}.2/32"
done
mkdir -p /p/c/machine_specific/$host/filesystem/etc/wireguard
(
cd /p/c/machine_specific/$host/filesystem/etc/wireguard
umask_orig=$(umask)
umask 0077
if [[ ! -s hole-priv.key || ! -s hole-pub.key ]]; then
wg genkey | tee hole-priv.key | wg pubkey > hole-pub.key
fi
# NOTE(review): the file is corrupted from here: the wghole.conf
# heredoc, the end of this function, and the start of the mns-setup
# helper (referenced by mns below) have been fused into the next line.
# Recover both functions from version control.
cat >wghole.conf </dev/null; then
m sudm mount --bind /root/mount_namespaces /root/mount_namespaces
fi
m sudm mount --make-private /root/mount_namespaces
if ! sudm test -e /root/mount_namespaces/$ns; then
m sudm touch /root/mount_namespaces/$ns
fi
if ! sudm mountpoint /root/mount_namespaces/$ns >/dev/null; then
m sudm unshare --propagation slave --mount=/root/mount_namespaces/$ns /bin/true
fi
}
mns() { # mount namespace
# Run "$@" inside the named persistent mount namespace, creating it
# first via mns-setup if needed.
local ns
ns=$1
shift
mns-setup $ns
m sudm -E /usr/bin/nsenter --mount=/root/mount_namespaces/$ns "$@"
}
mnsd() { # mount namespace + systemd namespace
# Run "$@" inside mount namespace $1 and the network namespace of
# systemd unit $2, with the current environment re-exported via a
# temp script that the inner shell sources then removes.
# fix: declare pid and tmpf local; both leaked into the global shell.
local ns unit pid tmpf
ns=$1
unit=$2
shift 2
mns-setup $ns
pid=$(servicepid $unit)
tmpf=$(mktemp --tmpdir $unit.XXXXXXXXXX)
export -p >$tmpf
# append the command, shell-quoted, so the inner bash runs it verbatim
printf "%s " "${@@Q}" >>$tmpf
echo >>$tmpf
m sudo nsenter -t $pid -n --mount=/root/mount_namespaces/$ns sudo -u $USER -i bash -c ". $tmpf & sleep 1; rm $tmpf"
}
# mns run: like mns, but run the command as iank with the caller's PATH.
mnsr() { # mns run
  local target_ns=$1
  shift
  mns $target_ns sudo -u iank -E env "PATH=$PATH" "$@"
}
# root shell in mount namespace NS with networking disabled, by passing an
# extra --net option through mns to nsenter (empty "nonet" netns).
mnsnonetr() {
  # fix: ns was a leaked global in this sourced file
  local ns
  ns=$1
  lomh
  # create the empty nonet namespace once
  if ! s ip netns list | grep -Fx nonet &>/dev/null; then
    s ip netns add nonet
  fi
  mns $ns --net=/var/run/netns/nonet /bin/bash
  lomh
}
# like mnsnonetr but drops to user iank for the shell.
mnsnonet() {
  # fix: ns was a leaked global in this sourced file
  local ns
  ns=$1
  lomh
  # create the empty nonet namespace once
  if ! s ip netns list | grep -Fx nonet &>/dev/null; then
    s ip netns add nonet
  fi
  mns $ns --net=/var/run/netns/nonet sudo -E -u iank /bin/bash
  lomh
}
lom() {
# l = the loopback device
# Mount (arg is /path/to/image) or tear down (arg is the image base name)
# a LUKS-encrypted filesystem image on a loop device, mounted at
# /mnt/BASE inside the mount namespace BASE (see mns).
local l base
# get sudo pass cached right away
if ! sudo -nv 2>/dev/null; then
sudo -v
fi
if [[ $1 == /* ]]; then
# mount path: argument is the backing image file
base=${1##*/}
# NOTE(review): fs_file is not declared local, so it leaks into the
# calling shell — looks unintentional, confirm before changing.
fs_file=$1
# already mounted: nothing to do
if mns $base mountpoint -q /mnt/$base; then
return 0
fi
# reuse an existing loop device for this image if one is attached
l=$(losetup -j $fs_file | sed -rn 's/^([^ ]+): .*/\1/p' | head -n1 ||:)
if [[ ! $l ]]; then
l=$(sudo losetup -f)
m sudo losetup $l $fs_file
fi
# open the LUKS mapping unless it is already open
if ! sudo cryptsetup status /dev/mapper/$base &>/dev/null; then
if ! m sudo cryptsetup luksOpen $l $base; then
# clean up the loop device on failure
m sudo losetup -d $l
return 1
fi
fi
m sudo mkdir -p /mnt/$base
m mns $base mount /dev/mapper/$base /mnt/$base
m mns $base chown $USER:$USER /mnt/$base
lomh
else
# unmount path: argument is the base name
base=$1
if mns $base mountpoint /mnt/$base &>/dev/null; then
m mns $base umount /mnt/$base
fi
if sudo cryptsetup status /dev/mapper/$base &>/dev/null; then
if ! m sudo cryptsetup luksClose /dev/mapper/$base; then
echo lom: failed cryptsetup luksClose /dev/mapper/$base
return 1
fi
fi
# find the loop device whose backing file (column 6) ends in /$base
l=$(losetup -l --noheadings | awk '$6 ~ /\/'$base'$/ {print $1}')
if [[ $l ]]; then
m sudo losetup -d $l
else
echo lom: warning: no loopback device found
fi
fi
}
# mu personality. for original, just run mp. for 2, run mp 2.
# this is partly duplicated in mail-setup
mp() {
  local dead=false
  # fix: these were leaked globals in this sourced file
  local s suf target f
  # wait up to ~5s for mu to exit; killall fails once no process remains
  for s in {1..5}; do
    if ! killall mu; then
      dead=true
      break
    fi
    sleep 1
  done
  if ! $dead; then
    echo error: mu not dead
    m psg mu
    return 1
  fi
  suf=$1
  # pairs of: symlink target prefix, symlink location
  set -- /m/mucache ~/.cache/mu /m/.mu ~/.config/mu
  while (($#)); do
    target=$1$suf
    f=$2
    shift 2
    # replace a real directory with a symlink to the chosen personality
    if [[ -e $f && ! -L $f ]]; then
      m rm -rf $f
    fi
    m ln -sf -T $target $f
  done
}
# maildir enable
# Move the maildir into the store (/m/4e, or /m/4e2 with -2) and leave
# a symlink at /m/md/NAME pointing to it.
# usage: mdenable [-2] MAILDIR...
mdenable() {
  # fix: d added to locals; it leaked from this sourced file
  local md dst ln_path src two d
  two=false
  case $1 in
    -2) two=true; shift ;;
  esac
  for md; do
    src=
    if $two; then
      dst=/m/4e2/$md
    else
      dst=/m/4e/$md
    fi
    ln_path=/m/md/$md
    # locate the real (non-symlink) directory among the candidates
    for d in /m/md/$md /m/4e2/$md; do
      if [[ -d $d && ! -L $d ]]; then
        src=$d
        break
      fi
    done
    if [[ ! $src ]]; then
      echo "error: could not find $md" >&2
      return 1
    fi
    m mv -T $src $dst
    m ln -sf -T $dst $ln_path
  done
}
md2enable() {
# convenience wrapper: mdenable into the secondary /m/4e2 store
mdenable -2 "$@"
}
# maildir disable: move the maildir out of the /m/4e or /m/4e2 store back
# to /m/md/NAME as a real directory (replacing the symlink if present).
mddisable() {
  local md=$1
  # fix: dst/src/d were leaked globals; src is also initialized so a
  # stale value from a previous call can't defeat the not-found check.
  local dst src d
  dst=/m/md/$md
  src=
  ### begin copied from mdenable, but different d ###
  for d in /m/4e/$md /m/4e2/$md; do
    if [[ -d $d && ! -L $d ]]; then
      src=$d
      break
    fi
  done
  if [[ ! $src ]]; then
    echo "error: could not find $md" >&2
    return 1
  fi
  ### end copy from mdenable ###
  if [[ -L $dst ]]; then m rm $dst; fi
  m mv -T $src $dst
}
# markdown test: render a markdown file to html and open it in firefox
mdt() {
  local html=/tmp/mdtest.html
  markdown "$1" >$html
  firefox $html
}
mo() { xset dpms force off; } # monitor off
mpvgpu() {
  # seems to be the best gpu decoding on my nvidia 670.
  # vlc gets similar or better framerate, but is much darker output on my test movie at least.
  case $HOSTNAME in
    kd)
      # bump the gpu to a faster power state
      echo 0f | sudo tee -a /sys/kernel/debug/dri/0/pstate
      ;;
  esac
  # going back to the default slow clock, and slower fan:
  # echo 07 | sudo tee -a /sys/kernel/debug/dri/0/pstate
  if [[ $DISPLAY ]]; then
    mpv --vo=vdpau --hwdec=auto "$@"
  else
    # waylandvk seems to work the same
    # fix: "$@" was missing here, so the file arguments were dropped
    # in the wayland branch
    mpv --gpu-context=wayland --hwdec=auto "$@"
  fi
}
# mpv with the d profile
mpvd() { mpv --profile=d "$@"; }
# mpv with the a profile
mpva() { mpv --profile=a "$@"; }
# mpv for testing video quality, dont scale.
mpvt() { mpv --video-unscaled "$@"; }
# mpv all media files in . or $1
mpvm() {
  local -a extensions arg
  local dir i
  # get page source of https://en.wikipedia.org/w/index.php?title=Video_file_format&action=edit
  # into /a/x.log, then
  # grep '^| *\.' /a/x.log | sed 's/| *//;s/,//g'
  # note: to join them together for a regex, do:
  # old=; for e in ${extensions[@]/./}; do if [[ ! $old ]]; then old=$e; continue; fi; echo -n "$old|"; old=$e; done; echo $e
  #
  # fix: duplicate entries removed (.flv, .mpg, .mpeg, .m4v were listed
  # twice); -iname is case-insensitive so the match set is unchanged.
  extensions=(
    .webm
    .mkv
    .flv
    .vob
    .ogv .ogg
    .drc
    .gif
    .gifv
    .mng
    .avi
    .MTS .M2TS .TS
    .mov .qt
    .wmv
    .yuv
    .rm
    .rmvb
    .viv
    .asf
    .amv
    .mp4 .m4p .m4v
    .mpg .mp2 .mpeg .mpe .mpv
    .m2v
    .svi
    .3gp
    .3g2
    .mxf
    .roq
    .nsv
  )
  # build a find expression: ( -iname '*.ext1' -o -iname '*.ext2' ... )
  arg=("(" -iname "*${extensions[0]}")
  for (( i=1 ; i < ${#extensions[@]}; i++ )); do
    arg+=(-o -iname "*${extensions[i]}")
  done
  arg+=(")")
  dir=${1:-.}
  # debug:
  #find $dir "${arg[@]}" -size +200k
  # -size +200k skips tiny clips and thumbnails
  find "$dir" "${arg[@]}" -size +200k -exec mpv --profile=d '{}' +
}
# mpv with the s profile
mpvs() { mpv --profile=s "$@"; }
myirc() {
if [[ ! $1 ]]; then
set -- fsfsys
fi
local -a d
d=( /var/lib/znc/moddata/log/iank/{freenode,libera} )
# use * instead of -r since that does sorted order
ssh root@iankelling.org "for f in ${d[*]}; do cd \$f/#$1; grep '\ 16 )); then
i=$(( logcount - 16 ))
else
i=0
fi
# usually do this on monday, sometimes later
if [[ $(date +%A) == Monday ]]; then
min_date=$(date -d 'monday 2 weeks ago' +%s)
else
min_date=$(date -d 'monday 3 weeks ago' +%s)
fi
for (( ; i < logcount; i++ )); do
log=${logs[$i]}
d=$(date -d "$(head -n1 $log|awk '{print $1}')" +%s)
if (( d < min_date )); then
continue
fi
if awk '$3 == "iank:"' $log | sed -r 's/^(.{10}).(.{8})[^ ]+(.*)/\1_\2\3/' | grep .; then
hr
fi
done
popd
}
# Tail all recent prof logs. Copying from profanity has unwanted line breaks
# especially for links.
profr() {
  if [[ $HOSTNAME == kd ]]; then
    profr-local
  else
    ssh b8.nz profr-local
  fi
}
# print the last 40 lines across today's and yesterday's profanity logs,
# both direct chats and rooms.
profr-local() {
local d0 d1
local -a files
d0="$(date +%Y_%m_%d).log"
d1="$(date -d '1 day ago' +%Y_%m_%d).log"
# ngset/ngreset: presumably toggle nullglob so unmatched globs expand
# to nothing — confirm against their definitions
ngset
files=(/d/p/profanity/chatlogs/iank_at_fsf.org/{*,rooms/*}/{$d0,$d1})
ngreset
if (( ${#files[@]} > 0 )); then
# lines appear to start with a timestamp, so sort interleaves by time
cat "${files[@]}" | sort | tail -n 40
fi
}
# Tail pms in the last day, for the case where we restart profanity and
# didn't check for pms beforehand. Assume the most recent logs are on kd.
# If that isn't the case, use prof-recent-local
prof-recent() {
  if [[ $HOSTNAME == kd ]]; then
    prof-recent-local
  else
    ssh b8.nz prof-recent-local
  fi
}
# tail each direct-chat log from today and yesterday.
prof-recent-local() {
  local d dates date files f
  # consider making the day count passed by parameter. note: this works: $(date -d '2 day ago' +%Y_%m_%d)
  dates=("$(date +%Y_%m_%d)" "$(date -d '1 day ago' +%Y_%m_%d)" )
  # !(rooms) excludes the group-chat subtree (extglob)
  for d in /d/p/profanity/chatlogs/iank_at_fsf.org/!(rooms); do
    files=()
    # fix: array expansions and filenames are now quoted so paths with
    # spaces/globs can't be split or expanded
    for date in "${dates[@]}"; do
      f=$d/$date.log
      if [[ -e $f ]]; then
        files+=("$f")
      fi
    done
    if (( ${#files[@]} >= 1 )); then
      cat "${files[@]}" | tail
      hr
    fi
  done
}
# time-sorted view of recent profanity pms (runs on kd, locally or via ssh)
prof-sort() {
  if [[ $HOSTNAME == kd ]]; then
    prof-recent-sort
  else
    ssh b8.nz prof-recent-sort
  fi
}
# print lines from recent direct-chat logs, tagged with their file name,
# merged and sorted by time.
prof-recent-sort() {
  local d dates date files f
  # consider making the day count passed by parameter. note: this works: $(date -d '2 day ago' +%Y_%m_%d)
  dates=("$(date +%Y_%m_%d)" "$(date -d '1 day ago' +%Y_%m_%d)" )
  files=()
  # fix: quote array expansions and filenames against word splitting
  for d in /d/p/profanity/chatlogs/iank_at_fsf.org/!(rooms); do
    for date in "${dates[@]}"; do
      f=$d/$date.log
      if [[ -e $f ]]; then
        files+=("$f")
      fi
    done
  done
  for f in "${files[@]}"; do
    # append the source file name to each line so sort keeps provenance
    sed "s/\$/ $f/" "$f"
  done | sort
}
# usage: debvm DEBIAN_VERSION RAM_MB
# boot a throwaway vm from a debian nocloud cloud image in /a/opt/roms
debvm() {
  local ver ram fname src
  # fix: fnames was a leaked global array in this sourced file
  local -a fnames
  ver=$1
  ram=${2:-2024}
  # * is because it might have -backports in the name. we only expect 1 expansion
  fnames=( debian-$ver-*nocloud-"$(dpkg --print-architecture)".qcow2 )
  if (( ${#fnames[@]} >= 2 )); then
    echo "error: iank: unexpected multiple files"
    return 1
  fi
  fname="${fnames[0]}"
  src=/a/opt/roms/$fname
  # if the glob didn't match, $src is the literal pattern and fails here
  if [[ ! -f $src ]]; then
    echo debvm: not found $src, download from eg: https://cloud.debian.org/images/cloud/buster/latest/
    return 1
  fi
  cp -a $src /t
  # note, in fai-revm we do this: not sure why, maybe because of br device
  # --graphics spice,listen=0.0.0.0
  m s virt-install --osinfo debian11 --rng /dev/urandom -n deb${ver}tmp --import -r $ram --vcpus 2 --disk /t/$fname --graphics spice
  # note: to ssh into this machine will require host key generation: ssh-keygen -A
  # random: for cvs2git on gnu www, use debian 10. I could use trisquel,
  # but happen to want to try out the debian cloud images. the upstream
  # requires python2 and hasn't really changed since the version in d10.
  #
  # apt install cvs2git cvs
  # # 7G was not enough
  # mount -o mode=1777,nosuid,nodev,size=34G -t tmpfs tmpfs /tmp
  # cvs2git --encoding utf_8 --fallback-encoding ascii --dumpfile=dump www-rsync/www |& tee /tmp/l
  ## www-rsync is an rsynced copy of the cvsfrom savannah
}
# print my recent gajim messages for timesheet review.
# $1 = how many days back to look (default 16).
# assumes the query returns rows newest-first, since the loop breaks at
# the first row older than the cutoff — TODO confirm ordering.
mygajim() {
local time time_sec time_pretty days
days=${1:-16}
sqlite3 -separator ' ' /p/c/subdir_files/.local/share/gajim/logs.db "select time, message from logs where contact_name = 'iank' and jid_id = 17;" | while read -r time l; do
case $time in
# crude sanity filter: only epoch timestamps beginning with 16
16*) : ;;
*) continue ;;
esac
if ! time_pretty=$(date +%F.%R -d @$time); then
echo bad time: $time
return 1
fi
echo $time_pretty "$l"
# strip any fractional part before arithmetic
time_sec=${time%%.*}
# only look at the last 18 days. generally just use this for timesheet.
if (( time_sec < EPOCHSECONDS - 60 * 60 * 24 * days )); then break; fi
done
}
# page through every gajim message I sent
allmygajim() {
  local db=/p/c/subdir_files/.local/share/gajim/logs.db
  sqlite3 -separator ' ' $db "select time, message from logs where contact_name = 'iank'" | less
}
gajlogs() {
sqlite3 -separator ' ' /p/c/subdir_files/.local/share/gajim/logs.db "select time, message from logs" | less
}
# print pci and lshw details for network devices
net-dev-info() {
  e "lspci -nnk|gr -iA2 net"
  lspci -nnk|gr -iA2 net
  hr
  e "s lshw -C network"
  # fix: hr previously printed between this header and the lshw output,
  # splitting the section instead of separating the two sections
  sudo lshw -C network
  hr
}
# network kill: stop NetworkManager and switch to ifupdown's br0
nk() {
  local unit
  for unit in NetworkManager NetworkManager-wait-online.service; do
    ser stop $unit
    ser disable $unit
  done
  ser stop dnsmasq
  sudo resolvconf -d NetworkManager
  # ser start dnsmasq
  sudo ifup br0
}
# network go: hand the interface back to NetworkManager
ngo() {
  sudo ifdown br0
  ser start NetworkManager
  # give NM a moment to come up before opening the tui
  sleep 4
  sudo nmtui-connect
}
otp() {
oathtool --totp -b "$*" | xclip -selection clipboard
}
# run cmd and copy output
j() {
"$@" |& pee "xclip -r -selection clipboard" cat
}
# xorg copy. copy text piped into command
xc() {
xclip -r -selection clipboard
}
# echo copy
ec() {
pee "xclip -r -selection clipboard" cat
}
pakaraoke() {
# from http://askubuntu.com/questions/456021/remove-vocals-from-mp3-and-get-only-instrumentals
pactl load-module module-ladspa-sink sink_name=Karaoke master=alsa_output.usb-Audioengine_Audioengine_D1-00.analog-stereo plugin=karaoke_1409 label=karaoke control=-30
}
pfind() { #find *$1* in $PATH
  [[ $# != 1 ]] && { echo requires 1 argument; return 1; }
  # fix: a function-local IFS scopes the :-split to this function.
  # Previously the global IFS was assigned and then unset, which also
  # clobbered any caller-customized IFS.
  local IFS=:
  local -a pathArray
  pathArray=($PATH)
  find "${pathArray[@]}" -iname "*$1*"
}
pick-trash() {
# trash-restore lists everything that has been trashed at or below CWD
# This picks out files just in CWD, not subdirectories,
# which also match grep $1, usually use $1 for a time string
# which you get from running restore-trash once first
local name x ask
local nth=1
# last condition is to not ask again for ones we skipped
while name="$( echo | restore-trash | gr "$PWD/[^/]\+$" | gr "$1" )" \
&& [[ $name ]] && (( $(wc -l <<<"$name") >= nth )); do
# take the nth matching entry; nth advances when we answer n
name="$(echo "$name" | head -n $nth | tail -n 1 )"
read -r -p "$name [Y/n] " ask
if [[ ! $ask || $ask == [Yy] ]]; then
# restore-trash expects the entry's leading index number on stdin
x=$( echo "$name" | gr -o "^\s*[0-9]*" )
echo $x | restore-trash > /dev/null
elif [[ $ask == [Nn] ]]; then
nth=$((nth+1))
else
# any other answer aborts the whole loop
return
fi
done
}
pub() {
rld /a/h/_site/ li:/var/www/iankelling.org/html
}
pumpa() {
# fixes the menu bar in xmonad. this won\'t be needed when xmonad
# packages catches up on some changes in future (this is written in
# 4/2017)
#
# geekosaur: so youll want to upgrade to xmonad 0.13 or else use a
# locally modified XMonad.Hooks.ManageDocks that doesnt set the
# work area; turns out it\'s impossible to set correctly if you are
# not a fully EWMH compliant desktop environment
#
# geekosaur: chrome shows one failure mode, qt/kde another, other
# gtk apps a third, ... I came up with a setting that works for me
# locally but apparently doesnt work for others, so we joined the
# other tiling window managers in giving up on setting it at all
#
xprop -root -remove _NET_WORKAREA
command pumpa & r
}
# reviewboard, used at my old job
#rbpipe() { rbt post -o --diff-filename=- "$@"; }
#rbp() { rbt post -o "$@"; }
rebr() {
sudo ifdown br0
sudo ifup br0
}
r2e() { command r2e -d /p/c/rss2email.json -c /p/c/rss2email.cfg "$@"; }
# only run on MAIL_HOST. simpler to keep this on one system.
r2eadd() { # usage: name url
# initial setup of rss2email:
# r2e new r2e@iankelling.org
# that initializes files, and sets default email.
# symlink to the config doesnt work, so I copied it to /p/c
# and then use cli option to specify explicit path.
# Only option changed from default config is to set
# force-from = True
#
# or else for a few feeds, the from address is set by the feed, and
# if I fail delivery, then I send a bounce message to that from
# address, which makes me be a spammer.
r2e add $1 "$2" $1@r2e.iankelling.org
# get up to date and dont send old entries now:
r2e run --no-send $1
}
rspicy() { # usage: HOST DOMAIN
# connect to spice vm remote host. use vspicy for local host
local port
# shellcheck disable=SC2087
port=$(ssh $1<$tmp/timing
# todo, the current sleep seems pretty good, but it
# would be nice to have an empirical measurement, or
# some better wait to sync up.
#
# note: --loop-file=no prevents it from hanging if you have that
# set to inf the mpv config.
# --loop=no prevents it from exit code 3 due to stdin if you
# had it set to inf in mpv config.
#
# args go to mpv, for example --volume=80, 50%
cat >$out <&2
return 1
fi
sdncmdroot spamassassin sudo -u Debian-exim spamassassin -t --cf='score PYZOR_CHECK 0' <"$1"
}
# mail related
testmail() {
declare -gi _seq; _seq+=1
echo "test body" | m mail -s "test mail from $HOSTNAME, $_seq" "${@:-root@localhost}"
# for testing to send from an external address, you can do for example
# -fian@iank.bid -aFrom:ian@iank.bid web-6fnbs@mail-tester.com
# note in exim, you can retry a deferred message
# s exim -M MSG_ID
# MSG_ID is in /var/log/exim4/mainlog, looks like 1ccdnD-0001nh-EN
}
# to test sieve, use below command. for fsf mail, see offlineimap-sync script
# make modifications, then copy to live file, use -eW to actually modify mailbox
#
# Another option is to use sieve-test SCRIPT MAIL_FILE. note,
# sieve-test doesnt know about envelopes, Im not sure if sieve-filter does.
# sieve with output filter. arg is mailbox, like INBOX.
# This depends on dovecot conf, notably mail_location in /etc/dovecot/conf.d/10-mail.conf
# always run this first, edit the test files, then run the following
testsieve() {
sieve-filter ~/sieve/maintest.sieve ${1:-INBOX} delete 2> >(head; tail) >/tmp/testsieve.log && sed -rn '/^Performed actions:/,/^[^ ]/{/^ /p}' /tmp/testsieve.log | sort | uniq -c
}
runsieve() {
c ~/sieve; cp personal{test,}.sieve; cp lists{test,}.sieve; cp personalend{test,}.sieve
sieve-filter -eWv ~/sieve/maintest.sieve ${1:-INBOX} delete &> /tmp/testsieve.log
sed -r '/^info: filtering:/{h;d};/^info: msgid=$/N;/^info: msgid=.*left message in mailbox [^ ]+$/d;/^info: msgid=/{H;g};/^info: message kept in source mailbox.$/d' /tmp/testsieve.log
}
# usage:
# alertme SUBJECT
# printf "subject\nbody\n" | alertme
alertme() {
if [[ -t 0 ]]; then
exim -t <' $to <
To: $to
Subject: Mail delivery failed: returning message to sender
This message was created automatically by mail delivery software.
EOF
}
# toggle keyboard
tk() {
  # based on
  # https://askubuntu.com/questions/160945/is-there-a-way-to-disable-a-laptops-internal-keyboard
  # fix: id and slave were leaked globals in this sourced file
  local id slave
  id=$(xinput --list --id-only 'AT Translated Set 2 keyboard')
  # a floating (disabled) device is listed with a leading '∼'
  if xinput list | grep -F '∼ AT Translated Set 2 keyboard' &>/dev/null; then
    echo enabling keyboard
    # find the first slave keyboard number, they are all the same in my output.
    # if they werent, worst case we would need to save the slave number somewhere
    # when it got disabled.
    slave=$(xinput list | sed -n 's/.*slave \+keyboard (\([0-9]*\)).*/\1/p' | head -n1)
    xinput reattach $id $slave
  else
    xinput float $id
  fi
}
tm() {
# timer in minutes
# --no-config
(sleep "$(calc "$* * 60")" && mpv --no-config --volume 50 /a/bin/data/alarm.mp3) > /dev/null 2>&1 &
}
## usage: to connect to my main transmission daemon from a different host, run this
trans-remote-route() {
:
}
trg() { transmission-remote-gtk & r; }
# TODO: this wont work transmission.lan doesnt exist
trc() {
# example, set global upload limit to 100 kilobytes:
# trc -u 100
TR_AUTH=":$(jq -r .profiles[0].password ~/.config/transmission-remote-gtk/config.json)" transmission-remote transmission.lan -ne "$@"
}
# retry a command, sleeping between attempts.
# usage: trysleep RETRIES SLEEPSECS CMD...
# Returns 0 as soon as CMD succeeds; otherwise the status of the final
# attempt (no sleep after the last try).
trysleep() {
  # fix: these were leaked globals in this sourced file
  local retries sleepsecs i
  retries="$1"
  sleepsecs="$2"
  shift 2
  for (( i=0; i < retries - 1; i++ )); do
    if "$@"; then
      return 0
    fi
    sleep $sleepsecs
  done
  "$@"
}
tu() {
local s
if [[ -e $1 && ! -w $1 || ! -w $(dirname "$1") ]]; then
s=s;
fi
# full path for using in some initial setup steps
$s /a/exe/teeu "$@"
}
# execute exim in its namespace. Useful args like -Mrm
enn() {
local ecmd pid
ecmd="/usr/sbin/exim4 -C /etc/exim4/nn-mainlog.conf"
if ip a show veth1-mail &>/dev/null; then
s $ecmd "$@"
else
sdncmdroot exim4 $ecmd "$@"
fi
}
# get pid of systemd service
# Prints the unit's main PID to stdout; returns 1 if none can be found.
servicepid() {
local pid unit dir
unit="$1"
# MainPID is 0/empty when the unit has no single main process
pid=$(systemctl show --property MainPID --value "$unit")
case $pid in
[1-9]*) : ;;
*)
# fall back to the first pid in the unit's cgroup
dir=/sys/fs/cgroup/system.slice
if [[ ! -d $dir ]]; then
# t10 and older directory.
dir=/sys/fs/cgroup/systemd/system.slice
fi
# 0 or empty. This file includes the MainPid, so I expect we
# could just get this in the first place, but i don't know if that
# is always the case.
pid=$(head -n1 $dir/${unit%.service}.service/cgroup.procs)
;;
esac
if [[ $pid ]]; then
printf "%s\n" "$pid"
else
return 1
fi
}
# interactive user shell inside UNIT's network + mount namespaces.
# usage: sdnbash UNIT
sdnbash() { # systemd namespace bash
  local svcunit mainpid
  (( $# == 1 )) || { echo $0: error wrong number of args >&2; return 1; }
  svcunit=$1
  mainpid=$(servicepid $svcunit)
  m sudo nsenter -t $mainpid -n -m sudo -u $USER -i bash
}
# interactive root shell inside UNIT's network + mount namespaces.
# usage: sdnbashroot UNIT
sdnbashroot() { # systemd namespace bash as root
  local svcunit mainpid
  (( $# == 1 )) || { echo $0: error wrong number of args >&2; return 1; }
  svcunit=$1
  mainpid=$(servicepid $svcunit)
  m sudo nsenter -t $mainpid -n -m bash
}
# systemd namespace cmd
# usage: UNIT CMD...
# Run CMD, with the caller's exported environment, inside UNIT's network
# and mount namespaces as the current user.
sdncmd() {
local unit pid tmpf
if (( $# <= 1 )); then
echo $0: error wrong number of args >&2
return 1
fi
unit=$1
shift
pid=$(servicepid $unit)
# dump the environment plus the shell-quoted command into a temp script
tmpf=$(mktemp --tmpdir $unit.XXXXXXXXXX)
export -p >$tmpf
printf "%s " "${@@Q}" >>$tmpf
echo >>$tmpf
# a login shell inside the namespaces sources the script in the
# background, then removes the temp file
m sudo nsenter -t $pid -n -m sudo -u $USER -i bash -c ". $tmpf & rm $tmpf"
}
# run CMD as root inside UNIT's network + mount namespaces.
# usage: sdncmdroot UNIT CMD...
sdncmdroot() { # systemd namespace root command
  local svcunit mainpid
  (( $# >= 2 )) || { echo $0: error wrong number of args >&2; return 1; }
  svcunit=$1
  shift
  mainpid=$(servicepid $svcunit)
  m sudo nsenter -t $mainpid -n -m "$@"
}
# systemd network namespace (not mount) cmd
# usage: UNIT CMD...
sdnncmd() {
local unit pid tmpf
if (( $# <= 1 )); then
echo $0: error wrong number of args >&2
return 1
fi
unit=$1
shift
pid=$(servicepid $unit)
tmpf=$(mktemp --tmpdir $unit.XXXXXXXXXX)
export -p >$tmpf
printf "%s " "${@@Q}" >>$tmpf
echo >>$tmpf
m sudo nsenter -t $pid -n sudo -u $USER -i bash -c ". $tmpf & rm $tmpf"
}
mailnnbash() {
sdnbash mailnn
}
# we use wireguard now, use mailnnbash.
# mailvpnbash() {
# m sudo nsenter -t $(pgrep -f "/usr/sbin/openvpn .* --config /etc/openvpn/.*mail.conf") -n -m sudo -u $USER -i bash
# }
eximbash() {
sdnbashroot exim4
}
spamnn() {
local spamdpid
spamdpid=$(systemctl show --property MainPID --value spamassassin)
m sudo nsenter -t $spamdpid -n -m sudo -u Debian-exim spamassassin "$@"
}
unboundbash() {
sdnbashroot unbound
}
nmtc() {
s nmtui-connect "$@"
}
# check exim and others network namespace
# Verifies every mail-related unit runs in the same network namespace by
# comparing /proc/PID/ns/net links; prints a line for each mismatch.
mailnncheck() {
local unit pid ns mailnn spamd_ser
spamd_ser=spamd
# the unit is named spamassassin on some systems, spamd on others
if systemctl cat spamassassin &>/dev/null; then
spamd_ser=spamassassin
fi
# mailvpn would belong on the list if using openvpn
for unit in mailnn unbound dovecot $spamd_ser exim4 radicale; do
pid=$(servicepid $unit)
echo debug: unit=$unit pid=$pid
if [[ ! $pid ]]; then
echo failed to find pid for unit=$unit
continue
fi
if ! ns=$(s readlink /proc/$pid/ns/net); then
echo failed to find ns for unit=$unit pid=$pid
continue
fi
# the first unit seen establishes the expected namespace
if [[ $mailnn ]]; then
if [[ $ns != "$mailnn" ]]; then
echo "$unit ns $ns != $mailnn"
fi
else
mailnn=$ns
fi
done
}
vpncmd() {
sdncmd openvpn-client-tr@client.service "$@"
}
vpni() {
sdncmd openvpn-client-tr@client.service bash
}
vpnbash() {
sdncmdroot openvpn-client-tr@client.service bash
}
# start the openvpn client unit for profile $1 and follow its journal.
vpn() {
# prefer the modern openvpn-client@ unit when it exists
if [[ -e /lib/systemd/system/openvpn-client@.service ]]; then
local vpn_service=openvpn-client
else
local vpn_service=openvpn
fi
[[ $1 ]] || { echo need arg; return 1; }
# follow the unit's log in the background while it starts
journalctl --since=now --unit=$vpn_service@$1 -f -n0 &
sudo systemctl start $vpn_service@$1
# sometimes the ask-password agent does not work and needs a delay.
sleep .5
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=779240
# noticed around 8-2017 after update from around stretch release
# on debian testing, even though the bug is much older.
sudo systemd-tty-ask-password-agent
}
# fix ownership/permissions on /run/user/1000 if they have drifted
fixu() {
  local stats
  ls -lad /run/user/1000
  stats=$(stat -c%a-%g-%u /run/user/1000)
  if [[ $stats != 700-1000-1000 ]]; then
    # fix: use the standard ':' chown separator; the old 'user.group'
    # dot form is deprecated and ambiguous with dotted user names
    m s chmod 700 /run/user/1000; m s chown iank:iank /run/user/1000
  fi
}
# unmute desktop output
um() {
local sink card sedcmd
sink=$(pactl get-default-sink)
if [[ $sink == auto_null ]]; then
# guessing there is just one with an off profile. otherwise we will
# need some other solution, like storing the card identifier that we
# muted with nap. Or, we could so some hakery with
# pactl -f json.
sedcmd='/^[[:space:]]*index:/{s/^[[:space:]]*index://;h};/^[[:space:]]*active profile: $/{g;p;q}'
card=$(pacmd list-cards | sed -n "$sedcmd")
m pacmd set-card-profile "$card" output:analog-stereo
fi
m pactl set-sink-mute @DEFAULT_SINK@ false
rm -f /tmp/ianknap
}
nap() {
local sink card
sink=$(pactl get-default-sink)
card="${sink%.*}"
card="${card/output/card}"
m pacmd set-card-profile "$card" off
# clicking on a link in a browser can cause unmute.
# I don't want that. So, use a stronger form of mute
# than this.
#pactl set-sink-mute @DEFAULT_SINK@ true
touch /tmp/ianknap
}
# systemctl is-enabled / status / cat says nothing, instead theres
# some obscure symlink. paths copied from man systemd.unit.
# possibly also usefull, but incomplete, doesnt show units not loaded in memory:
# seru list-dependencies --reverse --all UNIT
sysd-deps() {
local f
local -a dirs search
ngset
case $1 in
u)
search=(
~/.config/systemd/user.control/*
$XDG_RUNTIME_DIR/systemd/user.control/*
$XDG_RUNTIME_DIR/systemd/transient/*
$XDG_RUNTIME_DIR/systemd/generator.early/*
~/.config/systemd/user/*
/etc/systemd/user/*
$XDG_RUNTIME_DIR/systemd/user/*
/run/systemd/user/*
$XDG_RUNTIME_DIR/systemd/generator/*
~/.local/share/systemd/user/*
/usr/lib/systemd/user/*
$XDG_RUNTIME_DIR/systemd/generator.late/*
)
;;
*)
search=(
/etc/systemd/system.control/*
/run/systemd/system.control/*
/run/systemd/transient/*
/run/systemd/generator.early/*
/etc/systemd/system/*
/etc/systemd/systemd.attached/*
/run/systemd/system/*
/run/systemd/systemd.attached/*
/run/systemd/generator/*
/lib/systemd/system/*
/run/systemd/generator.late/*
)
;;
esac
for f in "${search[@]}"; do
[[ -d $f ]] || continue
case $f in
*.requires|*.wants)
dirs+=("$f")
;;
esac
done
# dirs is just so we write out the directory names, ls does it when there is 2 or more dirs.
case ${#dirs[@]} in
1)
echo "${dirs[0]}:"
ll "${dirs[@]}"
;;
0) : ;;
*)
ll "${dirs[@]}"
;;
esac
ngreset
}
fixvpndns() {
local link istls
read -r _ link _ istls < <(resolvectl dnsovertls tunfsf)
case $istls in
yes|no) : ;;
*) echo fixvpndns error: unexpected istls value: $istls >&2; return 1 ;;
esac
s busctl call org.freedesktop.resolve1 /org/freedesktop/resolve1 org.freedesktop.resolve1.Manager SetLinkDNSOverTLS is $link no
}
# stop the openvpn client unit for profile $1
vpnoff() {
  [[ $1 ]] || { echo need arg; return 1; }
  local vpn_service=openvpn
  # prefer the modern openvpn-client@ unit when it exists
  if [[ -e /lib/systemd/system/openvpn-client@.service ]]; then
    vpn_service=openvpn-client
  fi
  sudo systemctl stop $vpn_service@$1
}
vpnoffc() { # vpn off client
ser stop openvpn-client-tr@client
}
# ensure the tr openvpn client unit is up
vpnc() {
  local unit=openvpn-client-tr@client
  # cache sudo credentials up front
  sudo -v
  if [[ $(systemctl is-active $unit) != active ]]; then
    s systemctl start $unit
    sleep 1
  fi
}
vspicy() { # usage: VIRSH_DOMAIN
# connect to vms made with virt-install
spicy -p "$(sudo virsh dumpxml "$1"|grep "&2
return 1
fi
sec=$(( EPOCHSECONDS - $( date +%s -d $1am ) ))
printf "%d:%02d\n" $(( sec / 60 / 60)) $(( (sec / 60) % 60 ))
}
calvis() { # calendar visualize
install -m 600 /dev/null /tmp/calendar-bytes
while read -r l; do
for char in $l; do
# shellcheck disable=SC2059 # intentional for the hex formatting
printf "\x$(printf "%x" $char)" >>/tmp/calendar-bytes
done
done < <(grep -v '[#-]' /p/calendar-data)
/p/c/proc/calendar/linux-amd64/calendar
}
wtr() { curl wttr.in/boston; }
xevkb() { xev -event keyboard; }
# * misc stuff
# verbose run: announce the command on stdout, then execute it,
# propagating its exit status.
vrun() {
  printf 'running: %s\n' "$*"
  "$@"
}
electrum() {
# Running the appimage said fuse was not available, but try
# running the appimage with --appimage-extract, which worked.
# It seems there is no need to backup the wallet, it can be restored
# via the seed onto any computer that needs it.
/a/opt/electrum/squashfs-root/AppRun "$@"
# This was an old way I ran electrum over tor, and seems like I
# imported a bitcoin core wallet.
#
# https://electrum.readthedocs.io/en/latest/tor.html
# https://github.com/spesmilo/electrum-docs/issues/129
# s rsync -ptog --chown bitcoin:bitcoin ~/.Xauthority /var/lib/bitcoind/.Xauthority
# sudo -u bitcoin DISPLAY=$DISPLAY XAUTHORITY=/var/lib/bitcoind/.Xauthority /a/opt/electrum-4.2.1-x86_64.AppImage -p socks5:localhost:9050
}
monero() {
sudo -u bitcoin DISPLAY=$DISPLAY XAUTHORITY=/var/lib/bitcoind/.Xauthority /a/opt/monero-gui-v0.17.3.2/monero-wallet-gui
}
# grep + find
gef() {
faf | grep -E "$@" ||:
rgv "$@"
}
# rg my main files
rgm() {
rg "$@" /p/w.org /a/t.org /a/work.org /b
}
# re all my files more expansively.
# usage [-OPT...] regex space combined
# Leading -OPT args are passed through to rgv. Also reads
# $local_rgv_args if a caller (reml/repl) sets it via dynamic scoping.
rem() {
local paths
local -a opts
# peel off leading option arguments for rgv
for arg; do
if [[ $arg == -* ]]; then
opts+=("$1")
shift
else
break
fi
done
paths="/p/c /b/"
# first list matching file names, then search file contents
find $paths -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
-o -name .undo-tree-history -prune \) 2>/dev/null | grep -iP --color=auto -- "$*" ||:
rgv $local_rgv_args -g "!bash_unpublished" "${opts[@]}" -- "$*" $paths /a/work.org ||:
}
reml() { # rem with limit to 5 matches per file
  # fix: declare local (matching repl). Bash dynamic scoping still lets
  # rem see the value, but it no longer leaks a global that would limit
  # every subsequent plain rem call.
  local local_rgv_args="-m 5"
  rem "$@"
}
rep() {
local paths
paths="/p/c"
find $paths -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
-o -name .undo-tree-history -prune \) 2>/dev/null | grep -iP --color=auto -- "$*" ||:
rgv $local_rgv_args -- "$*" $paths /a/t.org /p/w.org ||:
}
repl() { # rep with limit to 5 matches per file
  # fix: this called rem, exactly duplicating reml; the name and the
  # reml/rem pairing indicate it should wrap rep instead.
  local local_rgv_args="-m 5"
  rep "$@"
}
# re on common fsf files
ref() {
local paths
paths="/f/gluestick /f/brains /f/s /c"
find $paths -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
-o -name .undo-tree-history -prune \) 2>/dev/null | grep -iP --color=auto -- "$*" ||:
rgv -- "$*" $paths /a/work.org ||:
}
# for use in /f/bind
fupzone() {
# shellcheck disable=SC2046 # i want word splitting
./update-zone $(i s | sed -rn 's/.*db\.(.*)/\1/p')
}
# setup:
# pip3 install linode-cli
# linode-cli
livp9() {
local input ip id tmp
input=$1
if [[ $2 ]]; then
id=$2
ip=$3
else
tmp=$(mktemp)
echo $tmp
linode-cli --json --pretty linodes create --root_pass loxHuceygomGisun | tee $tmp
read -r ip id <<<"$(tail -n+2 $tmp | jq -r '.[0].ipv4[0] , .[0].id')"
for string in $ip $id; do
case $string in
[0-9]*) : ;;
*)
echo "livp9: bad value ip=$ip id=$id input=$input"
return 1
;;
esac
done
rm $tmp
while true; do
if timeout 4 ssh $ip :; then
break
fi
sleep 3
done
fi
ssh $ip <\0
,'
}
reset-xscreensaver() {
# except for spash, i set these by setting gui options in
# xscreensaver-command -demo
# then finding the corresponding option in .xscreensaver
# spash, i happened to notice in .xscreensaver
#
# dpmsOff, monitor doesnt come back on using old free software supported nvidia card
cat > /home/iank/.xscreensaver <<'EOF'
mode: blank
dpmsEnabled: True
dpmsStandby: 0:07:00
dpmsSuspend: 0:08:00
dpmsOff: 0:00:00
timeout: 0:05:00
lock: True
lockTimeout: 0:06:00
splash: False
EOF
}
# very useful, copy directory structure 3 deep. add remove /*/ to change level
# rsync -aivh --exclude '/*/*/*/' -f"+ */" -f"- *" SRC DEST
# * stuff that makes sense to be at the end
if [[ "$SUDOD" ]]; then
# allow failure, for example if we are sudoing into a user with diffferent/lesser permissions.
cd "$SUDOD" ||:
unset SUDOD
elif [[ -d /a ]] && [[ $PWD == "$HOME" ]] && [[ $- == *i* ]]; then
cd /a
OLDPWD=
fi
# for mitmproxy to get a newer python.
# commented until i want to use it because it
# noticably slows bash startup
#
mypyenvinit () {
if [[ $EUID == 0 || ! -e ~/.pyenv/bin ]]; then
echo "error: dont be root. make sure pyenv is installed"
return 1
fi
export PATH="$HOME/.pyenv/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
}
# I have the git repo and a release. either one should work.
# I have both because I was trying to solve an issue that
# turned out to be unrelated.
# ARDUINO_PATH=/a/opt/Arduino/build/linux/work
## i should have documented this...
# based on https://github.com/keyboardio/Kaleidoscope
export KALEIDOSCOPE_DIR=/a/opt/Kaleidoscope
# They want to be added to the start, but i think
# that should be avoided unless we really need it.
path-add --end ~/.npm-global
path-add --end $HOME/.cargo/bin
if type -P rg &>/dev/null; then
# Wrap ripgrep with preferred defaults:
# --no-messages because of annoying errors on broken symlinks
# -z = search .gz etc files
# -. = search dotfiles
# -n --no-heading: show files and line numbers together allowing for clicking
rg() {
local path_arg
# With just a pattern argument, search the current directory explicitly.
if [[ $# -eq 1 ]]; then
path_arg=.
fi
# ${path_arg:+"$path_arg"} appends the path only when set, without the
# word-splitting risk of a bare unquoted $path_arg (shellcheck SC2086).
command rg -. -z --no-messages -Lin --no-heading -M 900 --no-ignore-parent --no-ignore-vcs -g '!.git' -g '!auto-save-list' -g '!.savehist' "$@" ${path_arg:+"$path_arg"} || return $?
}
# drop any stale programmable completion; fails if none exists. ignore
complete -r rg 2>/dev/null ||:
else
# no ripgrep: fall back to grr (defined elsewhere)
alias rg=grr
fi
# rg with respecting vcs ignore files
rgv() {
local path_arg ret=0
# with just a pattern argument, search the current directory explicitly
if [[ $# -eq 1 ]]; then
path_arg=.
fi
# settings that are turned off for pipes, keep them on.
# Found by searching for "terminal" in --help
# --heading
# -n
#
# -. = search dotfiles
# -z = search zipped files
# -i = case insensitive
# -M = max columns
# -n --no-heading: show files and line numbers together allowing for clicking
# --no-messages because of annoying errors on broken symlinks
# --no-ignore-parent because i have /a/.git which ignores almost everything under it.
# ${path_arg:+"$path_arg"}: append the path only when set, without the
# word-splitting risk of a bare unquoted $path_arg (shellcheck SC2086).
command rg -n --no-heading -. -z --no-messages -i -M 900 --no-ignore-parent -g '!.git' -g '!auto-save-list' -g '!.savehist' "$@" ${path_arg:+"$path_arg"} || ret=$?
return $ret
}
amall() {
# Run an amtool command against both alertmanagers, printing a colored
# banner before each so the two outputs are distinguishable.
local magenta reset
magenta=$(tput setaf 5 2>/dev/null ||:)
reset=$(tput sgr0 2>/dev/null ||:)
printf '%s\n' "${magenta}█ coresite █${reset}"
amfsf "$@"
printf '%s\n' "${magenta}█ office █${reset}"
amoffice "$@"
}
amallq() { # amall quiet
# same as amall but without the banner lines
amfsf "$@"
amoffice "$@"
}
amfsf() {
# Point amtool at the coresite alertmanager by rewriting the url in its
# config (sedi is presumably a sed -i wrapper defined elsewhere — TODO
# confirm), then run amtool with the given args.
sedi -r '/alertmanager.url/s/@prom.office/@prom/' ~/.config/amtool/config.yml
amtool "$@"
}
amoffice() {
# Point amtool at the office alertmanager by rewriting the url in its
# config (sedi is presumably a sed -i wrapper defined elsewhere — TODO
# confirm), then run amtool with the given args.
sedi -r '/alertmanager.url/s/@prom.fsf/@prom.office.fsf/' ~/.config/amtool/config.yml
amtool "$@"
}
amls() {
# list silences on both alertmanagers
amall silence query "$@"
}
# amtool silence add
amsa() {
amall silence add "$@"
}
# amtool silence force
amsf() {
# x!="1" is a catch-all matcher: it matches every alert, silencing everything
amall silence add x!="1"
}
amrmall() {
# expire every active silence on both alertmanagers
# note: not sure if quoting of this arg is correct
amfsf silence expire "$(amfsf silence query -q)"
amoffice silence expire "$(amoffice silence query -q)"
}
youtube-dl-update() {
# Download the latest youtube-dl release into /usr/local/bin and make
# it world-readable/executable.
local url=https://yt-dl.org/downloads/latest/youtube-dl
local dest=/usr/local/bin/youtube-dl
sudo wget "$url" -O "$dest"
sudo chmod a+rx "$dest"
}
# https://github.com/yt-dlp/yt-dlp/wiki/Installation
yt-dlp-update() {
# Download the latest yt-dlp release into /usr/local/bin and make it
# world-readable/executable.
local url=https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp
local dest=/usr/local/bin/yt-dlp
sudo curl -L "$url" -o "$dest"
sudo chmod a+rx "$dest" # Make executable
}
mpvyt() {
# mpv, forced to use the /usr/local/bin yt-dlp for URL resolution.
# NOTE(review): the original ran `mpv --ytdl ytdl_path=...`; --ytdl is a
# boolean flag, so `ytdl_path=...` was passed to mpv as a *file to play*.
# The documented way to point mpv at a specific yt-dlp binary is the
# ytdl_hook script option below.
mpv --script-opts=ytdl_hook-ytdl_path=/usr/local/bin/yt-dlp "$@"
}
# taken from default changes to bashrc and bash_profile
path-add --end --ifexists $HOME/.rvm/bin
# also had ruby bin dir, but moved that to environment.sh
# so it's included in the overall env
# ya, hacky hardcoded hostnames in 2023. we could do better
hssh-update() {
# Push the hssh helper script to the hosts this machine manages
# (hardcoded per-hostname lists) and report any hosts where the copy
# failed. Returns 1 if any host failed.
local host
local -a failed_hosts hosts
case $HOSTNAME in
sy|kd)
hosts=(
kd.b8.nz x3.office.fsf.org syw x2.b8.nz
)
;;
x3)
hosts=(
b8.nz sywg.b8.nz
)
;;
esac
# quote the expansions so odd values can't word-split or glob (SC2068/SC2086)
for host in "${hosts[@]}"; do
e "$host"
if ! scp /b/fai/fai/config/files/usr/local/bin/hssh/IANK "root@$host:/usr/local/bin/hssh"; then
failed_hosts+=("$host")
fi
done
if (( ${#failed_hosts[@]} >= 1 )); then
echo "failed_hosts=${failed_hosts[*]}"
return 1
fi
}
noi3bar() {
# disable the i3 bar: create the flag file (checked elsewhere)
local flag=/tmp/noi3bar
touch "$flag"
}
i3bar() {
# re-enable the i3 bar: remove the flag file, verbosely
local flag=/tmp/noi3bar
rm -fv "$flag"
}
# example:
# <#part type="image/jpeg" filename="/home/iank/2023-12-24-ski-trip.jpg" disposition=attachment> <#/part>
#
attach-txt() {
# Emit an emacs mml attachment directive for each given file, piped
# through ec. Validates every file up front (non-empty, exists) so no
# partial output is emitted. rl presumably resolves to an absolute
# path — defined elsewhere.
local file
for file; do
if [[ ! -s $file ]]; then
e "error: empty or non-existent file $file"
return 1
fi
done
for file; do
printf '<#part type="image/jpeg" filename="%s" disposition=attachment> <#/part>\n' "$(rl "$file")"
done | ec
}
ctof() {
# Celsius to Fahrenheit, via units(1)
local deg=$1
units "tempC($deg)" tempF
}
ftoc() {
# Fahrenheit to Celsius, via units(1)
local deg=$1
units "tempF($deg)" tempC
}
# local icecast
localic() {
# Point live.iankelling.org at this box (CNAME to b8.nz) and set up the
# local icecast: http auth data, combined TLS cert+key, start icecast2.
# NOTE(review): this function was textually corrupted — the `cat`
# redirection below had lost its heredoc operator and the closing `fi`
# was missing entirely. Reconstructed minimally; verify against version
# control.
local mod=false ip
cedit live /p/c/machine_specific/vps/filesystem/var/lib/bind/db.iankelling.org <<'EOF' || mod=true
live CNAME b8.nz.
EOF
if $mod; then
ip=$(ip r show default | sed -r 's/.*src ([^ ]*).*/\1/' | head -n1)
# NOTE(review): original condition was `[[ ! $ip ]] && timeout 1 ping -c 1 $ip`,
# which only pinged when $ip was empty; inverted to the clearly intended
# "fail when we got no ip or it does not respond". Also return instead
# of exit so a sourced interactive shell is not killed.
if [[ ! $ip ]] || ! timeout 1 ping -c 1 "$ip"; then
echo "error: failed to get ip: $ip" >&2
return 1
fi
cat >/p/c/cmc-firewall-data-http <<'EOF'
AuthType Basic
AuthName "basic_auth"
# created with
# htpasswd -c icecast-fsf-htpasswd USERNAME
AuthUserFile "/etc/icecast-fsf-htpasswd"
Require valid-user
AuthType Basic
AuthName "basic_auth"
AuthUserFile "/etc/icecast-fsf-tech-htpasswd"
Require valid-user
EOF
# concat cert + key for icecast, then (re)start it
s cat /etc/letsencrypt/live/live.iankelling.org/{fullchain,privkey}.pem | s dd of=/etc/icecast2/fullchainpluskey.pem
ser start icecast2
fi
}
# li icecast
liic() {
# Point live.iankelling.org back at the li VPS (A + AAAA records).
# cedit presumably edits the zone file and returns nonzero on change —
# on change, push the zone out with bindpush. TODO confirm cedit's
# return convention.
cedit live /p/c/machine_specific/vps/filesystem/var/lib/bind/db.iankelling.org <<'EOF' || bindpush
live A 72.14.176.105
AAAA 2600:3c00::f03c:91ff:fe6d:baf8
EOF
}
# icecast rm -r
icrmr() {
# delete all icecast data files, locally and on li.b8.nz
find /var/icecast -type f -delete
ssh li.b8.nz find /var/icecast -type f -delete
}
# obs screen switching off
obof() {
# Show the flag file's current state, then create it to disable obs
# auto scene switching.
local flag=/tmp/no-obs-auto-scene-switch
ls -l "$flag"
touch "$flag"
}
# obs screen switching on
obon() {
# Show the flag file's current state, then remove it to re-enable obs
# auto scene switching.
local flag=/tmp/no-obs-auto-scene-switch
ls -l "$flag"
if [[ -e $flag ]]; then
rm -f "$flag"
fi
}
obs-gen-profiles() {
# Generate the fsftech and fsf OBS profiles from the fsfsysops one by
# substituting the channel name.
local profdir=/p/c/basic/profiles
local src=$profdir/fsfsysops/basic.ini
sed 's/fsf-sysops/fsf-tech/g' "$src" >"$profdir/fsftech/basic.ini"
sed 's/fsf-sysops/fsf/g' "$src" >"$profdir/fsf/basic.ini"
}
# terminal clear. like clear, but put the prompt at the bottom,
# useful for obs streaming the bottom half of a terminal window.
tclear() {
# NOTE(review): this function is corrupted — the next line fuses the
# start of tclear's for-loop with the tail of what looks like a
# different function (one that scp's cmc:/tmp/dhcp.leases* and diffs
# them when its output says "try diffing"). Several lines appear to
# have been lost; recover this region from version control rather than
# trusting the text here. Left byte-identical.
for ((i=0; i&1) || ret=1
printf "%s\n" "$out"
if [[ $out == *"try diffing"* ]]; then
tmpdir=$(mktemp -d)
m scp cmc:/tmp/dhcp.leases cmc:/tmp/dhcp.leases.iank $tmpdir
m diff $tmpdir/dhcp.leases $tmpdir/dhcp.leases.iank ||:
rm -rf $tmpdir
fi
return $ret
}
# ffs and switch the bash history on this terminal.
# disabled because I don't really need this and
# the history switching is annoying for debugging.
#
# ffs() {
# local last
# last="${*: -1}"
# if [[ $last && $last != -* && $last != sysops ]]; then
# his
# fi
# command ffs "$@"
# }
i3gen() {
# regenerate the i3/sway config via the generator script
/b/ds/i3-sway/gen
}
# insensitive find plus edit
ife() {
# Search filenames case-insensitively (via ifn, defined elsewhere) and
# open the result in the editor (g). A single match opens directly;
# multiple matches get a one-letter menu (a-z) and we open whichever
# the user picks within 5 seconds.
local tmps found_count i char file button
local -a found_files
local -A button_file
tmps=$(ifn "$@")
# robustness: on no matches, mapfile would still produce one empty
# element and we would try to edit ""
if [[ ! $tmps ]]; then
echo "no matches"
return 1
fi
mapfile -t found_files <<<"$tmps"
found_count=${#found_files[@]}
if (( found_count == 1 )); then
m g "${found_files[0]}"
else
i=0
for button in {a..z}; do
button_file[$button]="${found_files[$i]}"
echo "$button: ${found_files[$i]}"
i=$(( i + 1 ))
if (( i >= found_count )); then
break
fi
done
# single raw silent keypress, 5 second timeout; ignore timeout/EOF
read -rsN1 -t 5 char ||:
file="${button_file[$char]}"
if [[ $file ]]; then
g "$file"
else
echo "no selection"
fi
fi
}
# decrease filesize without losing any noticeable quality. inspired from
# https://gist.github.com/BlueSwordM/86dfcb6ab38a93a524472a0cbe4c4100
ffsencode() {
# Shrink a video with SVT-AV1 (crf 60) without noticeable quality
# loss; audio stream is copied unchanged.
# usage: ffsencode INFILE OUTFILE
# fix: in/out were previously leaked as global shell variables
local in="$1"
local out="$2"
ffmpeg -i "$in" -c:v libsvtav1 -crf 60 -preset 6 -g 60 -svtav1-params tune=0:enable-overlays=1:scd=1:scm=1 -pix_fmt yuv420p10le -c:a copy "$out"
}
# NOTE(review): presumably the directory of FAI base images, inferred
# only from the name — confirm against the fai setup.
export BASEFILE_DIR=/a/bin/fai-basefiles
#export ANDROID_HOME=/a/opt/android-home
# https://f-droid.org/en/docs/Installing_the_Server_and_Repo_Tools/
#export USE_SDK_WRAPPER=yes
#PATH=$PATH:$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools
# didnt get drush working, if I did, this seems like the
# only good thing to include for it.
# Include Drush completion.
# if [ -f "/home/ian/.drush/drush.complete.sh" ] ; then
# source /home/ian/.drush/drush.complete.sh
# fi
# best practice
unset IFS
# https://wiki.archlinux.org/index.php/Xinitrc#Autostart_X_at_login
# i added an extra condition as gentoo xorg guide says depending on
# $DISPLAY is fragile.
# On a fresh login shell on vt1 on arch (isarch defined elsewhere),
# replace the shell with X.
if [[ ! $DISPLAY && $XDG_VTNR == 1 ]] && shopt -q login_shell && isarch; then
exec startx
fi
# ensure no bad programs appending to this file will have an effect
return 0