X-Git-Url: https://iankelling.org/git/?a=blobdiff_plain;f=brc2;h=af4d8082aaa92268c4f0f9daa5e10e6501be15c5;hb=51c8b40fd2aac71d29dc9298ca65425725ad1edd;hp=1974c580b867ea432b9cff6b42664080888f9087;hpb=06a56540b041accaaff795ad57664e368cb07a10;p=distro-setup diff --git a/brc2 b/brc2 index 1974c58..af4d808 100644 --- a/brc2 +++ b/brc2 @@ -71,6 +71,8 @@ source /a/bin/ds/beet-data # * functions + + multimic() { local i local -a sources @@ -126,6 +128,12 @@ zcheck() { ssh bow rm /tmp/oegu.jpg feh /t/oegu.jpg } +zmon() { + while true; do + ziva-screen + sleep 15 + done +} slemacs() { local arg rtime v @@ -358,6 +366,17 @@ anki() { schroot -c buster -- anki } +daycat() { + ngset + hrcat /m/md/daylert/{cur,new}/* + ngreset +} +dayclear() { + ngset + rm -f /m/md/daylert/{cur,new}/* +} + + acat() { ngset hrcat /m/md/alerts/{cur,new}/* @@ -420,22 +439,19 @@ astudio() { # Convert brains file path to url and vice versa # usage: brains [URL_OR_PATH] brains() { - _iki-convert '(/a)?/f/brains' brains.fsf.org "$@" + _iki-convert brains.fsf.org/wiki "$@" } glue() { - _iki-convert '(/a)?/f/gluestick' gluestick.office.fsf.org "$@" + _iki-convert gluestick.office.fsf.org "$@" } -# usage: $0 REPO_PATH [URL_OR_PATH] +# usage: see above _iki-convert() { - local url domain path input err repo_dir dir url_dir url - repo_dir="$1" - domain="$2" - shift 2 - err=false - if $err; then - return 1 - fi + local url url_prefix path input err repo_dir dir url_dir url name + url_prefix="$1" + name="${url_prefix%%.*}" + repo_dir="/f/$name" + shift if [[ $1 ]]; then input="$*" else @@ -443,7 +459,7 @@ _iki-convert() { fi case $input in http*) - path="$repo_dir/${input##http*://"$domain"/wiki/}" + path="$repo_dir/${input##http*://"$url_prefix"/}" if [[ $path == */ ]]; then path=${path%/}.mdwn fi @@ -451,8 +467,8 @@ _iki-convert() { ;; *) path=$(fp "$input") - url_dir=$(echo "$path" | sed -r "s,^$repo_dir/,,") - url="https://$domain/wiki/$url_dir" + url_dir=$(echo "$path" | sed -r "s,^(/a)?$repo_dir/,,") + url="https://$url_prefix/$url_dir" url="${url%.mdwn}/" j echo "$url" ;; @@ -674,6 +690,7 @@ beetag-nostatus() { fi if $erasable_line; then # https://stackoverflow.com/a/71286261 + # erase line / delete line in terminal printf '\033[1A\033[K' fi erasable_line=false @@ -724,7 +741,7 @@ mpvrpc-percent-pos() { # q quit # ret next # -beetag() { +beetag() { local last_genre_i fstring tag id char new_item char_i genre tag remove doplay i j random path local do_rare_genres read_wait help line lsout tmp ls_line skip_lookback local escape_char escaped_input expected_input skip_input_regex right_pad erasable_line seek_sec @@ -1189,7 +1206,7 @@ beet2nav() { # pull in beets library locally beetpull() { - local sshfs_host + local sshfs_host sshfs_cmd sshfs_host=b8.nz if [[ $HOSTNAME == kd ]]; then return 0 @@ -1198,8 +1215,9 @@ beetpull() { s mkdir /i s chown iank:iank /i fi - if ! mountpoint /i &>/dev/null; then - m sshfs $sshfs_host:/i /i + sshfs_cmd="sshfs -o ServerAliveInterval=15,reconnect $sshfs_host:/i /i" + if ! 
pgrep -f "^$sshfs_cmd$" >/dev/null; then + m $sshfs_cmd fi } @@ -1642,7 +1660,7 @@ lipush() { local p a # excluding emacs for now #p=(/a/opt/{emacs-debian11{,-nox},mu,emacs} /a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts}) - p=(/a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts} /c/roles/prom_export/files/simple/usr/local/bin/fsf-install-node-exporter) + p=(/a/bin /a/exe /a/h /a/c /p/c/machine_specific/vps{,.hosts} /c/roles/prom_export/files/simple/usr/local/bin/fsf-install-node-exporter /a/opt/fpaste) a="-ahviSAXPH --specials --devices --delete --relative --exclude-from=/p/c/li-rsync-excludes" ret=0 for h in li je bk; do @@ -1673,7 +1691,7 @@ bindpush() { dsign iankelling.org expertpathologyreview.com zroe.org amnimal.ninja lipush for h in li bk; do - m sl $h <<'EOF' + m sl $h.b8.nz <<'EOF' source ~/.bashrc m dnsup EOF @@ -2141,7 +2159,7 @@ gpg() { } gse() { - local email=ian@iankelling.org + local email=iank@fsf.org git send-email --notes "--envelope-sender=<$email>" \ --suppress-cc=self "$@" } @@ -2165,11 +2183,13 @@ hstatus() { done } -# work log +## work log +# +# note: database location is specified in ~/.timetrap.yml, currently /p/.timetrap.db wlog() { local day i days_back days_back=${1:-16} - for (( i=0; i=0; i-- )); do day=$( date +%F -d @$((EPOCHSECONDS - 86400*i )) ) date "+%a %b %d" -d @$((EPOCHSECONDS - 86400*i )) | tr '\n' ' ' /a/opt/timetrap/bin/t d -ftotal -s $day -e $day all -m '^w|lunch$' @@ -2187,6 +2207,8 @@ tl() { t s w } + +# help me focus. opens 2 windows. focus() { /p/c/proc/focus/linux-amd64/focus & watcharb5 @@ -2194,12 +2216,17 @@ focus() { } +# Display a list of the active window title +# i've been on with 10 second samples going back +# 5 minutes. If I've been on one window for 10 seconds +# or longer, then display the second count. +# +# Press any key to exit. watcharb5() { local char ret - killall arbtt-capture ||: + killall arbtt-capture &>/dev/null ||: rm -f ~/.arbtt/capture.log arbtt-capture --sample-rate=10 & - clear while true; do arb5 ret=0 @@ -2216,30 +2243,49 @@ watcharb5() { killall arbtt-capture ||: return 0 fi - clear done } arb5() { - local i l sec - i=0 + local i j l sec blanks line + local -a arbtt_lines if [[ ! 
-e ~/.arbtt/capture.log ]]; then sleep 5 fi - # https://stackoverflow.com/questions/56486272/how-to-concat-multiple-fields-to-same-line-with-jq - arbtt-dump -l 30 -t json | jq -r '.[] | [ ( .inactive / 1000 | floor ) , ( .windows[] | select (.active == true) |.title) ] | @tsv' \ - | tac | while read -r sec l; do - if (( i % 6 == 0 && i >= 2 )); then - echo "## $(( i / 6 + 1 )) ##" - fi - if (( sec > 10 )); then - printf "%3d %s\n" $sec "$l" - else - printf " %s\n" "$l" - fi - i=$(( i + 1 )) + blanks=$(( LINES - 34 )) + for (( i=0; i < blanks; i++ )); do + echo done + + { + i=0 + j=0 + # https://stackoverflow.com/questions/56486272/how-to-concat-multiple-fields-to-same-line-with-jq + arbtt_lines=$(arbtt-dump -l 30 -t json | \ + jq -r '.[] | [ ( .inactive / 1000 | floor ) , ( .windows[] | select (.active == true) |.title) ] | @tsv' | tac) + for line in "${arbtt_lines[@]}"; do + read -r sec l <<<"$line" + if (( j >= LINES )); then + break + fi + if (( i % 6 == 0 && i >= 2 )); then + j=$(( j + 1 )) + echo "## $(( i / 6 + 1 )) ##" + fi + if (( sec > 10 )); then + printf "%3d %s\n" $sec "$l" | sed -r "s/^(.{$COLUMNS}).*/\1/" + else + printf " %s\n" "$l" | sed -r "s/^(.{$COLUMNS}).*/\1/" + fi + i=$(( i + 1 )) + j=$(( j + 1 )) + done + while (( j < 34 && j < LINES )); do + echo + j=$(( j + 1 )) + done + } | tac } arbttlog() { @@ -2533,12 +2579,49 @@ mns() { # mount namespace m sudo -E /usr/bin/nsenter --mount=/root/mount_namespaces/$ns "$@" } +mnsd() { # mount namespace + systemd namespace + ns=$1 + unit=$2 + shift 2 + + s mkdir -p /root/mount_namespaces + if ! sudo mountpoint /root/mount_namespaces >/dev/null; then + m sudo mount --bind /root/mount_namespaces /root/mount_namespaces + fi + m sudo mount --make-private /root/mount_namespaces + if [[ ! -e /root/mount_namespaces/$ns ]]; then + m sudo touch /root/mount_namespaces/$ns + fi + if ! sudo mountpoint /root/mount_namespaces/$ns >/dev/null; then + m sudo unshare --propagation slave --mount=/root/mount_namespaces/$ns /bin/true + fi + + pid=$(servicepid $unit) + tmpf=$(mktemp --tmpdir $unit.XXXXXXXXXX) + export -p >$tmpf + printf "%s " "${@@Q}" >>$tmpf + echo >>$tmpf + + m sudo nsenter -t $pid -n --mount=/root/mount_namespaces/$ns sudo -u $USER -i bash -c ". $tmpf & sleep 1; rm $tmpf" +} + + mnsr() { # mns run local ns=$1 shift mns $ns sudo -u iank -E env "PATH=$PATH" "$@" } +mnsnonetr() { + ns=$1 + lomh + if ! s ip netns list | grep -Fx nonet &>/dev/null; then + s ip netns add nonet + fi + mns $ns --net=/var/run/netns/nonet /bin/bash + lomh +} + mnsnonet() { ns=$1 lomh @@ -2780,6 +2863,12 @@ myirc() { } +allmyirc() { + local d + d=/var/lib/znc/moddata/log/iank/freenode + ssh root@iankelling.org "cd $d; find . -mtime -60 -type f -exec grep '\ 15 )); then + i=$(( logcount - 15 )) + else + i=0 + fi + # usually do this on monday, sometimes later + if [[ $(date +%A) == Monday ]]; then + min_date=$(date -d 'monday 2 weeks ago' +%s) + else + min_date=$(date -d 'monday 3 weeks ago' +%s) + fi + for (( ; i < logcount; i++ )); do + log=${logs[$i]} + d=$(date -d "$(head -n1 $log|awk '{print $1}')" +%s) + if (( d < min_date )); then + continue + fi + if awk '$3 == "iank:"' $log | sed -r 's/^(.{10}).(.{8})[^ ]+(.*)/\1_\2\3/' | grep .; then + hr + fi + done + popd +} + + +# Tail all recent prof logs. Copying from profanity has unwanted line breaks +# especially for links. 
+profr() { + case $HOSTNAME in + kd) + profr-local + ;; + *) + ssh b8.nz profr-local + ;; + esac +} + +profr-local() { + local d0 d1 + local -a files + d0="$(date +%Y_%m_%d).log" + d1="$(date -d '1 day ago' +%Y_%m_%d).log" + ngset + files=(/d/p/profanity/chatlogs/iank_at_fsf.org/{*,rooms/*}/{$d0,$d1}) + ngreset + if (( ${#files[@]} > 0 )); then + cat "${files[@]}" | sort | tail -n 40 + fi +} + + +# Tail pms in the last day, for the case where we restart profanity and +# didn't check for pms beforehand. Assume the most recent logs are on kd. +# If that isn't the case, use prof-recent-local +prof-recent() { + case $HOSTNAME in + kd) + prof-recent-local + ;; + *) + ssh b8.nz prof-recent-local + ;; + esac +} +prof-recent-local() { + local d dates date files f + # consider making the day count passed by parameter. note: this works: $(date -d '2 day ago' +%Y_%m_%d) + dates=("$(date +%Y_%m_%d)" "$(date -d '1 day ago' +%Y_%m_%d)" ) + for d in /d/p/profanity/chatlogs/iank_at_fsf.org/!(rooms); do + files=() + for date in ${dates[@]}; do + f=$d/$date.log + if [[ -e $f ]]; then + files+=($f) + fi + done + if (( ${#files[@]} >= 1 )); then + cat ${files[@]} | tail + hr + fi + done +} + +prof-sort() { + case $HOSTNAME in + kd) + prof-recent-sort + ;; + *) + ssh b8.nz prof-recent-sort + ;; + esac +} + +prof-recent-sort() { + local d dates date files f + # consider making the day count passed by parameter. note: this works: $(date -d '2 day ago' +%Y_%m_%d) + dates=("$(date +%Y_%m_%d)" "$(date -d '1 day ago' +%Y_%m_%d)" ) + files=() + for d in /d/p/profanity/chatlogs/iank_at_fsf.org/!(rooms); do + for date in ${dates[@]}; do + f=$d/$date.log + if [[ -e $f ]]; then + files+=($f) + fi + done + done + for f in "${files[@]}"; do + sed "s/\$/ $f/" $f + done | sort } + # usage: debvm DEBIAN_VERSION RAM_MB debvm() { local ver ram fname src @@ -3241,9 +3445,12 @@ testexim() { # # -t = get recipient from header exim -d -t <&2 return 1 @@ -3393,7 +3600,11 @@ sdncmd() { # systemd namespace cmd unit=$1 shift pid=$(servicepid $unit) - m sudo nsenter -t $pid -n -m sudo -u $USER -i "$@" + tmpf=$(mktemp --tmpdir $unit.XXXXXXXXXX) + export -p >$tmpf + printf "%s " "${@@Q}" >>$tmpf + echo >>$tmpf + m sudo nsenter -t $pid -n -m sudo -u $USER -i bash -c ". $tmpf & rm $tmpf" } @@ -3498,11 +3709,33 @@ fixu() { # unmute um() { + local sink card + sink=$(pactl get-default-sink) + if [[ $sink != auto_null ]]; then + return + fi + + # guessing there is just one with an off profile. otherwise we will + # need some other solution, like storing the card identifier that we + # muted with nap. + card=$(pacmd list-cards | sed -n '/^[[:space:]]*index:/{s/^[[:space:]]*index://;h};/^[[:space:]]*active profile: $/{g;p;q}') + m pacmd set-card-profile "$card" output:analog-stereo + pactl set-sink-mute @DEFAULT_SINK@ false rm -f /tmp/ianknap } + nap() { - pactl set-sink-mute @DEFAULT_SINK@ true + local sink card + sink=$(pactl get-default-sink) + card="${sink%.*}" + card="${card/output/card}" + m pacmd set-card-profile "$card" off + + # clicking on a link in a browser can cause unmute. + # I don't want that. So, use a stronger form of mute + # than this. 
+ #pactl set-sink-mute @DEFAULT_SINK@ true touch /tmp/ianknap } @@ -3594,7 +3827,13 @@ vpnoffc() { # vpn off client ser stop openvpn-client-tr@client } vpnc() { - ser start openvpn-client-tr@client + local unit + unit=openvpn-client-tr@client + sudo -v + if [[ $(systemctl is-active $unit) != active ]]; then + s systemctl start $unit + sleep 1 + fi } @@ -3663,6 +3902,14 @@ rgm() { # re all my files more expansively rem() { + local paths + paths="/p/c /b/" + find $paths -not \( -name .svn -prune -o -name .git -prune \ + -o -name .hg -prune -o -name .editor-backups -prune \ + -o -name .undo-tree-history -prune \) 2>/dev/null | grep -iP --color=auto -- "$*" ||: + rgv -- "$*" $paths /a/t.org /p/w.org /a/work.org ||: +} +reml() { # with limit to 5 matches per file local paths paths="/p/c /b" find $paths -not \( -name .svn -prune -o -name .git -prune \ @@ -3671,6 +3918,13 @@ rem() { rgv -m 5 -- "$*" $paths /a/t.org /p/w.org /a/work.org ||: } + +# for use in /f/bind +fupzone() { + # shellcheck disable=SC2046 # i want word splitting + ./update-zone $(i s | sed -rn 's/.*db\.(.*)/\1/p') +} + # setup: # pip3 install linode-cli # linode-cli @@ -3834,12 +4088,18 @@ fi # rg with respecting vcs ignore files rgv() { ret=0 + # settings that are turned off for pipes, keep them on. + # Found by searching for "terminal" in --help + # --heading + # -n + # # -. = search dotfiles # -z = search zipped files # -i = case insensitive # -M = max columns # --no-messages because of annoying errors on broken symlinks - command rg -. -z --no-messages -i -M 900 -g '!.git' -g '!auto-save-list' -g '!.savehist' "$@" || ret=$? + # --no-ignore-parent because i have /a/.git which ignores almost everything under it. + command rg -n --heading -. -z --no-messages -i -M 900 --no-ignore-parent -g '!.git' -g '!auto-save-list' -g '!.savehist' "$@" || ret=$? return $ret } @@ -3900,6 +4160,34 @@ path-add --end --ifexists $HOME/.rvm/bin # so its included in overall env +# ya, hacky hardcoded hostnames in 2023. we could do better +hssh-update() { + local -a failed_hosts hosts + case $HOSTNAME in + sy|kd) + hosts=( + kd x3.office.fsf.org syw + ) + ;; + x3) + hosts=( + b8.nz sywg.b8.nz + ) + ;; + esac + for host in ${hosts[@]}; do + e $host + if ! scp /b/fai/fai/config/files/usr/local/bin/hssh/IANK root@$host:/usr/local/bin/hssh; then + failed_hosts+=($host) + fi + done + if (( ${#failed_hosts[@]} >= 1 )); then + echo failed_hosts=${failed_hosts[*]} + return 1 + fi +} + + export BASEFILE_DIR=/a/bin/fai-basefiles #export ANDROID_HOME=/a/opt/android-home
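
# The mnsd and sdncmd helpers added above share one trick worth noting:
# dump the caller's exported environment plus the shell-quoted arguments
# into a temp file, then source that file from a login shell on the far
# side of nsenter/sudo -i, so the command runs in the target namespace
# with the caller's env intact. A minimal sketch of just that hand-off,
# with a hypothetical function name and the nsenter/namespace step left
# out:
envpass-demo() {
  local tmpf
  tmpf=$(mktemp --tmpdir envpass.XXXXXXXXXX)
  # export -p emits the exported vars as re-sourceable declare -x lines
  export -p >$tmpf
  # ${@@Q} (bash 4.4+) shell-quotes each arg so the command line survives
  # being written to a file and re-parsed by the inner shell
  printf "%s " "${@@Q}" >>$tmpf
  echo >>$tmpf
  # mnsd/sdncmd run the equivalent of this line (backgrounded) via
  # sudo nsenter ... sudo -u $USER -i bash -c; here it is a plain bash -c
  bash -c ". $tmpf; rm -f $tmpf"
}
# example: envpass-demo echo hi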