#!/bin/bash
-# Copyright (C) 2019 Ian Kelling
-# SPDX-License-Identifier: AGPL-3.0-or-later
+# I, Ian Kelling, follow the GNU license recommendations at
+# https://www.gnu.org/licenses/license-recommendations.en.html. They
+# recommend that small programs, < 300 lines, be licensed under the
+# Apache License 2.0. This file contains or is part of one or more small
+# programs. If a small program grows beyond 300 lines, I plan to switch
+# its license to GPL.
+
+# Copyright 2024 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# this gets sourced. shebang is just for file mode detection
# Use source ~/.bashrc instead of doing bash -l when running a script
# so this can set extdebug and avoid the bash debugger.
-if [[ -s /a/bin/errhandle/err ]]; then
- source /a/bin/errhandle/err
-elif [[ -s $bashrc_dir/err ]]; then
- # shellcheck source=/a/bin/errhandle/err
- source $bashrc_dir/err
+
+
+if [[ -s /a/bin/bash-bear-trap/bash-bear ]]; then
+ # shellcheck source=/a/bin/bash-bear-trap/bash-bear
+ source /a/bin/bash-bear-trap/bash-bear
+ # wtf, shellcheck doesn't allow disabling warnings in elifs
+else
+ # bleh shellcheck can't handle disabling in an elif, so nesting this if.
+ # shellcheck disable=SC2154 # set in .bashrc
+ if [[ -s $bashrc_dir/bash-bear ]]; then
+ # shellcheck source=/a/bin/bash-bear-trap/bash-bear
+ source $bashrc_dir/bash-bear
+ fi
fi
# In t8, it runs clear_console for login shells by default. I don't want
export SSH_CONFIG_FILE_OVERRIDE=/root/.ssh/confighome
+
+
# emacs has a different default search path than the info command. This
-# adds the info defaults to emacs, but not the reverse, because I dun
+# adds the info defaults to emacs. This is commented because after
+# various upgrades this is no longer a problem: for the directories that
+# exist on my system, emacs already includes the ones that info
+# searches.
+#
+# but not the reverse, because I dun
# care much about the cli. The search path is only on the cli if you run
# "info xxx", or in emacs if you run '(info xxx)', so not that
-# important, but might as well fix it.
-
-# info info says this path is what was compiled, and its not documented
-# anywhere. Through source grepping, i found it in filesys.h of the info
-# source in trisquel flidas.
-#
-# Traling : means for emacs to add its own stuff on to the end.
+# important and i don't bother fixing it.
+
+# # info info says this path is what was compiled, and its not documented
+# # anywhere. Through source grepping, i found it in filesys.h of the info
+# # source in trisquel flidas.
+# #
+# # Trailing : means for emacs to add its own stuff on to the end.
+# #
+# # A problem with this is that directories which are not readable breaks info. And of course, this hard coding is not nice.
+# # I removed PATH from the start, because I've never seen an info file in PATH. And removed ".", because I can just specify the full file name in that case.
+# #
+# # https://raw.githubusercontent.com/debian-tex/texinfo/master/info/filesys.h
+# #
+
+# # note: to split up the var like this, do:
+# # IFS=:; printf '%s\n' $INFOPATH
+
+# dirs=(
+# /usr/local/info
+# /usr/info
+# /usr/local/lib/info
+# /usr/lib/info
+# /usr/local/gnu/info
+# /usr/local/gnu/lib/info
+# /usr/gnu/info
+# /usr/gnu/lib/info
+# /opt/gnu/info
+# /usr/share/info
+# /usr/share/lib/info
+# /usr/local/share/info
+# /usr/local/share/lib/info
+# /usr/gnu/lib/emacs/info
+# /usr/local/gnu/lib/emacs/info
+# /usr/local/lib/emacs/info
+# /usr/local/emacs/info
+# )
+
+# for d in ${dirs[@]}; do
+# if [[ -r $d ]]; then
+# INFOPATH="$d:$INFOPATH"
+# fi
+# done
+# unset d dirs
+
+
+# note: guix bash config does this automatically.
+if [[ $INFOPATH != *: ]]; then
+ INFOPATH="$INFOPATH:"
+fi
-export INFOPATH=$PATH:/usr/local/info:/usr/info:/usr/local/lib/info:/usr/lib/info:/usr/local/gnu/info:/usr/local/gnu/lib/info:/usr/gnu/info:/usr/gnu/lib/info:/opt/gnu/info:/usr/share/info:/usr/share/lib/info:/usr/local/share/info:/usr/local/share/lib/info:/usr/gnu/lib/emacs/info:/usr/local/gnu/lib/emacs/info:/usr/local/lib/emacs/info:/usr/local/emacs/info:.:
-# for openwrt system that has no stty, this is easier than
-# guarding every time i use it.
+# shadow stty on systems without it. Fixes annoying errors on openwrt
+# system that has no stty, this is easier than guarding every time i use
+# it.
if ! type -p stty >/dev/null; then
stty() { :; }
fi
use_color=true
fi
- if [[ $KONSOLE_PROFILE_NAME ]]; then
- TERM=xterm-256color
- fi
-
- if [[ $TERM == alacritty && ! -e /usr/share/terminfo/a/alacritty ]]; then
- # todo: we should try installing the alacritty terminfo if it is not found
- # https://github.com/alacritty/alacritty/issues/2838
- TERM=xterm-256color
- fi
-
- # copying from the alacritty example above,
- if [[ $TERM == xterm-kitty ]]; then
- if [[ ! -e /usr/share/terminfo/x/xterm-kitty ]]; then
- TERM=xterm-256color
- else
- if [[ -e /a/opt/kitty/shell-integration/bash/kitty.bash ]]; then
- KITTY_SHELL_INTEGRATION=t
- source /a/opt/kitty/shell-integration/bash/kitty.bash
- fi
- fi
- fi
-
# todo: not sure this works in sakura
#stty werase undef
#bind "\C-w": kill-region
# sakura == xterm-256color
# konsole == xterm
- if [[ $TERM != xterm-kitty && $TERM == xterm* ]]; then
+ if [[ $TERM == xterm* ]]; then
# control + arrow keys. for other terminals, see http://unix.stackexchange.com/questions/10806/how-to-change-previous-next-word-shortcut-in-bash
bind '"\e[1;5C": shell-forward-word' 2>/dev/null
bind '"\e[1;5D": shell-backward-word' 2>/dev/null
fi
-case $TERM in
- # fixup broken backspace in chroots
- xterm-kitty|alacritty)
- chroot() {
- TERM=xterm-256color command chroot "$@"
- }
- ;;
-esac
-
export BC_LINE_LENGTH=0
# ansible option
# i for insensitive. the rest from
# X means dont remove the current screenworth of output upon exit
# R means to show colors n things
+# a useful flag is -F aka --quit-if-one-screen
export LESS=RXij12
export SYSTEMD_LESS=$LESS
+
export NNN_COLORS=2136
export SL_FILES_DIR=/b/ds/sl/.iank
export SL_INFO_DIR=/p/sshinfo
+### begin pyenv ###
+
+# this is adapted from things printed to term after install
+# pyenv. commented for now since I'm not actually using pyenv.
+
+# export PYENV_ROOT="$HOME/.pyenv"
+# command -v pyenv &>/dev/null || export PATH="$PYENV_ROOT/bin:$PATH"
+# command -v pyenv &>/dev/null && eval "$(pyenv init -)"
+
+
+# output showed this example for pyenv-virtualenv, which i have no idea
+# what it is, but leaving it as a comment in case I end up doing python
+# dev.
+
+#eval "$(pyenv virtualenv-init -)"
+### end pyenv ###
+
+
+
# * include files
if [[ -s $bashrc_dir/path-add-function ]]; then
source $bashrc_dir/path-add-function
if [[ $SSH_CLIENT ]]; then
- if grep -qF /home/iank/.iank/e/e /etc/exports &>/dev/null; then
- export EMACSDIR=/home/iank/.iank/e/e
- fi
path-add $bashrc_dir
fi
fi
# bash: /usr/share/bashdb/bashdb-main.inc: No such file or directory
# bash: warning: cannot start debugger; debugging mode disabled
if [[ $SOE ]]; then
- if [[ -e /a/bin/errhandle/err ]]; then
- source /a/bin/errhandle/err
+ if [[ -e /a/bin/bash-bear-trap/bash-bear ]]; then
+ source /a/bin/bash-bear-trap/bash-bear
fi
fi
-# based on readme.debian. dunno if this will break on other distros.
-if [[ -s /usr/share/wcd/wcd-include.sh ]]; then
- source /usr/share/wcd/wcd-include.sh
-fi
-
+# go exists here
+path-add --ifexists /usr/local/go/bin
+# Wrap source command to look in alternate directory in conjunction with
+# sl().
mysrc() {
local path dir file
path=$1
dir=${path%/*}
file=${path##*/}
if [[ -s $path ]]; then
+ # shellcheck disable=SC1090 # this is dynamic, shellcheck can't follow it.
source $path
elif [[ -s $bashrc_dir/$file ]]; then
+ # shellcheck disable=SC1090 # this is dynamic, shellcheck can't follow it.
source $bashrc_dir/$file
fi
}
mysrc /a/bin/small-misc-bash/ll-function
mysrc /a/bin/distro-functions/src/package-manager-abstractions
+mysrc /a/bin/fai/fai/config/distro-install-common/bash-misc-funcs
+# things to remember:
+# ALT-C - cd into the selected directory
+# CTRL-T - Paste the selected file path into the command line
+#
+# good guide to some of its basic features is the readme file
+# https://github.com/junegunn/fzf
+
+# if [[ -s /usr/share/doc/fzf/examples/key-bindings.bash ]]; then
+# source /usr/share/doc/fzf/examples/key-bindings.bash
+# fi
+
+# this looks more interesting for more easily selecting multiple files:
+# https://github.com/peco/peco?tab=readme-ov-file#keymaps
+# This also looks like it could be customized more than fzf:
+# https://github.com/lotabout/skim
# * functions
-ccomp() { # copy completion
- local src=$1
- local c
+
+
+# temporary functions
+y() {
+ m "${@//spring/fall}"
+}
+
+### begin FSF section ###
+
+# Comments before functions are meant to be good useful
+# documentation. If they fail at that, please improve them or send Ian a
+# note.
+
+## copy bash completion
+#
+# It copies how the bash completion works from one command to other
+# commands. Generally just use within a .bashrc.
+#
+# Usage: ORIGINAL_COMMAND TARGET_COMMAND...
+#
+ccomp() {
+ local c src
+ src=$1
shift
if ! c=$(complete -p $src 2>/dev/null); then
_completion_loader $src &>/dev/null ||:
c=$(complete -p $src 2>/dev/null) || return 0
fi
# remove $src( .*|$)
- c=${c% $src}
- c=${c%% $src *}
+ c=${c% "$src"}
+ c=${c%% "$src" *}
eval $c $*
}
+## BEGIN functions to change directory better than cd ##
+#
+# The functions:
+#
+# c: acts like cd, but stores directory history: you could alias to cd if you wanted.
+# b: go back
+# f: go forward
+# cl: list recent directories and optionally choose one.
+#
+# Finer details you may want to skip:
+#
+# bl: print the list of back and forward directories.
+#
+# We keep 2 stacks of directories, forward and back. Unlike with a web
+# browser, the forward stack is not erased when going somewhere new.
+#
+# Recent directories are stored in ~/.cdirs or $DOT_CDIRS if it is set
+#
+declare -a _dir_forward _dir_back
+# c: cd replacement that records directory history (see block comment above).
+# Usage: c [cd-args] [DIR]
+# Side effects: pushes onto _dir_back and appends $PWD to the recent-dirs file.
+c() {
+  local cdirs
+  # recent-dirs file: $DOT_CDIRS overrides the default ~/.cdirs
+  if [[ $DOT_CDIRS ]]; then
+    cdirs="$DOT_CDIRS"
+  else
+    cdirs=~/.cdirs
+  fi
+  # normally, the top of _dir_back is our current dir. if it isn't,
+  # put it on there, except we don't want to do that when we
+  # just launched a shell
+  if [[ $OLDPWD ]]; then
+    if (( ${#_dir_back[@]} == 0 )) || [[ ${_dir_back[-1]} != "$PWD" ]]; then
+      _dir_back+=("$PWD")
+    fi
+  fi
+  command cd "$@"
+  # after the cd, make the new dir the top of the back stack (deduped)
+  if (( ${#_dir_back[@]} == 0 )) || [[ ${_dir_back[-1]} != "$PWD" ]]; then
+    _dir_back+=("$PWD")
+  fi
+  # persist for cl()/cla(); pruned there when it grows past 500 lines
+  echo "$PWD" >>"$cdirs"
+}
+ccomp cd c
+
+# back
+b() {
+ local top_back
+ if (( ${#_dir_back[@]} == 0 )); then
+ echo "nothing left to go back to" >&2
+ return 0
+ fi
+ top_back="${_dir_back[-1]}"
+
+ if [[ $top_back == "$PWD" ]] && (( ${#_dir_back[@]} == 1 )); then
+ echo "already on last back entry" >&2
+ return 0
+ fi
+
+
+ if [[ $top_back == "$PWD" ]]; then
+ # add to dirf if not already there
+ if (( ${#_dir_forward[@]} == 0 )) || [[ ${_dir_forward[-1]} != "$top_back" ]]; then
+ _dir_forward+=("$top_back")
+ fi
+ unset "_dir_back[-1]"
+ command cd "${_dir_back[-1]}"
+ else
+ if (( ${#_dir_forward[@]} == 0 )) || [[ ${_dir_forward[-1]} != "$PWD" ]]; then
+ _dir_forward+=("$PWD")
+ fi
+ command cd "$top_back"
+ fi
+
+ # Interesting feature, not sure I want it.
+ # give us a peek at what is next in the list
+ # if (( ${#_dir_back[@]} >= 2 )); then
+ # printf "%s\n" "${_dir_back[-2]}"
+ # fi
+ #
+
+ # c/b/f Implementation notes:
+ #
+ # The top of the back is $PWD
+ # as long as the last directory change was due to c,b,or cl.
+ #
+ # Example of stack changes:
+ #
+ # a b c (d)
+ ## back
+ # a b (c)
+ # d
+ #back
+ #a (b)
+ #d c
+ #back
+ #(a)
+ #d c b
+ #forward
+ #a (b)
+ #d c
+ #
+ # a b c
+ ## back
+ # a b
+ # (c)
+ ## forward
+
+}
+# forward
+f() {
+ local top_forward
+ if (( ${#_dir_forward[@]} == 0 )); then
+ echo "no forward dir left" >&2
+ return 0
+ fi
+ top_forward="${_dir_forward[-1]}"
+ unset "_dir_forward[-1]"
+ c "$top_forward"
+
+ # give us a peek at what is next in the list
+ # if (( ${#_dir_forward[@]} )); then
+ # printf "%s\n" "${_dir_forward[-1]}"
+ # fi
+}
+# cl = cd list. Print recent directories, each with a one-key label,
+# then read one key and cd to the chosen directory.
+cl() {
+  local i j line input start
+  local -A buttondirs alines
+  local -a buttons dirs lines
+  local cdirs
+  if [[ $DOT_CDIRS ]]; then
+    cdirs="$DOT_CDIRS"
+  else
+    cdirs=~/.cdirs
+  fi
+  buttons=( {a..z} {2..9} )
+  if [[ ! -s "$cdirs" ]]; then
+    echo nothing in "$cdirs"
+    return 0
+  fi
+
+  i=0
+
+  mapfile -t lines <"$cdirs"
+  start=$(( ${#lines[@]} - 1 ))
+
+  # prune the history every once in a while.
+  if (( start > 500 )); then
+    tac "$cdirs" | awk '!seen[$0]++' | head -n 200 | tac | sponge "$cdirs" || [[ $? == 141 ]]
+  fi
+
+  for (( j=start; j >= 0; j-- )); do
+    line="${lines[$j]}"
+    # skip blanks, dupes, dead dirs, the current dir, and $HOME
+    if [[ ! $line || ${alines[$line]} || ! -d "$line" || $line == "$PWD" || $line == "$HOME" ]]; then
+      continue
+    fi
+    alines[$line]=t
+    buttondirs[${buttons[i]}]="$line"
+    printf "%s %s\n" ${buttons[i]} "$line"
+    # the LINES bit is for when we have a short terminal, just dont print all
+    # the directories. alternative would be to do something like less the list.
+    if (( i == ${#buttons[@]} - 1 )) || { [[ $LINES ]] && (( i == LINES - 3 )); }; then
+      break
+    fi
+    i=$(( i + 1 ))
+  done
+
+  if (( i == 0 )); then
+    echo "no dirs in cdirs=$cdirs"
+    return 0
+  fi
+  read -sr -N 1 input
+  # ignore enter and any key with no directory assigned (avoids c "")
+  if [[ $input != $'\n' && ${buttondirs[$input]} ]]; then
+    c "${buttondirs[$input]}"
+  fi
+}
+# bl = back list. lists the back and forward directories. i tend to
+# forget this exists and use cl instead.
+bl() {
+ local start i j max
+ max=10
+ start=$(( ${#_dir_back[@]} - 1 ))
+
+ # cleanup possible repeating of pwd
+ if (( start >= 0 )) && [[ ${_dir_back[$start]} == "$PWD" ]]; then
+ start=$(( start - 1 ))
+ fi
+ j=1
+ if (( start >= 0 )); then
+ for (( i=start; i >= 0 ; i-- )); do
+ printf "%s %s\n" $j ${_dir_back[i]}
+ j=$(( j + 1 ))
+ if (( j >= max )); then
+ break
+ fi
+ done
+ fi
+
+ max=10
+ start=$(( ${#_dir_forward[@]} - 1 ))
+
+ # cleanup possible repeating of pwd
+ if (( start >= 0 )) && [[ ${_dir_forward[$start]} == "$PWD" ]]; then
+ start=$(( start - 1 ))
+ fi
+ if (( start < 0 )); then
+ return 0
+ fi
+ echo --
+ j=1
+ for (( i=start; i >= 0 ; i-- )); do
+ printf "%s %s\n" $j ${_dir_forward[i]}
+ j=$(( j + 1 ))
+ if (( j >= max )); then
+ break
+ fi
+ done
+}
+# like running cl <enter> a <enter>
+cla() {
+ local line cdirs
+ mapfile -t lines <"$cdirs"
+ start=$(( ${#lines[@]} - 1 ))
+ for (( j=start; j >= 0; j-- )); do
+ line="${lines[$j]}"
+ if [[ ! $line || ! -d "$line" || $line == "$PWD" || line == "$HOME" ]]; then
+ continue
+ fi
+ e "$line"
+ c "$line"
+ break
+ done
+}
+## END functions to change directory better than cd ##
+
+# pee do. run args as a command with output copied to syslog.
+#
+# Usage: pd [-t TAG] COMMAND...
+#
+# -t TAG Override the tag in the syslog. The default is COMMAND with
+# any path part is removed, eg. for /bin/cat the tag is cat.
+#
+# You can view the log via "journalctl -t TAG"
+pd() {
+ local tag ret
+ ret=0
+ tag=${1##*/}
+ case $1 in
+ -t) tag="$2"; shift 2 ;;
+ esac
+ echo "PWD=$PWD command: $*" | logger -t $tag
+ "$@" |& pee cat "logger -t $tag" || ret=$?
+ echo "exited with status=$ret" | pee cat "logger -t $tag"
+ # this avoids any err-catch
+ (( ret == 0 )) || return $ret
+}
+ccomp time pd
+
+# jdo = journal do. Run command as transient systemd service, tailing
+# its output in the journal until it completes.
+#
+# Usage: jdo [-n UNIT_NAME] COMMAND...
+#
+# UNIT_NAME instead of using COMMAND as unit name, use UNIT_NAME. This is needed
+# in case COMMAND is already a unit.
+#
+# Compared to pd: commands recognize this is a non-interactive shell.
+# The service is unaffected if our ssh connection dies, no need to run
+# in screen or tmux.
+#
+# Note: There will be a few second delay at the start of the command,
+# and a second or so at the end.
+#
+# Note: Functions and aliases obviously won't work, we resolve the
+# command to a file.
+#
+# More comparison to others approaches: systemd-cat: fails if you do
+# systemctl daemon-reload, and I've had other approaches. I haven't yet
+# really tried logging with script, sudo, or screen.
+jdo() {
+ local cmd unit_name jr_pid ret sdrun_args sdrun_prefix
+ ret=0
+ if [[ $1 == -n ]]; then
+ unit_name="$2"
+ shift 2
+ fi
+
+ cmd="$1"
+ if [[ ! $unit_name ]]; then
+ unit_name=${cmd##*/}
+ fi
+
+ shift
+ if [[ $EUID == 0 ]]; then
+ sdrun_args="-E HOME=/root -E SSH_AUTH_SOCK=/run/openssh_agent"
+ else
+ # note, I don't use system --user because if it does sudo ssh, that
+ # will leave a process around that we can't kill and it will leave
+ # the unit hanging around in a failed state needing manual killing
+ # of the process.
+ sdrun_prefix=sudo
+ sdrun_args="--uid $(id -u) --gid $(id -g) -E SSH_AUTH_SOCK=/run/openssh_agent"
+ fi
+ if [[ $cmd != /* ]]; then
+ # we need to pass a full path to systemd
+ cmd=$(type -P "$cmd")
+ fi
+ #note date format for since is date '+%F %T'
+ # -q = quiet
+ journalctl --since=now -qn2 -f -u "$unit_name" &
+ jr_pid=$!
+ # sleep 1 is too fast for x200
+ sleep 3
+ $sdrun_prefix systemd-run $sdrun_args --unit "$unit_name" --wait --collect "$cmd" "$@" || ret=$?
+ # The sleep lets the journal output its last line
+ # before the prompt comes up.
+ sleep .5
+ kill $jr_pid &>/dev/null ||:
+ unset jr_pid
+ fg &>/dev/null ||:
+ # this avoids any err-catch
+ (( ret == 0 )) || return $ret
+}
+ccomp time jdo
+
+# standard date as used in logs
+datelog() {
+ date +%Y-%m-%d "$@"
+}
+
+# date in log appropriate format
+dtl() {
+ date "+%F %T" "$@"
+}
+
+# ts formatted
+tsf() {
+ command ts "%F %T" "$@"
+}
+
+# ts log. log command to log file.
+# usage: tsl LOG_PATH_PREFIX COMMAND...
+# example: tsl /root/command
+# log file will be like /root/command-2024-02-10.log
+#
+# Caveats: If there is a user prompt, like "read -p prompt var", it will
+# hang without outputting the prompt. Sometimes some output will get
+# hidden until you hit enter.
+#
+tsl() {
+  local log_prefix log_path appending ret
+  if (( $# < 2 )); then
+    echo "tsl: error: expected >= 2 arguments, got $#" >&2
+    return 1
+  fi
+  log_prefix="$1"
+  # ${log_prefix%/*} strips the final path component (dirname); the
+  # previous %*/ pattern never matched and checked the full path instead.
+  if [[ $log_prefix == */* && ! -d ${log_prefix%/*} ]]; then
+    echo "tsl: error: expected directory at ${log_prefix%/*}" >&2
+    return 1
+  fi
+  log_path=$log_prefix-$(date +%Y-%m-%d).log
+  appending=false
+  if [[ -s $log_path ]]; then
+    appending=true
+  fi
+  shift
+  printf "%s\n" "CWD: $PWD, log: $log_path, running $*" | ts "%F %T" | tee -a "$log_path"
+  ret=0
+  "$@" |& ts "%F %T" | tee -a "$log_path" || ret=$?
+  printf "%s\n" "exit code $ret from command: $*" | ts "%F %T" | tee -a "$log_path"
+  if $appending; then
+    printf "%s\n" "note: this log file contains logs before those of previous command" | ts "%F %T" | tee -a "$log_path"
+  fi
+}
+
+# Gets all the essential disk info on a machine. In case it gets ill, this
+# will help us know the expected state.
+disk-info() {
+ local cmds cmd
+ mapfile -t cmds <<'EOF'
+tail -n +1 /proc/mdstat /etc/mdadm/mdadm.conf /etc/fstab /etc/crypttab
+lsblk
+blkid
+ls -la /dev/disk/by-id
+EOF
+
+ for cmd in "${cmds[@]}"; do
+ cat <<EOF
+### $cmd
+
+\`\`\`
+EOF
+ $cmd
+ cat <<'EOF'
+
+```
+
+EOF
+ done
+}
+
+# Send a screencast in rtp. For libreplanet testing of local stream.
+screenrtp() {
+ local ip port xoffset
+ read -r ip port xoffset <<<"$@"
+
+ setxenv
+
+ if [[ ! $port ]]; then
+ port=9999
+ fi
+
+ while true; do
+ # By default, plugged in screen goes to the right side, so we need an
+ # offset that is the same as the laptop's x resolution. If we are in
+ # mirror mode, then we don't need an offset.
+ if [[ ! $xoffset ]]; then
+ xoffset=0
+ laptop_x=$(xrandr | awk '$1 == "LVDS-1" {print $4}' | sed 's/x.*//') || { sleep 1; continue; }
+ total_x=$(xdpyinfo| awk '$1 == "dimensions:" {print $2}' | sed 's/x.*//') || { sleep 1; continue; }
+ screen2_res=$(xrandr | awk '$2 == "connected" && $1 != "LVDS-1" { print $3 }' | sed 's/+.*//')
+ if (( laptop_x < total_x )); then
+ xoffset=$laptop_x
+ fi
+ fi
+
+ m ffmpeg -probesize 50M -thread_queue_size 50 \
+ -video_size $screen2_res -f x11grab -framerate 30 -i :0.0+$xoffset.0 \
+ -vcodec libx264 -g 1 -tune zerolatency -preset ultrafast -pix_fmt yuv420p -x264-params repeat-headers=1 \
+ -f rtp_mpegts rtp://$ip:$port ||:
+
+
+ sleep 1
+ done
+}
+
+# Set the basic X related environment variables we don't have in some
+# cases.
+setxenv() {
+ if [[ ! $DISPLAY ]]; then
+ export DISPLAY=:0.0
+ fi
+ if [[ ! $XAUTHORITY ]]; then
+ export XAUTHORITY=$HOME/.Xauthority
+ fi
+}
+
+d16fan() {
+ apt -y install thinkfan lm-sensors
+ modprobe -a w83795 w83627ehf
+ # from fsf ansible
+ cat << EOF > /etc/thinkfan.conf
+(20, 0, 55)
+(40, 50, 60)
+(85, 55, 65)
+(130, 60, 70)
+(175, 65, 75)
+(210, 70, 80)
+(255, 75, 32767)
+EOF
+ find /sys/ | grep 'temp[1-8]_input' | xargs -i echo hwmon {} >> /etc/thinkfan.conf
+ find /sys/module/w83795/drivers/*/*/pwm1 | sort | head -1 | xargs -i echo pwm_fan {} >> /etc/thinkfan.conf
+ systemctl restart thinkfan
+}
+
+
+### end FSF section ###
+
..() { c ..; }
...() { c ../..; }
....() { c ../../..; }
-.....() { c ../../../..; }
+.....() { c ../../../..; }
......() { c ../../../../..; }
+# Given a file, output a here document which rewrites the file.
chere() {
local f path
for f; do
# file cut copy and paste, like the text buffers :)
# I havnt tested these.
_fbufferinit() { # internal use
- ! [[ $my_f_tempdir ]] && my_f_tempdir=$(mktemp -d)
+ ! [[ $my_f_tempdir ]] && my_f_tempdir="$(mktemp -d)"
rm -rf "${my_f_tempdir:?}"/*
}
fcp() { # file cp
cp "$my_f_tempdir"/* "$target"
}
-_khfix_common() {
- local host ip port file key
- read -r host ip port < <(timeout -s 9 2 ssh -oBatchMode=yes -oControlMaster=no -oControlPath=/ -v $1 |& sed -rn "s/debug1: Connecting to ([^ ]+) \[([^\]*)] port ([0-9]+).*/\1 \2 \3/p" ||: )
+_khfix-common() {
+ local host ip port file key tmp ssh_host alias
+ ssh_host=$1
+ {
+ read -r host ip port
+ read -r alias;
+ # note ":graph:" is needed or else we get a trailing \r out of ssh,
+ # dunno why. web search says terminals add \r, so I tried adding -T
+ # to turn off psuedo terminal, but it didnt help.
+ } < <(timeout -s 9 2 ssh -TN -oBatchMode=yes -oControlMaster=no -oControlPath=/ -v $ssh_host |&
+ sed -rn "s/debug1: Connecting to ([^ ]+) \[([^\]*)] port ([0-9]+).*/\1 \2 \3/p;
+s/^debug1: using hostkeyalias: ([[:graph:]]*).*/\1/p" ||: )
file=$(readlink -f ~/.ssh/known_hosts)
if [[ ! $ip ]]; then
echo "khfix: ssh failed"
return 1
fi
+ ip_entry=$ip
+ host_entry=$host
+ if [[ $alias ]]; then
+ host_entry="$alias"
+ fi
if [[ $port != 22 ]]; then
ip_entry="[$ip]:$port"
- host_entry="[$host]:$port"
- else
- ip_entry=$ip
- host_entry=$host
+ if [[ ! $alias ]]; then
+ host_entry="[$host]:$port"
+ fi
fi
- tmpfile=$(mktemp)
- if [[ $host != $ip ]]; then
- key=$(ssh-keygen -F "$host_entry" -f $file | sed -r 's/^.*([^ ]+ +[^ ]+) *$/\1/')
+ if [[ $host_entry != "$ip_entry" ]]; then
+ tmp=$(mktemp)
+ ssh-keygen -F "$host_entry" -f $file >$tmp || [[ $? == 1 ]] # 1 when it doesnt exist in the file
+ if [[ -s $tmp ]]; then
+ key=$(sed -r 's/^.*([^ ]+ +[^ ]+) *$/\1/' $tmp)
+ else
+ echo "khfix WARNING: did not find host entry:$host_entry in known_hosts"
+ fi
+ rm $tmp
if [[ $key ]]; then
grep -Fv "$key" "$file" | sponge "$file"
fi
+ key=
fi
- key=$(ssh-keygen -F "$ip_entry" -f $file | sed -r 's/^.*([^ ]+ +[^ ]+) *$/\1/')
+ tmp=$(mktemp)
+ ssh-keygen -F "$ip_entry" -f $file >$tmp || [[ $? == 1 ]]
+ if [[ -s $tmp ]]; then
+ key=$(sed -r 's/^.*([^ ]+ +[^ ]+) *$/\1/' $tmp)
+ else
+ echo "khfix WARNING: did not find ip entry:$ip_entry in known_hosts"
+ fi
+ rm $tmp
if [[ $key ]]; then
grep -Fv "$key" "$file" | sponge "$file"
fi
- ll ~/.ssh/known_hosts
- rootsshsync
}
-khfix() { # known hosts fix
- _khfix_common "$@" || return 1
+khfix-r() { # known hosts fix without syncing to root user
+ _khfix-common "$@" || return 1
ssh $1 :
}
-khcopy() {
- _khfix_common "$@"
- ssh-copy-id $1
+khfix() {
+ _khfix-common "$@" || return 1
+ ssh $1 :
+ rootsshsync
}
+# copy path into clipboard
a() {
local x
x=$(readlink -nf "${1:-$PWD}")
- # yes, its kinda dumb that xclip/xsel cant do this in one invocation
- echo -n "$x" | xclip -selection clipboard
- echo -n "$x" | xclip
+ # yes, its kinda dumb that xclip/xsel cant do this in one invocation.
+ # And, summarizing this:
+ # https://askubuntu.com/questions/705620/xclip-vs-xsel
+ # xclip has a few more options. xclip has a bug in tmux / forwarded x sessions.
+ cbs "$x"
+}
+
+# clipboard a string (into selection & clipboard buffer)
+cbs() {
+ # yes, its kinda dumb that xclip/xsel cant do this in one invocation.
+ # And, summarizing this:
+ # https://askubuntu.com/questions/705620/xclip-vs-xsel
+ # xclip has a few more options. xclip has a bug in tmux / forwarded x sessions.
+ printf "%s" "$*" | xclip -selection clipboard
+ printf "%s" "$*" | xclip
}
# a1 = awk {print $1}
done
# h1 = head -n1
for num in {1..9}; do
- eval h$num"() { head -n$num; }"
+ eval h$num"() { head -n$num || [[ \$? == 141 ]]; }"
done
-b() {
- # backwards
- c -
-}
-
hexipv4() {
+ # shellcheck disable=SC2046 disable=SC2001 disable=SC2183 # hacks, expected
printf '%d.%d.%d.%d\n' $(echo $1 | sed 's/../0x& /g')
}
vp9() {
-  local f out outdir in
+  local f out outdir in fname origdir skip1
+  origdir="$PWD"
outdir=vp9
-  case $1 in
-    --out)
-      outdir=$2
-      shift 2
-      ;;
-  esac
+  skip1=false
+  while [[ $1 == -* ]]; do
+    case $1 in
+      # if we got interrupted after 1st phase
+      -2)
+        skip1=true
+        shift
+        ;;
+      --out)
+        outdir=$2
+        shift 2
+        ;;
+    esac
+  done
m mkdir -p $outdir
+  # first pass only uses about 1 cpu, so run in parallel
for f; do
-    out=$PWD/$outdir/$f
-    in=$PWD/$f
-    m cd $(mktemp -d)
-    pwd
-    m ffmpeg -threads 0 -i $in -g 192 -vcodec libvpx-vp9 -vf scale=-1:720 -max_muxing_queue_size 9999 -b:v 750K -pass 1 -an -f null /dev/null
-    m ffmpeg -y -threads 0 -i $in -g 192 -vcodec libvpx-vp9 -vf scale=-1:720 -max_muxing_queue_size 9999 -b:v 750K -pass 2 -c:a libvorbis -qscale:a 5 $out
-    cd -
+    {
+      # basename of $f; the earlier ##*/f pattern ate the first char too
+      fname="${f##*/}"
+      if [[ $f == /* ]]; then
+        in="$f"
+      else
+        in=$origdir/$f
+      fi
+      out="$origdir/$outdir/$fname"
+      mkdir -p /tmp/vp9/$fname
+      cd /tmp/vp9/$fname
+      if ! $skip1 && [[ ! -s ffmpeg2pass-0.log ]]; then
+        # -nostdin or else wait causes ffmpeg to go into stopped state. dunno why, random stackoverflow answer.
+        m ffmpeg -nostdin -hide_banner -loglevel error -i $in -g 192 -vcodec libvpx-vp9 -vf scale=-1:720 -max_muxing_queue_size 9999 -b:v 750K -pass 1 -an -f null /dev/null
+      fi
+      if [[ -e $out ]]; then rm -f $out; fi
+      m ffmpeg -nostdin -hide_banner -loglevel error -y -i $in -g 192 -vcodec libvpx-vp9 -tile-rows 2 -vf scale=-1:720 -max_muxing_queue_size 9999 -b:v 750K -pass 2 -c:a libvorbis -qscale:a 5 $out
+    } &
done
+  wait -f
+  cd "$origdir"
+}
utcl() { # utc 24 hour time to local hour 24 hour time
echo "print( ($1 $(date +%z | sed -r 's/..$//;s/^(-?)0*/\1/')) % 24)"|python3
}
-# c. better cd
-if type -p wcd &>/dev/null; then
- if [[ $LC_INSIDE_EMACS ]]; then
- c() { wcd -c -z 50 -o "$@"; }
- else
- # lets see what the fancy terminal does from time to time
- c() { wcd -c -z 50 "$@"; }
- fi
-else
- c() { cd "$@"; }
-fi
-ccomp cd c
+bwm() {
+ s bwm-ng -T avg -d
+}
+
+# for running in a fai rescue. iank specific.
+kdrescue() {
+ d=vgata-Samsung_SSD_850_EVO_2TB_S2RLNX0J502123D
+ for f in $d vgata-Samsung_SSD_870_QVO_8TB_S5VUNG0N900656V; do
+ cryptsetup luksOpen --key-file /p /dev/$f/root crypt-$f-root
+ cryptsetup luksOpen --key-file /p /dev/$f/o crypt-$f-o
+ done
+ mount -o subvol=root_trisquelaramo /dev/mapper/crypt-$d-root /mnt
+ mount -o subvol=a /dev/mapper/crypt-$d-root /mnt/a
+ mount -o subvol=o /dev/mapper/crypt-$d-o /mnt/o
+ mount -o subvol=boot_trisquelaramo /dev/sda2 /mnt/boot
+ cd /mnt
+ chrbind
+}
+
+
+
+# cat exim4
c4() { c /var/log/exim4; }
+# commit all amend
caa() { git commit --amend --no-edit -a; }
-cf() {
- for f; do
- hr
- echo "$f"
- hr
- cat "$f"
- done
-}
+# cat + hr all files found by find.
caf() {
- # shellcheck disable=SC2033
+ local file
find -L "$@" -type f -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
- -o -name .undo-tree-history -prune \) \
- -exec bash -c '. ~/.bashrc; hr; echo "$1"; hr; cat "$1"' _ {} \; 2>/dev/null
-
+ -o -name .undo-tree-history -prune \) -printf '%h\0%d\0%p\n' | sort -t '\0' -n \
+ | awk -F '\0' '{print $3}' 2>/dev/null | while read -r file; do
+ hr "$file"
+ v "$file"
+ # if the file is nonempty and the last char is nonempty, it is not
+ # newline terminated.
+ if [[ -s "$file" && "$(tail -c 1 "$file")" ]]; then
+ echo
+ fi
+ done
}
ccomp cat cf caf
+# calculator
calc() { echo "scale=3; $*" | bc -l; }
-# no having to type quotes, but also no command history:
+# calculator without needing to type quotes, but also no command history:
clc() {
local x
read -r x
echo "scale=3; $x" | bc -l
}
+# quick chmod +X
+cx() {
+ chmod +X "$@"
+}
+
+# commit all with message
cam() {
git commit -am "$*"
}
}
ccomp grep ccat
+# Setup chroot bindmounts (idempotently)
chrbind() {
local d
# dev/pts needed for pacman signature check
fi
done
}
+# unmount chroot bindmounts
chumount() {
local d
# dev/pts needed for pacman signature check
- for d in dev proc sys dev/pts; do
+ for d in dev/pts dev proc sys; do
[[ -d $d ]]
if mountpoint $d &>/dev/null; then
m s umount $d
echo "$pastline" >> "$2"
}
+# config diff. (for very common config format or two)
cdiff() {
- # diff config files,
# setup for format of postfix, eg:
# option = stuff[,]
# [more stuff]
done < "$unified"
}
-
+# cat new files as they appear in $1.
cat-new-files() {
local start=$SECONDS
local dir="$1"
- inotifywait -m "$dir" -e create -e moved_to |
- # shellcheck disable=SC2030
+ # shellcheck disable=SC2030
+ inotifywait -m "$dir" -e create -e moved_to | \
while read -r filedir _ file; do
cat "$filedir$file"
hr
}
+# quicker chown to my user
+chownme() {
+ s chown -R $USER:$USER "$@"
+}
+
+# Better chown defaults.
# shellcheck disable=SC2032
chown() {
# makes it so chown -R symlink affects the symlink and its target.
fi
}
+# commit with message
cim() {
git commit -m "$*"
}
-cl() {
- # choose recent directory. cl = cd list
- c =
-}
-
+# push a process down.
d() { builtin bg "$@"; }
ccomp bg d
+# faster fg.
+# f would be more natural, but i already am using it for something
+z() { builtin fg "$@"; }
+ccomp fg z
+
+# quick kill.
+x() { builtin kill %%; }
+
+# diff content, aka word diff.
dc() {
- diff --strip-trailing-cr -w "$@" # diff content
+ diff --strip-trailing-cr -w "$@"
}
ccomp diff dc
+# remove spaces from file name(s)
despace() {
local x y
for x in "$@"; do
done
}
+# df progress
+# usage: dfp MOUNTPOINT [SECOND_INTERVAL]
+# SECOND_INTERVAL defaults to 90
+dfp() {
+ # mp = mountpoint
+ local a b mp interval
+ mp=$1
+ interval=${2:-90}
+ if [[ ! $mp ]]; then
+ echo "dfp: error, missing 1st arg" >&2
+ return 1
+ fi
+ while true; do
+ a=$(df --output=used $mp | tail -n1)
+ sleep $interval
+ b=$(df --output=used $mp | tail -n1)
+ printf "used mib: %'d mib/min: %s\n" $(( b /1000 )) $(( (b-a) / (interval * 1000 / 60 ) ))
+ done
+}
+
+# get ipv4 ip from HOST. or if HOST is already an address literal,
+# return it unchanged.
+hostip() {
+  local host="$1"
+  case $host in
+    *:*)
+      # contains a colon: ipv6 literal, pass through
+      echo "$host"
+      ;;
+    *[!0-9.]*)
+      # contains chars besides digits/dots: a hostname, resolve it.
+      # (the old single-char pattern [0-9:] only matched 1-char hosts)
+      getent ahostsv4 "$host" | awk '{ print $1 }' | head -n1
+      ;;
+    *)
+      # only digits and dots: dotted-quad ipv4, pass through
+      echo "$host"
+      ;;
+  esac
+}
+
+# dig nicer defaults
dig() {
command dig +nostats +nocmd "$@"
}
-# Output with sections sorted, and removal of query id, so 2 dig outputs can be diffed.
+
+# dig "$@" and output with sections sorted, and removal of query id, so
+# 2 dig outputs can be diffed.
digsort() {
local sec
sec=
diff -u /tmp/digdiff <(digsort $s2 "$@")
}
+# date in a format i like reading
dt() {
date "+%A, %B %d, %r" "$@"
}
-ccomp date dt
+dtr() {
+ date -R "$@"
+}
+# date with all digits in a format i like
+dtd() {
+  # the previous format string ended in a stray "%" (+%F_%T%), which
+  # GNU date prints as a literal % at the end of the output; drop it.
+  date +%F_%T "$@"
+}
+ccomp date dt dtr dtd
-dus() { # du, sorted, default arg of
+# du sorted. Default target of *.
+dus() {
du -sh ${@:-*} | sort -h
}
ccomp du dus
-
-e() { echo "$@"; }
+# echo
+e() { printf "%s\n" "$*"; }
+err() { echo "$(date +%Y-%m-%d) ${FUNCNAME[0]}: $*" >&2; }
# echo args
ea() {
printf "%s" "${arg}" |& hexdump -C
done
}
-# echo vars. print var including escapes, etc
+
+# echo variables. print var including escapes, etc, like xxd for variable
ev() {
if (( ! $# )); then
echo no args
done
}
+# emacs ediff from cli
ediff() {
[[ ${#@} == 2 ]] || { echo "error: ediff requires 2 arguments"; return 1; }
emacs --eval "(ediff-files \"$1\" \"$2\")"
}
+# tail rspam log
+rspamta() {
+ ta "$@" /var/log/rspamd/rspamd.log
+}
+
+
+_rspamtag() {
+ tailf /var/log/rspamd/rspamd.log | grp "$@"
+}
+# tail rspam log + grep $@
+rspamtag() {
+ bn _rspamtag "$@"
+}
+
+
+_etailg() {
+ ngset
+ tailf /var/log/exim4/mainlog /var/log/exim4/*main /var/log/exim4/paniclog /var/log/exim4/*panic -n 200 | grp "$@"
+ ngreset
+}
+# etail + grep.
+etailg() {
+ bn _etailg "$@"
+}
# mail related
+# shellcheck disable=SC2120 # we expect to pass arguments in use outside this file
etail() {
- tail -F /var/log/exim4/mainlog -n 200 "$@"
+ ngset
+ ta /var/log/exim4/mainlog /var/log/exim4/*main /var/log/exim4/paniclog /var/log/exim4/*panic -n 200 "$@"
+ ngreset
+}
+etailm() {
+ ta /var/log/exim4/mainlog -n 200 "$@"
+}
+etail2() {
+ ta /var/log/exim4/nondmain -n 200 "$@"
+}
+
+# shortcut for tail -F + highlighting if we have it.
+tailf() {
+ if type -t batcat >/dev/null; then
+ # note: another useful useful style is "header"
+ tail -F "$@" | batcat --color always --style plain --theme Coldark-Cold -P
+ else
+ tail -F "$@"
+ fi
+}
+
+# tail -F but keep our shell line untouched..
+ta() {
+ bn tailf "$@"
+}
+_tag() {
+ local file
+ file="$1"
+ tailf "$file" | gr --line-buffered "$@"
+}
+# ta + grep
+tag() {
+ bn _tag "$@"
+}
+
+ccomp tail etail etail2 ta
+
+_cron-test() {
+ tailf /var/log/syslog | gr --line-buffered cron
+}
+# tool to manually test cronjobs
+cron-test() {
+ echo "cron will check for new files in about $(( 60 - $(date +%S) + 2 )) seconds"
+ bn _cron-test
+}
+
+# ssh + cat both auth keys files.
+showkeys() {
+ ssh "$@" cat .ssh/authorized_keys{,2}
}
-ccomp tail etail
# print exim old pids
eoldpids() {
fi
for pid in $(pgrep -f '^/usr/sbin/exim4( |$)'); do
# the daemonpid gets reexeced on HUP (service reloads), keeping its same old timestamp
- if [[ $pid == $daemonpid ]]; then
+ if [[ $pid == "$daemonpid" ]]; then
continue
fi
piduptime=$(awk -v ticks="$(getconf CLK_TCK)" 'NR==1 { now=$1; next } END { printf "%9.0f\n", now - ($20/ticks) }' /proc/uptime RS=')' /proc/$pid/stat) ||: # sometimes pids disappear pretty fast
}
# exim watch as old pids go away
ewatchold() {
- local configtime pid piduptime now
+ local configtime pid piduptime now tmpstr
local -i count
local -a oldpids
count=0
while true; do
- oldpids=($(eoldpids))
+ tmpstr=$(eoldpids)
+ mapfile -t oldpids <<<"$tmpstr"
if (( ! ${#oldpids[@]} )); then
return
fi
done
}
+# eximlog less
eless() {
less /var/log/exim4/mainlog
}
ccomp less eless
+# exim queue cat
eqcat() {
- exiqgrep -i -o 60 | while read -r i; do
+ exiqgrep -ir.\* -o 60 | while read -r i; do
hlm exim -Mvc $i
echo
hlm exigrep $i /var/log/exim4/mainlog | cat ||:
done
}
+# do like rm -f for messages in the exim queue.
eqrmf() {
- exiqgrep -i | xargs exim -Mrm
+ # other ways to get the list of message ids:
+ # exim -bp | awk 'NF == 4 {print $3}'
+ # # this is slower 160ms, vs 60.
+ # exipick -i
+ exiqgrep -ir.\* | xargs exim -Mrm
}
-
+# create a development exim config directory.
econfdevnew() {
rm -rf /tmp/edev
mkdir -p /tmp/edev/etc
find /tmp/edev/etc/exim4 -type f -execdir sed -i "s,/etc/,/tmp/edev/etc/,g" '{}' +
econfdev
}
+# build a development exim config.
econfdev() {
update-exim4.conf -d /tmp/edev/etc/exim4 -o /tmp/edev/e.conf
}
+# exim log grep for incoming.
+# show important information about incoming mail in the exim log
+egrin() {
+ sed -rn '/testignore|jtuttle|eximbackup/!s/^[^ ]+ ([^ ]+) [^ ]+ [^ ]+ <= ([^ ]+).*T="(.*)" from (<[^ ]+> .*$)/\1 \4\n \3/p' <${1:-/var/log/exim4/mainlog}
+}
+# exim log grep for incoming + show message-ids.
+# 2nd line is message-id:
+egrinid() {
+ sed -rn '/testignore|jtuttle|eximbackup/!s/^[^ ]+ ([^ ]+) [^ ]+ [^ ]+ <= ([^ ]+).* id=([^ ]+) T="(.*)" from (<[^ ]+> .*$)/\1 \5\n \3\n \4/p' <${1:-/var/log/exim4/mainlog}
+}
+# exim log tail + filter for incoming messages.
+etailin() {
+  local -a tail_arg
+  tail_arg=(-n500)
+  if [[ $1 ]]; then
+    # quoted: unquoted ($@) would word-split and glob-expand the args
+    tail_arg=("$@")
+  fi
+  tail "${tail_arg[@]}" -F /var/log/exim4/mainlog | sed -rn '/testignore|jtuttle|eximbackup/!s/^[^ ]+ ([^ ]+) [^ ]+ [^ ]+ <= ([^ ]+).*T="(.*)" from (<[^ ]+> .*$)/\1 \4\n \3/p'
+}
-# shellcheck disable=SC2032
-f() {
- # cd forward
- c +
-}
+# find array. make an array of file names found by find into $x
+# argument: find arguments
+# return: find results in an array $x
fa() {
- # find array. make an array of file names found by find into $x
- # argument: find arguments
- # return: find results in an array $x
while read -rd ''; do
x+=("$REPLY");
done < <(find "$@" -print0);
}
-faf() { # find all files. use -L to follow symlinks
+# find all files. use -L to follow symlinks
+# shellcheck disable=SC2120
+faf() {
find "$@" -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
-o -name .undo-tree-history -prune \) -type f 2>/dev/null
}
-# todo: id like to do maybe a daily or hourly cronjob to
-# check that my history file size is increasing. Ive had it
-# inexplicably truncated in the past.
-histrm() {
- history -n
- history | awk -v IGNORECASE=1 '{ a=$1; sub(/^( *[^ ]+){4} */, "") }; /'"$*"'/'
- read -p "press anything but contrl-c to delete"
- for entry in $(history | awk -v IGNORECASE=1 '{ a=$1; sub(/^( *[^ ]+){4} */, "") }; /'"$*"'/ { print a }' | tac); do
- history -d $entry
+# ffmpeg concat files.
+# usage ffconcat FILES_TO_CONCAT OUTPUT_FILE
+ffconcat() {
+  local tmpf
+  tmpf=$(mktemp)
+  # Every arg except the last is an input; the last arg is the output
+  # file. The previous loop shifted before printing, which appended the
+  # output file itself to the concat list; print first, then shift.
+  while (( $# > 1 )); do
+    printf "file '%s'\n" "$1" >>$tmpf
+    shift
  done
-  history -w
+  # https://trac.ffmpeg.org/wiki/Concatenate
+  ffmpeg -f concat -safe 0 -i $tmpf -c copy "$1"
+  rm $tmpf
+}
+# ffmpeg remux files in place (video+audio stream copy, no re-encode).
+ffremux() {
+  local tmpf tmpd f
+  if (( $# == 0 )); then
+    echo ffremux error expected args >&2
+    return 1
+  fi
+  tmpd="$(mktemp -d)"
+  for f; do
+    # quoted: $tmpf contains the input basename, which may have spaces
+    tmpf=$tmpd/"${f##*/}"
+    ffmpeg -i "$f" -c:v copy -c:a copy "$tmpf"
+    cat "$tmpf" >"$f"
+  done
+  rm -r "$tmpd"
+}
-# mail related
+
+
+# absolute path of file/dir without resolving symlinks.
+#
+# Most of the time, I want this where I would normally use readlink.
+# This is what realpath -s does in most cases, but sometimes it
+# actually resolves symlinks, at least when they are in /.
+#
+# Note, if run on a dir, if the final component is relative, it won't
+# resolve that. Use the below fpd for that.
+#
+# note: we could make a variation of this which
+# assigns to a variable name using eval, so that we don't have to do
+# x=$(fp somepath), which might save subshell overhead and look nice,
+# but I'm not going to bother.
+fp() {
+ local initial_oldpwd initial_pwd dir base
+ initial_oldpwd="$OLDPWD"
+ initial_pwd="$PWD"
+ if [[ $1 == */* ]]; then
+ dir="${1%/*}"
+ base="/${1##*/}"
+ # CDPATH because having it set will cause cd to possibly print output
+ CDPATH='' cd "$dir"
+ printf "%s%s\n" "$PWD" "$base"
+ CDPATH='' cd "$initial_pwd"
+ OLDPWD="$initial_oldpwd"
+ else
+ printf "%s/%s\n" "$PWD" "$1"
+ fi
+}
+# full path of directory without resolving symlinks
+fpd() {
+  local initial_oldpwd initial_pwd dir
+  initial_oldpwd="$OLDPWD"
+  initial_pwd="$PWD"
+  dir="$1"
+  # CDPATH='' so a set CDPATH can't make cd print output (same as fp)
+  CDPATH='' cd "$dir"
+  # the copy-from-fp "%s%s" "$PWD" "$base" referenced $base, which is
+  # never set in this function; just print the resolved directory.
+  printf "%s\n" "$PWD"
+  CDPATH='' cd "$initial_pwd"
+  OLDPWD="$initial_oldpwd"
+}
+
+# cat exim frozen messages in a nice format.
frozen() {
rm -rf /tmp/frozen
sudo mailq |gr frozen|awk '{print $3}' | while read -r id; do
echo -e '\n\n##############################\n'
done | tee -a /tmp/frozen
}
+# nice deletion of exim frozen messages.
frozenrm() {
local ids=()
while read -r line; do
printf '%s\n' "$line"
- ids+=($(printf '%s\n' "$line" |gr frozen|awk '{print $3}'))
+ ids+=("$(printf '%s\n' "$line" |gr frozen|awk '{print $3}')")
done < <(s mailq)
echo "sleeping for 2 in case you change your mind"
sleep 2
sudo exim -Mrm "${ids[@]}"
}
+# like -e for functions. returns on error.
+# at the end of the function, disable with:
+# trap ERR
funce() {
- # like -e for functions. returns on error.
- # at the end of the function, disable with:
- # trap ERR
trap 'echo "${BASH_COMMAND:+BASH_COMMAND=\"$BASH_COMMAND\" }
${FUNCNAME:+FUNCNAME=\"$FUNCNAME\" }${LINENO:+LINENO=\"$LINENO\" }\$?=$?"
trap ERR
return' ERR
}
+# see help=
getdir () {
local help="Usage: getdir [--help] PATH
Output the directory of PATH, or just PATH if it is a directory."
git clean -fdx
}
+# see help=
# shellcheck disable=SC2120
gitroot() {
local help="Usage: gitroot [--help]
echo "${p%%/.git}"
}
-g() {
-
- # todo: patch emacs so it will look elsewhere. this is kinda sad:
- # https://emacs.stackexchange.com/questions/4253/how-to-start-emacs-with-a-custom-user-emacs-directory
-
- local args gdb=false
-
- if [[ $EMACSDIR ]]; then
- path-add "$EMACSDIR/lib-src" "$EMACSDIR/src"
- fi
-
- if [[ $DISPLAY ]]; then
- args=-n
- fi
-
- if (( $# == 0 )); then
- args+=" -c"
- fi
- # duplicate -c, but oh well
- if ! pgrep -u $EUID emacsclient; then
- if (( $# == 0 )) && type -p gdb &>/dev/null; then
- gdb=true
- else
- args+=" -c"
- fi
- fi
- if [[ $EMACSDIR ]]; then
- # Alter the path here, otherwise the nfs mount gets triggered on the
- # first path lookup when emacs is not being used.
- PATH="$EMACSDIR/lib-src:$EMACSDIR/src:$PATH" EHOME=$HOME HOME=$EMACSDIR m emacsclient -a "" $args "$@"
- else
- if $gdb; then
- # due to a bug, we cant debug from the start unless we get a new gdb
- # https://sourceware.org/bugzilla/show_bug.cgi?id=24454
- # m gdb -ex="set follow-fork-mode child" -ex=r -ex=quit --args emacs --daemon
- m emacsclient -a "" $args "$@"
- sleep 1
- cd /a/opt/emacs-$(distro-name)$(distro-num)
- s gdb -p $(pgrep -f 'emacs --daemon') -ex c
- cd -
- else
- m emacsclient -a "" $args "$@"
- fi
- fi
+# g pipe. like: cmd | emacs. save cmd output to tmp file, then edit.
+gp() {
+ cat &>/a/tmp/gtmp
+ g "$@" /a/tmp/gtmp
+}
+# g log
+#like cmd &> tempfile; emacs tempfile
+#
+# note: a useful workflow for doing mass replace on my files:
+# gc rem REGEX
+## remove any false positives, or manually edit them. rename files if needed.
+# sedi 's/REGEX/REPLACEMENT/' $(gr '^/' /a/tmp/gtmp)
+gl() {
+ "$@" &> /a/tmp/gtmp
+ g /a/tmp/gtmp
+}
+# g command substitution.
+gc() {
+ # shellcheck disable=SC2046 # i want word splitting for this hackery
+ g $("$@")
}
-# force terminal version
+# g, but run terminal emacs.
gn() {
g -n "$@"
}
+# emacs under gdb.
gmacs() {
# quit will prompt if the program crashes.
gdb -ex=r -ex=quit --args emacs "$@"; r;
}
+# kill the emacs daemon
gdkill() {
- # kill the emacs daemon
pk1 emacs --daemon
}
+_grx() {
+ local arg
+ local -a cmd grep_args
+ cmd=("$1")
+ shift
+ for arg; do
+ if [[ $arg == "-" ]]; then
+ cmd+=("${grep_args[@]}")
+ grep_args=()
+ else
+ grep_args+=("$arg")
+ fi
+ done
+ "${cmd[@]}" | grp "${grep_args[@]}"
+}
+# execute a command, grep it & background with bn.
+# usage:
+# CMD GREP ARGS
+# CMD CMD_ARGS - GREP ARGS
+grx() {
+ bn _grx "$@"
+}
+
+# grep with fix for pipe buffering in interactive shell.
+grp() {
+ gr --color=always --line-buffered "$@" || return $?
+}
+# quick grep
gr() {
grep -iIP --color=auto "$@" || return $?
}
-grr() { # grep recursive
+# grep recursive
+grr() {
# Don't return 1 on nonmatch because this is meant to be
# interactive, not in a conditional.
if [[ ${#@} == 1 ]]; then
- grep --exclude-dir='*.emacs.d' --exclude-dir='*.git' -riIP --color=auto "$@" . || [[ $? == 1 ]]
+ grep --exclude-dir='*.emacs.d' --exclude-dir='*.git' -rniIP --color=auto "$@" . || [[ $? == 1 ]]
else
- grep --exclude-dir='*.emacs.d' --exclude-dir='*.git' -riIP --color=auto "$@" || [[ $? == 1 ]]
+ grep --exclude-dir='*.emacs.d' --exclude-dir='*.git' -rniIP --color=auto "$@" || [[ $? == 1 ]]
fi
}
ccomp grep gr grr
+# recursive grep
rg() { grr "$@"; }
ccomp grep rg
-hr() { # horizontal row. used to break up output
- printf "$(tput setaf 5 2>/dev/null ||:)█$(tput sgr0 2>/dev/null||:)%.0s" $(eval echo "{1..${COLUMNS:-60}}")
- echo
+# recursive grep and find (e for everything). search for files/dirs and
+# lines.
+re() {
+ local query
+ query="$1"
+ find "$@" -not \( -name .svn -prune -o -name .git -prune \
+ -o -name .hg -prune -o -name .editor-backups -prune \
+ -o -name .undo-tree-history -prune \) 2>/dev/null | grep -iP --color=auto "$query"
+ grr -m 5 "$@"
+}
+
+# horizontal row. used to break up output
+hr() {
+ local start end end_count arg
+ # 180 is long enough. 5 for start.
+ start=█████ end=█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████
+ end_count=$(( ${COLUMNS:-180} - 5 ))
+ arg="$*"
+ if [[ $arg ]]; then
+ end_count=$(( end_count - 2 - ${#arg} ))
+ start="$start $arg "
+ fi
+ if (( end_count >= 1 )); then
+ end=${end:0:$end_count}
+ else
+
+ end=
+ fi
+ printf "%s\n" "$(tput setaf 5 2>/dev/null ||:)$start$end$(tput sgr0 2>/dev/null||:)"
}
-# highlight
+# highlighted echo.
hl() {
local col input_len=0
for arg; do
col=$((60 - input_len))
printf "\e[1;97;41m%s" "$*"
if (( col > 0 )); then
+ # shellcheck disable=SC2046 # needed to work as intended. a better way would be like hr above.
printf "\e[1;97;41m \e[0m%.0s" $(eval echo "{1..${col}}")
fi
echo
}
+# hl, then run.
hlm() { hl "$*"; "$@"; }
+# cat file(ss), adding hr lines indicating the start, end, and echo file name.
hrcat() { local f; for f; do [[ -f $f ]] || continue; hr; echo "$f"; cat "$f"; done }
+# example usage:
+# github-release-dl restic/restic restic_ _linux_amd64.bz2
+# gets a url like:
+# https://github.com/restic/restic/releases/download/v0.16.3/restic_0.16.3_linux_amd64.bz2
+github-release-dl() {
+  local github_path file_prefix file_suffix latest_prefix version redir_path
+  # check arity before consuming the args. $0 is "bash" when this rc
+  # file is sourced, so name the function via FUNCNAME (matches the
+  # dfp error-message style).
+  if (( $# != 3 )); then
+    echo "${FUNCNAME[0]}: error, expected 3 arguments" >&2
+    return 1
+  fi
+  github_path=$1
+  file_prefix=$2
+  file_suffix=$3
+  redir_path="https://github.com/$github_path/releases/latest/download/"
+  latest_prefix=$(curl -s -I "$redir_path" | awk 'tolower($1) == "location:" {print $2}')
+  # it has a trailing /r at the end. just kill any whitespace.
+  latest_prefix="${latest_prefix//[$'\t\r\n ']}"
+  if [[ ! $latest_prefix ]]; then
+    echo "failed to find latest path. Tried to find case insensitive 'location:' in the curl output:"
+    m curl -s -I "$redir_path"
+    return 1
+  fi
+  version="${latest_prefix##*/}"
+  version="${version#v}"
+  m wget -- "$latest_prefix/$file_prefix$version$file_suffix"
+}
+
+## Given a url to a github repo written in go, install its binary
+## (assuming it follows a common pattern).
+# examples:
+# go-github-install restic/restic restic_ _linux_amd64.bz2
+# go-github-install restic/rest-server rest-server_ _linux_amd64.tar.gz
+go-github-install() {
+  # file_prefix/file_suffix were previously leaking into the global scope
+  local tmpd targetf tmp src file_prefix file_suffix
+  local -a files
+  tmpd=$(mktemp -d)
+  cd "$tmpd"
+  file_prefix=$2
+  file_suffix=$3
+  # targetf = file_prefix minus any trailing non-alphanumeric chars,
+  # eg. restic_ -> restic
+  tmp="${file_prefix##*[[:alnum:]]}"
+  targetf="${file_prefix%"$tmp"}"
+  echo targetf: $targetf
+  github-release-dl "$@"
+  files=(./*)
+  case $file_suffix in
+    *.bz2)
+      bunzip2 -- ./*
+      ;;
+    *.tar.gz|*.tgz)
+      tar -vxzf ./*
+      ;;
+  esac
+  rm -f -- "${files[@]}"
+  files=(./*)
+  # Here we detect and handle 2 cases: either we extracted a single
+  # binary which we have to rename or a folder with a binary named
+  # $targetf in it which is all we care about.
+  if (( ${#files[@]} == 1 )) && [[ -f ${files[0]} ]]; then
+    chmod +x ./*
+    mv -- ./* /usr/local/bin/$targetf
+  else
+    files=(./*/$targetf)
+    if [[ -f $targetf ]]; then
+      src=$targetf
+    elif [[ -f ${files[0]} ]]; then
+      src="${files[0]}"
+    else
+      # previously fell through with $src empty -> chmod +x ""
+      echo "${FUNCNAME[0]}: error: could not find extracted binary $targetf" >&2
+      cd - >/dev/null
+      return 1
+    fi
+    chmod +x "$src"
+    mv -- "$src" /usr/local/bin
+  fi
+  cd - >/dev/null
+  rm -rf "$tmpd"
+}
+## 2024: I'm using gh instead of hub, but leaving this just in case.
+## I tried the github cli tool (gh) and it seems easier than
+## I remember hub.
+##
+## hub predated github's 2020 official cli tool gh.
+## more info at
+## https://raw.githubusercontent.com/cli/cli/trunk/docs/gh-vs-hub.md
# get latest hub and run it
# main command to use:
# hub pull-request --no-edit
# On first use, you input username/pass and it gets an oath token so you dont have to repeat
# it\'s at ~/.config/hub
hub() {
- local up uptar updir p
- p=/github/hub/releases/
- up=https://github.com/$(curl -s https://github.com$p| grep -o $p'download/[^/]*/hub-linux-amd64[^"]*' | head -n1)
+ local up uptar updir p re
+ # example https://github.com/github/hub/releases/download/v2.14.2/hub-linux-amd64-2.14.2.tgz
+ up=$(wget -q -O- https://api.github.com/repos/github/hub/releases/latest | jq -r .assets[].browser_download_url | grep linux-amd64)
+ re='[[:space:]]'
+ if [[ ! $up || $up =~ $re ]]; then
+ echo "failed to get good update url. got: $up"
+ fi
uptar=${up##*/}
updir=${uptar%.tgz}
if [[ ! -e /a/opt/$updir ]]; then
command hub "$@"
}
+# git alias
i() { git "$@"; }
ccomp git i
+# git status:
+# cvs -qn update
+
+# git checkout FILE
+# cvs update -C FILE
+
+# git pull
+# cvs up[date]
+
+# potentially useful command translation
+# https://fling.seas.upenn.edu/~giesen/dynamic/wordpress/equivalent-commands-for-git-svn-and-cvs/
+
+# importing cvs repo into git using git-cvs package:
+# /f/www $ /usr/lib/git-core/git-cvsimport -C /f/www-git
+
+# fast commit all
ic() {
- # fast commit all
git commit -am "$*"
}
+# simple alias
ipp() {
git pull
git push
}
-
+# quick insensitive find
ifn() {
- # insensitive find
+ local glob
+ local -a args
+ while [[ $2 ]]; do
+ args+=("$1")
+ shift
+ done
+ glob="$1"
+ shift
+ find -L "${args[@]}" -not \( -name .svn -prune -o -name .git -prune \
+ -o -name .hg -prune -o -name .editor-backups -prune \
+ -o -name .undo-tree-history -prune \) -iname "*$glob*" 2>/dev/null
+}
+
+ifs() { # insensitive find suffix
+ local glob
+ local -a args
+ while [[ $2 ]]; do
+ args+=("$1")
+ shift
+ done
+ glob="$1"
+ shift
+ find -L "${args[@]}" -not \( -name .svn -prune -o -name .git -prune \
+ -o -name .hg -prune -o -name .editor-backups -prune \
+ -o -name .undo-tree-history -prune \) -iname "*$glob" 2>/dev/null
+}
+
+
+ifh() {
+ # insensitive find here, args are combined into the search string.
+ # -L = follow symlinks
find -L . -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
-o -name .undo-tree-history -prune \) -iname "*$**" 2>/dev/null
}
-
+# insensitive find directory
ifd() {
- # insensitive find directory
find -L . -type d -not \( -name .svn -prune -o -name .git -prune \
-o -name .hg -prune -o -name .editor-backups -prune \
-o -name .undo-tree-history -prune \) -iname "*$**" 2>/dev/null
}
+ifs() {
+  # insensitive find suffix from here, args are combined into the search string.
+  # -L = follow symlinks
+  # NOTE(review): this redefines ifs(); an earlier definition in this
+  # same file (the variant taking explicit find args before the glob)
+  # is silently overridden because this one comes later. One of the
+  # two should be renamed or removed.
+  find -L . -not \( -name .svn -prune -o -name .git -prune \
+    -o -name .hg -prune -o -name .editor-backups -prune \
+    -o -name .undo-tree-history -prune \) -iname "*$**" 2>/dev/null
+}
+# quick iptables drop a source ip arg.
ipdrop() {
sudo iptables -A INPUT -s $1 -j DROP
}
-
+# test if a file contains text vs binary.
istext() {
grep -Il "" "$@" &>/dev/null
}
-jtail() {
- journalctl -n 10000 -f "$@"
+# pstree alias
+pst() {
+ pstree -apnAl
+}
+
+# journalctl with times in the format the --since= and --until= options accept
+jrt() { journalctl -e -n100000 -o short-full "$@"; }
+jr() { journalctl -e -n100000 "$@" ; }
+jrf() { SYSTEMD_COLORS=true bn journalctl -n1000 -f "$@" ; }
+jrfg() {
+ _jrfg() { SYSTEMD_COLORS=true journalctl -n1000 -f | grp "$@"; }
+ bn _jrfg "$@"
}
-jr() { journalctl "$@" ; }
-jrf() { journalctl -f "$@" ; }
jru() {
- journalctl -u exim4 _SYSTEMD_INVOCATION_ID=$(systemctl show -p InvocationID --value $1)
+ journalctl -u "$@" ;
+}
+jrug() {
+ _jrug() {
+ local unit
+ unit="$1"
+ shift
+ SYSTEMD_COLORS=true journalctl -n1000 -f -u "$unit" | grp "$@"
+ }
+ bn _jrug "$@"
}
+ccomp journalctl jr jrf jru
+
+# ls with minor improvements
l() {
if [[ $PWD == /[iap] ]]; then
command ls -A --color=auto -I lost+found "$@"
command ls -A --color=auto "$@"
fi
}
-
+# locate -i without the need to quote spaces
lcn() { locate -i "*$**"; }
-
+# ll with directories grouped
lg() { LC_COLLATE=C.UTF-8 ll --group-directories-first "$@"; }
-
+# ll mod time sorted
lt() { ll -tr "$@"; }
-
+# quick ll -d
lld() { ll -d "$@"; }
ccomp ls l lg lt lld ll
done
}
-low() { # make filenames lowercase, remove bad chars
+# make filenames lowercase, remove bad chars
+low() {
local arg new dir f
for arg; do
arg="${arg%%+(/)}" # remove trailing slashes. assumes we have extglob on.
fi
f="${arg##*/}"
new="${f,,}" # downcase
- new="${new//[^[:alnum:]._-]/_}" # sub bad chars
+ # shellcheck disable=SC2031 # seems like a shellcheck bug
+ new="${new//[^a-zA-Z0-9._-]/_}" # sub bad chars
new="${new#"${new%%[[:alnum:]]*}"}" # remove leading/trailing non-alnum
new="${new%"${new##*[[:alnum:]]}"}"
# remove bad underscores, like __ and _._
k() { # history search
grep -iP --binary-files=text "$@" ${HISTFILE:-~/.bash_history} | tail -n 80 || [[ $? == 1 ]];
}
-ks() { # history search
+ks() { # history search with context
+ # args are an extended regex used by sed
+ history | sed -nr "h;s/^\s*(\S+\s+){4}//;/$*/{g;p}" | tail -n 80 || [[ $? == 1 ]];
+}
+ksu() { # history search unique
grep -P --binary-files=text "$@" ${HISTFILE:-~/.bash_history} | uniq || [[ $? == 1 ]];
}
-ccomp grep k ks
+# remove lines from history matching $1
+#
+# todo: id like to do maybe a daily or hourly cronjob to
+# check that my history file size is increasing. Ive had it
+# inexplicably truncated in the past.
+histrm() {
+  history -n
+  HISTTIMEFORMAT='' history | awk -v IGNORECASE=1 '{ a=$1; sub(/^ *[^ ]+ */, "") }; /'"$*"'/'
+  # fixed prompt typo: "contrl-c" -> "control-c"
+  read -r -p "press anything but control-c to delete"
+  # delete in descending order (tac) so earlier entry numbers stay valid
+  for entry in $(HISTTIMEFORMAT='' history | awk -v IGNORECASE=1 '{ a=$1; sub(/^ *[^ ]+ */, "") }; /'"$*"'/ { print a }' | tac); do
+    history -d "$entry"
+  done
+  history -w
+}
+# history without the date
+histplain() {
+ history "$@" | cut -d' ' -f 7-
+}
+
+ccomp grep k ks ksu histrm
+
+# show makefile targets of current directory makefile.
make-targets() {
- # show make targets, via http://stackoverflow.com/questions/3063507/list-goals-targets-in-gnu-make
+ # via http://stackoverflow.com/questions/3063507/list-goals-targets-in-gnu-make
make -qp | awk -F':' '/^[a-zA-Z0-9][^$#\/\t=]*:([^=]|$)/ {split($1,A,/ /);for(i in A)print A[i]}'
}
+# mkdir + cd
mkc() {
mkdir "$1"
c "$1"
}
ccomp mkdir mkc
-
+# mktemp -d + cd
mkct() {
- mkc $(mktemp -d)
+ mkc "$(mktemp -d)"
}
-
-mkt() { # mkdir and touch file
+# mkdir the last arg, then cp the remaining args into it
+mkcp() {
+ mkdir -p "${@: -1}"
+ cp "${@:1:$#-1}" "${@: -1}"
+}
+# mkdir the last arg, then mv the remaining args into it
+mkmv() {
+ mkdir -p "${@: -1}"
+ mv "${@:1:$#-1}" "${@: -1}"
+}
+# mkdir directory underneath $1, then touch $1
+mkt() {
local path="$1"
mkdir -p "$(dirname "$path")"
touch "$path"
# shellcheck disable=SC2032
mkdir() { command mkdir -p "$@"; }
+# nagstamon fix
nags() {
# https://github.com/HenriWahl/Nagstamon/issues/357
- if ! pgrep -f /usr/lib/notification-daemon/notification-daemon >/dev/null; then
- /usr/lib/notification-daemon/notification-daemon &
+ if ! pgrep -f /usr/bin/dunst >/dev/null; then
+ /usr/bin/dunst &
fi
/usr/bin/nagstamon &
}
+# profanity tmux
+profsrc() {
+ screen -L profanity a
+}
+
+# i dont want to wait for konsole to exit...
+prof() {
+ command prof &>/dev/null &
+}
+# self chat. chatlog messages to myself.
+sc() {
+ while read -r l; do
+ printf '\033[1A\033[K'; printf "%s\n" "$l"| ts "%F %T" | tee -a /p/self-chat.log
+ done
+}
+
+# simple alias
nmt() {
-  s nmtui-connect "$@"
+  # cant use s because sudo -i doesnt work for passwordless sudo command
+  # NOTE(review): the branches look inverted relative to the usual
+  # expectation -- EUID 0 is already root yet gets sudo (a harmless
+  # no-op), while non-root runs without it. Confirm this matches the
+  # intended passwordless-sudo / polkit setup.
+  case $EUID in
+    0)
+      sudo nmtui-connect "$@"
+      ;;
+    *)
+      nmtui-connect "$@"
+      ;;
+  esac
}
+# nullglob set, track original state.
+ngset() {
+ if shopt nullglob >/dev/null; then
+ ngreset=false
+ else
+ shopt -s nullglob
+ ngreset=true
+ fi
+}
+# nullglob reset
+ngreset() {
+ if $ngreset; then
+ shopt -u nullglob
+ fi
+}
+
+# empty exim panic log
nopanic() {
# shellcheck disable=SC2024
- sudo tee -a /var/log/exim4/paniclog-archive </var/log/exim4/paniclog; sudo truncate -s0 /var/log/exim4/paniclog
+ ngset
+ for f in /var/log/exim4/paniclog /var/log/exim4/*panic; do
+ base=${f##*/}
+ if [[ -s $f ]]; then
+ echo ================== $f =============
+ s tee -a /var/log/exim4/$base-archive <$f
+ s truncate -s0 $f
+ fi
+ done
+ ngreset
}
+
+ping() { command ping -O "$@"; }
p8() { ping "$@" 8.8.8.8; }
p6() { ping6 "$@" 2001:4860:4860::8888; }
pkx() { # package extract
local pkg cached tmp f
- c $(mktemp -d)
+ c "$(mktemp -d)"
pkg=$1
# shellcheck disable=SC2012
- cached=$(ls -t /var/cache/apt/archives/$pkg* | tail -n1 2>/dev/null) ||:
+ cached=$(ls -t /var/cache/apt/archives/${pkg}_* 2>/dev/null | tail -n1 2>/dev/null) ||:
if [[ $cached ]]; then
- cp $cached .
+ m cp $cached .
else
- aptitude download $pkg || return 1
+ m aptitude download $pkg || return 1
fi
tmp=(*); f=${tmp[0]} # only 1 expected
- ex $f
- rm -f $f
+ m ex $f
+ m rm -f $f
}
# pgrep and kill
pk1() {
-  local pid
-  pid=($(pgrep -f "$*"))
-  case ${#pid[@]} in
+  local tmpf
+  local -a pids
+  # ||: so a pgrep miss (nonzero exit) doesn't trip the err trap
+  tmpf=$(pgrep -f "$*") ||:
+  # mapfile on an empty string yields a 1-element array holding "",
+  # which would wrongly take the single-pid branch and run ps -F "";
+  # only populate pids when pgrep actually matched something.
+  pids=()
+  if [[ $tmpf ]]; then
+    mapfile -t pids <<<"$tmpf"
+  fi
+  case ${#pids[@]} in
    1)
      # shellcheck disable=SC2128
      {
-      ps -F $pid
-      m kill $pid
+      ps -F ${pids[0]}
+      m kill ${pids[0]}
      }
      ;;
    0) echo "no pid found" ;;
    *)
-      ps -F ${pid[@]}
+      ps -F "${pids[@]}"
      ;;
  esac
}
+# ps grep
psg () {
local x y help
help="Usage: psg [--help] GREP_ARGS
fi
x=$(ps -eF)
# final grep is because some commands tend to have a lot of trailing spaces
- y=$(echo "$x" | grep -iP "$@" | grep -o '.*[^ ]') ||:
+ y=$(echo "$x" | sed -r 's,//[^[:space:]:@/]+:[^[:space:]:@/]+@,//REDACTED_URL_USER@PASS/,g' | grep -iP "$@" | grep -o '.*[^ ]') ||:
if [[ $y ]]; then
echo "$x" | head -n 1 || [[ $? == 141 ]]
echo "$y"
fi
-}
+}
+
+pubip() { curl -4s https://icanhazip.com; }
+pubip6() { curl -6s https://icanhazip.com; }
+whatismyip() { pubip; }
+
+
+q() { # quietly start / launch a program
+ "$@" &> /dev/null & disown
+ # online, I ran into a version that runs in a subshell with ( ), I
+ # suppose that might quiet some programs that try to talk to the tty
+ # on their own. I'll just wait and see if I ever have the need for
+ # that.
+}
+# quiet , but keep it as a job
+qk() {
+ "$@" &> /dev/null &
+}
+
+# quietly run command and then redisplay prompt on the same line.
+qr() {
+ local ret=0
+ "$@" &>/dev/null || ret=$?
+ # https://gist.github.com/fnky/458719343aabd01cfb17a3a4f7296797
+ # [2K = erase whole line
+ # [1A = go up one line. aka tput cuu 1
+ echo -ne "\e[1A\e[2K"
+ return $ret
+}
+# Execute then redo the prompt back at the original spot.
+# we = random easy to type command.
+#
+# Also echoes a block after the end of the output. If we didn't do that,
+# it would intermingle with previous output of this command.
+#
+# I wondered about doing this automatically for every command,
+# and I found https://unix.stackexchange.com/questions/562018/run-every-bash-command-through-a-function-or-wrapper
+# which gives me the idea that I could rebind enter to first
+# prefix the current command with a wrapper.
+# We could also use the debug trap to define a function of the about to be executed command,
+# but I don't know how to modify an existing function.
+#
+# todo: consider better handling of when linec > LINES
+#
+# todo: create a command which deletes the lines beneath the cursor (one probably already exists).
+we() {
+ local ret=0 out linec i tmp cur_line empty_lines clear_lines tmpf
+
+ # for wemode
+ while [[ $1 == we ]]; do
+ shift
+ done
+ # Give up if our command is part of a pipeline.
+ if ! test -t 1 || ! test -t 2; then
+ "$@" || ret=$?
+ return $ret
+ fi
-pubip() { curl -4s https://icanhazip.com; }
-pubip6() { curl -6s https://icanhazip.com; }
-whatismyip() { pubip; }
+ tmpf=$(mktemp)
+ # note: Another way to do this without redirection would be with tput
+ # sc and tput rc, but if our command output past the bottom line of
+ # the terminal, we'd be restoring into the middle of its output.
+ "$@" &>$tmpf || ret=$?
+
+ # we can't do this because inside the {, some terminal escape sequences don't work right.
+ #"$@" |& { read -r -d '' out
+ out=$(cat $tmpf)
+ rm -- $tmpf
+ linec=0
+ if [[ $out ]]; then
+ # wc gives us 1 when out is an empty string because $() is not newline terminated.
+ linec=$(wc -l <<<"$out")
+ fi
+ # always clear the very next line after our prompt.
+ echo -ne "\e[2K"
+ clear_lines=$(( linec - 1 ))
+ if (( clear_lines > 0 )); then
+ # https://stackoverflow.com/questions/2575037/how-to-get-the-cursor-position-in-bash
+ # note, echoing $tmp at this point wont show anything because it is
+ # still an escape sequence.
+ IFS='[;' read -p $'\e[6n' -d R -rs tmp
+ tmp="${tmp%%;*}"
+ cur_line="${tmp##*[^0-9]}"
+ empty_lines=$(( LINES - cur_line ))
+ if (( empty_lines < linec )); then
+ clear_lines="$empty_lines"
+ fi
+ for (( i=0; i < linec - 1; i++ )); do
+ # 1B = go down one line
+ echo -ne "\e[1B\e[2K"
+ done
+ echo -ne "\e[${clear_lines}A\e[2K"
+ fi
+ if (( linec > 0 )); then
+ printf "%s\n█\n" "$out"
+ else
+ printf "█\n"
+ fi
+ # see qr for escape code explanation
+ echo -ne "\e[$(( linec + 2 ))A\e[2K"
+ return $ret
+}
-q() { # start / launch a program in the backround and redir output to null
- "$@" &> /dev/null &
+# mode where every command is automatically wrapped in we()
+wemode() {
+ # c-space to set mark, eOH is begining of line (dunno how to press that
+ # on a keyboard). c-q is jump back to mark. c-j = alternate enter
+ #
+ # Another interesting variant of this which would handle pipelines
+ # would be to take "original | cmd" and make it be: "t() { original |
+ # cmd; }; we t"
+ #
+ # This has an annoying flaw that if we run a command from history that
+ # already starts with we, it adds another we. Might be able to fix it
+ # with some readline functionality, or the stackoverflow page has a
+ # bit about editing the last history entry, which I want to avoid
+ # saving the history file. It isn't a big deal, I'm just going to
+ # leave it. One idea is: history is editable, we could press up,
+ # remove the we, then press down.
+ bind '"\C-m": "\e \eOHwe \C-q\C-j"'
+}
+# turn off wemode
+weoff() {
+ bind '"\C-m": accept-line'
+}
+
+_bn() {
+ local line lwlc i
+ "$@" |& while read -r line; do
+ # lwlc = line wrapped line count.
+ # https://stackoverflow.com/questions/17998978/removing-colors-from-output
+ lwlc=$(( $(printf %s "$line" | sed -r "s/\x1B\[([0-9]{1,3}(;[0-9]{1,2};?)?)?[mGK]//g" | wc -c ||:) / COLUMNS + 1 ))
+ # from man terminfo
+ # tput sc = \e7 = save cursor
+ # tput rc = \e8 = restore cursor
+ # tput hpa 0 = \e[1G = move cursor to column 0
+ # tput il X = \e[XL = insert X lines
+ # tput ind = \eD = (according to
+ # https://superuser.com/questions/1106674/how-to-add-blank-lines-above-the-bottom-in-terminal
+ # But I can't verify because cannot be captured into a var from tput.)
+ #\e[XA = up X lines
+ for (( i=0; i < lwlc; i++ )); do
+ echo -ne "\eD"
+ done
+ echo -ne "\e7\e[${lwlc}A\e[1G\e[${lwlc}L$line\e8"
+ done
+}
+# Run the command in the background and make its output go above our
+# prompt so it doesn't interfere with it. Especially useful for tailing
+# logs.
+#
+# The name bn is not special.
+#
+# Note: if you want to tail -f | grep, wrap it in a function
+# first, and use grep --line-buffered --color=always (because we are piping to a pipe).
+#
+# Note: colorization will need to be turned on since it captures
+# output to a pipe, eg: SYSTEMD_COLORS=true bn journalctl -f
+#
+bn() {
+ # We wrap a function rather than a long {} so that it looks nicer in
+ # job control output.
+ _bn "$@" &
}
+# save history & exit the shell.
# shellcheck disable=SC2120
r() {
if [[ $HISTFILE ]]; then
# exit "$@" 2>/dev/null
}
-# scp is insecure and deprecated.
+# rsync with nice defaults.
+# scp is a bit insecure and deprecated.
scp() {
- rsync --inplace "$@"
+ local -a arg
+ if [[ $SL_SSH_ARGS ]]; then
+ arg=(-e "ssh $SL_SSH_ARGS")
+ fi
+ rsync "${arg[@]}" -Pt --inplace "$@"
}
+ccomp rsync scp
+# output a random high port.
randport() {
# available high ports are 1024-65535,
# but lets skip things that are more likely to be in use
# reapply bashrc
reb() {
+ # shellcheck disable=SC1090 # expected to not follow
source ~/.bashrc
}
+
rl() {
readlink -f "$@"
}
ccomp readlink rl
+# rsync delete.
rsd() {
# rsync, root is required to keep permissions right.
# rsync --archive --human-readable --verbose --itemize-changes --checksum \(-ahvic\) \
# basically, make an exact copy, use checksums instead of file times to be more accurate
rsync -ahvic --delete "$@"
}
+# like rlu, but dont delete extra files on the remote host.
rsa() {
- # like rlu, but dont delete files on the target end which
- # do not exist on the original end.
rsync -ahvic "$@"
}
+# rsync: use checksums & preserve target modification time.
rst() {
- # rl without preserving modification time.
rsync -ahvic --delete --no-t "$@"
}
-rsu() { # [OPTS] HOST PATH
- # eg. rlu -opts frodo /testpath
+# rsync update a common absolute path.
+# [RSYNC_OPTS] HOST PATH
+rsu() {
+ # eg. rsu -opts frodo /testpath
 # relative paths will expanded with readlink -f.
+ local opts host path
 opts=("${@:1:$#-2}") # 1 to last -2
+ # HOST was documented but never assigned before; $host silently came
+ # from the calling environment.
+ host="${*:$#-1:1}" # second to last
 path="${*:$#}" # last
 if [[ $path == .* ]]; then
 path=$(readlink -f $path)
 fi
- # rync here uses checksum instead of time so we dont mess with
- # unison relying on time as much. g is for group, same reason
- # to keep up with unison.
- m s rsync -rlpchviog --relative "${opts[@]}" "$path" "root@$host:/";
+ m rsync -ahvi --relative --no-implied-dirs "${opts[@]}" "$path" "root@$host:/";
 }
ccomp rsync rsd rsa rst rsu
-# find programs listening on a port
+# find programs listening on a given port
ssp() {
local port=$1
# to figure out these args, i had to look at the man page from git version, as of 2022-04.
s ss -lpn state listening sport = $port
}
+# output info relevant to debugging dns resolution
resolvcat() {
local f
if [[ $(systemctl is-active nscd ||:) != inactive ]]; then
m s nscd -i hosts
fi
f=/etc/resolv.conf
- echo $f:; ccat $f
- hr; s ss -lpn sport = 53
+ ll $f
+ m ccat $f
+ hr; m s ss -lpn sport = 53
if systemctl is-enabled dnsmasq &>/dev/null || [[ $(systemctl is-active dnsmasq ||:) != inactive ]]; then
# this will fail is dnsmasq is failed
hr; m ser status dnsmasq | cat || :
grep '^ *hosts:' /etc/nsswitch.conf
if systemctl is-enabled systemd-resolved &>/dev/null || [[ $(systemctl is-active systemd-resolved ||:) != inactive ]]; then
hr; m ser status systemd-resolved | cat || :
- hr; m systemd-resolve --status | cat
+ hr; m resolvectl status | cat
fi
}
+# simple alias
rcat() {
resolvcat | less
}
+# restart whatever resolver I happen to be using.
reresolv() {
if [[ $(systemctl is-active nscd ||:) != inactive ]]; then
m ser stop nscd
fi
}
+# sed -i plus --follow-symlinks (edit the symlink's target instead of
+# replacing the symlink), an annoyingly long argument which should
+# arguably be the default.
+sedi() {
+ sed -i --follow-symlinks "$@"
+}
+
+
+## piped in lines get shell escaped.
+#
+# todo: test variable assignment with newlines here.
+# https://stackoverflow.com/questions/15783701/which-characters-need-to-be-escaped-when-using-bash
+#
+# beware that it only works on the assumption that any special
+# characters in the input string are intended to be escaped, not to work
+# as special characters.
+shellescape() {
+ # backslash-escape everything outside a conservative safe set. The
+ # remaining expressions only matter for multiline or empty input: a
+ # sole empty line becomes "", and each embedded newline gets a closing
+ # double quote before it and an opening one after it.
+ LC_ALL=C sed -e 's/[^a-zA-Z0-9,._+@%/-]/\\&/g; 1{$s/^$/""/}; 1!s/^/"/; $!s/$/"/'
+}
+
rmstrips() {
ssh fencepost head -n 300 /gd/gnuorg/EventAndTravelInfo/rms-current-trips.txt | less
}
+# run a command with $1 as its umask.
+# NOTE(review): there is no subshell or save/restore here, so this
+# permanently changes the current shell's umask. Harmless when run in a
+# separate process (eg. via s/sudo as `se` does below); confirm that is
+# the only intended use.
+urun () {
+ umask $1
+ shift
+ "$@"
+}
+# sudo wrapper: also record that sudo was used. DID_SUDO is presumably
+# read elsewhere (not visible in this chunk).
+# NOTE(review): DID_SUDO is only set when sudo succeeds — confirm
+# that is intended.
+sudo () {
+ command sudo "$@" || return $?
+ DID_SUDO=true
+}
+# sudo wrapper.
s() {
# background
# I use a function because otherwise we cant use in a script,
"$@"
fi
}
-sb() { # sudo bash -c
- # use sb instead of s is for sudo redirections,
- # eg. sb 'echo "ok fine" > /etc/file'
+# sudo bash -c
+# use sb instead of s is for sudo redirections,
+# eg. sb 'echo "ok fine" > /etc/file'
+sb() {
# shellcheck disable=SC2034
local SUDOD="$PWD"
sudo -i bash -c "$@"
}
-ccomp sudo s sb
+# sudo with umask 0077, amenable to making files containing secrets.
+se() { s urun 0077 "$@"; }
+ccomp sudo s sb se
-safe_rename() { # warn and dont rename if file exists.
+# mv, but warn and dont rename if the target file exists.
+safe_rename() {
# mv -n exists, but it\'s silent
if [[ $# != 2 ]]; then
- echo safe_rename error: $# args, need 2 >2
+ echo safe_rename error: $# args, need 2 >&2
return 1
fi
if [[ $1 != "$2" ]]; then # yes, we want to silently ignore this
fi
}
-
+# simple alias
sd() {
sudo dd status=none of="$1"
}
-
+# service wrapper.
ser() {
if type -p systemctl &>/dev/null; then
s systemctl "$@"
s service $2 $1
fi
}
+# simple alias
serstat() {
systemctl -n 40 status "$@"
}
+# journalctl -f FINAL_ARG, then systemctl $@ .
+# detects any existing background journalctl job so we can
+# run multiple times without making duplicate output.
+serj() {
+ local service ret jobs_out regex
+ ret=0
+ service="${*: -1}"
+ jobs_out=$(jobs 2>/dev/null)
+ # matches a `jobs` line like: [1]+  Running   journalctl -qn2 -f -u SVC &
+ # (previously computed but never used, and had a [[:space]] typo)
+ regex="Running[[:space:]]+journalctl -qn2 -f -u $service &"
+
+ if [[ ! $jobs_out =~ $regex ]]; then
+ journalctl -qn2 -f -u "$service" &
+ # let journalctl print before systemctl output gets mixed in
+ sleep 3
+ fi
+ s systemctl "$@" || ret=$?
+ if (( ret != 0 )); then
+ echo "serj: note: systemctl returned nonzero, but journalctl worked and is running."
+ return $ret
+ fi
+}
+
+
seru() { systemctl --user "$@"; }
# like restart, but do nothing if its not already started
srestart() {
if [[ -s $file ]]; then
sed -ri -f - "$file" <<EOF
# remove existing keys
-/ *\[$section\]/,/^ *\[[^]]+\]/{/^\s*$key[[:space:]=]/d}
+/ *\[$section\]/,/^ *\[[^]]+\]/{/^\s*${key}[[:space:]=]/d}
# add key
/^\s*\[$section\]/a $key=$value
# from section to eof, do nothing
ser enable $service
fi
}
+# service off
soff () {
for service; do
# ignore services that dont exist
done
}
+# Systemd Grep Unit file list.
sgu() {
systemctl list-unit-files | rg "$@"
}
-
+# test for whether we generally want to do sk on the file: a regular
+# (non-symlink) file that istext says is text and whose first line is a
+# bash shebang.
+sk-p() {
+ # was [[ ! -L $f ]]: that only worked via the caller's local f; use
+ # our own argument like the rest of the tests do.
+ [[ ! -L $1 ]] && istext "$1" && [[ $(head -n1 "$1" 2>/dev/null) == '#!/bin/bash'* ]]
+}
+# wrapper for shellcheck with better defaults.
 sk() {
+ # see https://savannah.gnu.org/maintenance/fsf/bash-style-guide/ for justifications
+ local quotes others ret
+ # initialize: ret was left unset when shellcheck passed
+ ret=0
+ quotes=2048,2068,2086,2206,2254
+ others=2029,2032,2033,2054,2164,2317
+ shellcheck -x -W 999 -e $quotes,$others "$@" || ret=$?
+ if (( ret >= 1 )); then
+ echo "A template comment to disable is now in clipboard. eg: # shellcheck disable=SC2206 # reason"
+ cbs "# shellcheck disable=SC"
+ return $ret
+ fi
+}
+# sk with quotes. For checking scripts that we expect to take untrusted
+# input in order to verify we quoted vars.
+skq() {
+ local others
+ others=2029,2033,2054,2164
+ shellcheck -W 999 -x -e $others "$@" || return $?
+}
- # note, if you do something like this
- # x=( prefix* )
- # then disable the warning with:
- # shellcheck disable=SC2206 # globbing is intended
-
- # 2029: "unescaped, this expands on the client side.": yes, I know how ssh works
- # 2164: "Use 'cd ... || exit' or 'cd ... || return' in case cd fails.": i have automatic error handling
- # 2086: unquoted $var: Quoting every var I set is way too much quotes.
- # 2068: Double quote array expansions to avoid re-splitting elements: same as above.
- # 2033: command arg is a function name: too many false positives.
+# sk on all modified & new files in current git repo. must git add for new files.
+skmodified() {
+ local f
+ # `i s` appears to be a git-status wrapper defined elsewhere — confirm.
+ # The unquoted $(...) relies on word splitting, so paths containing
+ # whitespace are not handled.
+ for f in $(i s | awk '$1 == "modified:" {print $2}; $1 == "new" {print $3}'); do
+ if sk-p "$f"; then
+ sk $f ||:
+ fi
+ done
+}
- # these ones I had disabled, but without a good written explanation, so enabling them temporarily
- # 2046: unquoted $(cmd)
- # 2119: Functions with optional args get bad warnings when none are passed.
+# sk on all the files in current git repo
+skgit() {
+ local f toplevel orig_dir tmp
+ local -a ls_files sk_files
+ # explicit init: both are tested/expanded below even when never set
+ orig_dir=
+ sk_files=()
+ toplevel=$(git rev-parse --show-toplevel)
+ if [[ $PWD != "$toplevel" ]]; then
+ orig_dir=$PWD
+ cd $toplevel
+ fi
+ # tracked & untracked files
+ tmp=$(git ls-files && git ls-files --others --exclude-standard)
+ mapfile -t ls_files <<<"$tmp"
+ for f in "${ls_files[@]}"; do
+ # an empty repo makes mapfile yield a single empty entry; skip it
+ if [[ $f ]] && sk-p "$f"; then
+ sk_files+=("$f")
+ fi
+ done
+ sk "${sk_files[@]}"
+ if [[ $orig_dir ]]; then
+ cd $orig_dir
+ fi
+}
- shellcheck -W 999 -x -e 2029,2164,2086,2068,2033 "$@" || return $?
+# Run a command; buffer its output, and if the command runs longer than
+# 4 seconds (hardcoded; change if desired) dump the buffer and stream
+# the rest. A command that finishes sooner produces no output.
+slowdo() {
+ local line lines ret start_sec=$EPOCHSECONDS
+ lines=
+ "$@" |& while true; do
+ ret=0
+ read -r -t1 line || ret=$?
+ if (( ret == 0 )); then
+ lines+="$line"$'\n'
+ elif (( ret != 142 )); then
+ # not the 1s read timeout (128+SIGALRM): eof or error, so stop
+ break
+ fi
+ if (( EPOCHSECONDS > start_sec + 4 )); then
+ printf "%s" "$lines"
+ cat
+ break
+ fi
+ done
 }
-# sl: ssh, but firsh rsync our bashrc and related files to a special
-# directory on the remote host if needed.
+#### sl: ssh wrapper, but maybe first rsync files that we configure and
+# always source our .bashrc on the remote.
-# Some environment variables and files need to be setup for this to work
-# (mine are set at the beginning of this file)
+# TODO: this needs better documentation.
+
+# Usage: all the same args as ssh + a few below. Note, combining options
+# that have arguments with ones that dont, like -4oOption is not
+# supported.
+
+## Required setup
+
+# Sorry these have no defaults, you can read my settings in this file.
# SL_FILES_DIR: Environment variable. Path to folder which should at
-# least have a .bashrc file or symlink. This dir will be rsynced to ~ on
-# remote hosts (top level symlinks are resolved) unless the host already
-# has a $SL_FILES_DIR/.bashrc. In that case, we assume it is a host you
-# control and sync files to separately and already has the ~/.bashrc you
-# want. The remote bash will also take its .inputrc config from this
-# folder (default of not existing is fine). Mine looks like this:
-# https://iankelling.org/git/?p=distro-setup;a=tree;f=sl/.iank
+# least have a .bashrc file or symlink to it. This dir will be rsynced
+# to ~ on remote hosts (top level symlinks are resolved) unless the host
+# already has a $SL_FILES_DIR/.bashrc. In that case, we assume it is a
+# host you control and sync files to separately and already has the
+# ~/.bashrc you want. The remote bash will also take its .inputrc config
+# from this folder (default of not existing is fine). Mine looks like
+# this: https://iankelling.org/git/?p=distro-setup;a=tree;f=sl/.iank
# SL_INFO_DIR: Environment variable. This folder stores info about what
# we detected on the remote system and when we last synced. It will be created
# remote system, you can use sl --rsync, or the function for that slr
# below.
-# SL_TEST_CMD: Env var. Meant to be used to vary the files synced
-# depending on the remote host. Run this string on the remote host the
-# first time sl is run (or if we run slr). The result is passed to
-# SL_TEST_HOOK. For example,
-# export SL_TEST_CMD=". /etc/os-release ; echo \${VERSION//[^a-zA-Z0-9]/}"
-# SL_TEST_HOOK: Env var. It is run as $SL_TEST_HOOK. This can set
-# $SL_FILES_DIR to vary the files synced.
+## Optional settings
+
+# --rsync Forget about any previous rsync we did. The most common case
+# is that the ssh target does not have files we previously rsynced for
+# various reasons. I have a wrapper for this called slr below.
-# SL_RSYNC_ARGS: Env var. String of arguments passed to rsync. For
-# example to exclude files within a directory. Note, excluded
-# files wont be deleted on rsync, you can add --delete-excluded
-# to the rsync command if that is desired.
+# SL_TEST_CMD / --sl-test-cmd CMD: Env var or cli option. If set, we run
+# this string on the remote host the first time sl is run (or if we run
+# slr). Its standard out is passed to SL_TEST_HOOK which gets run
+# locally. It is meant to be used to vary the files synced depending on
+# the remote host. For example, export SL_TEST_CMD=". /etc/os-release ;
+# echo \${VERSION//[^a-zA-Z0-9]/}". cli option overrides env var.
+
+# SL_TEST_HOOK / --sl-test-hook HOOK: Env var or cli option. See SL_TEST_CMD
+# above. It is run unquoted (with expansion). This can set $SL_FILES_DIR
+# to vary the files synced. cli option overrides env var.
+
+# SL_RSYNC_ARGS / --sl-rsync-args ARGS: Env var or cli option. String of
+# arguments passed to rsync. For example to exclude files within a
+# directory. Note, excluded files wont be deleted on rsync, you can add
+# --delete-excluded to the rsync command if that is desired. cli
+# overrides env var.
# SL_SSH_ARGS: Env var. Default arguments passed to ssh.
# .bashrc. This means the outer shell still ran the default .bashrc,
# but that is the best we can do.
- local now args remote dorsync haveinfo tmpa sshinfo tmp tmp2 type info_sec force_rsync \
+ local now args remote dorsync haveinfo tmpa sshinfo tmp tmp2 host_type info_sec force_rsync \
sync_dirname testcmd extra_info testbool files_sec sl_test_cmd sl_test_hook
declare -a args tmpa
args=($SL_SSH_ARGS)
+ # when we last checked, these were the valid ssh args:
+
# ssh [-1246Antivivisectionist] [-b bind_address] [-c cipher_spec] [-D [bind_address:]port]
# [-E log_file] [-e escape_char] [-F configfile] [-I pkcs11] [-i identity_file] [-L address]
# [-l login_name] [-m mac_spec] [-O ctl_cmd] [-o option] [-p port] [-Q query_option]
# [-O ctl_cmd] [-o option] [-p port] [-Q query_option] [-R address]
# [-S ctl_path] [-W host:port] [-w local_tun[:remote_tun]]
+ verbose_arg=false
force_rsync=false
if [[ $1 == --rsync ]]; then
force_rsync=true
shift
fi
-
+ # shellcheck disable=SC2153 # intentional
sl_test_cmd=$SL_TEST_CMD
+ # shellcheck disable=SC2153 # intentional
sl_test_hook=$SL_TEST_HOOK
+ # shellcheck disable=SC2153 # intentional
sl_rsync_args=$SL_RSYNC_ARGS
while [[ $1 ]]; do
case "$1" in
case "$1" in
# note we dont support things like -4oOption
-[46AaCfGgKkMNnqsTtVvXxYy]*)
+ if [[ $1 == *v* ]]; then
+ # todo: try running slowdo if this is false
+ verbose_arg=true
+ fi
args+=("$1"); shift
;;
-[bcDEeFIiJLlmOopQRSWw]*)
shift
if [[ ! $SL_INFO_DIR ]]; then
- echo error: missing '$SL_INFO_DIR' env var >&2
+ echo 'error: missing SL_INFO_DIR env var' >&2
return 1
fi
- dorsync=false
+ do_rsync=false
haveinfo=false
- tmpa=($SL_INFO_DIR/???????????"$remote")
+ tmpa=($SL_INFO_DIR/??????????-????-"$remote")
sshinfo=${tmpa[0]}
if [[ -e $sshinfo ]]; then
if $force_rsync; then
fi
if $haveinfo; then
tmp=${sshinfo[0]##*/}
- tmp2=${tmp::11}
- type=${tmp2: -1}
+ tmp2=${tmp:11} # skip 11 chars
+ host_type=${tmp2%%-*}
extra_info=$(cat $sshinfo)
+ # debug
+ #echo d1 $host_type
else
# we test for string to know ssh succeeded
testbool="test -e $SL_FILES_DIR/.bashrc -a -L .bashrc -a -v LC_USEBASHRC"
command ssh -v "${args[@]}" "$remote"
fi
if [[ $tmp == y* ]]; then
- type=a
+ # yes test result, no need to rsync this host.
+ host_type=skip
else
- dorsync=true
- type=b
+ do_rsync=true
+ host_type=sync
fi
extra_info="${tmp:1}"
fi
RSYNC_RSH="ssh ${args[*]}" $sl_test_hook "$extra_info" "$remote"
fi
- if $haveinfo && [[ $type == b ]]; then
+ if $haveinfo && [[ $host_type == sync ]]; then
info_sec=${tmp::10}
- read files_sec _ < <(find -L $SL_FILES_DIR -printf "%T@ %p\n" | sort -nr || [[ $? == 141 || ${PIPESTATUS[0]} == 32 ]] )
+ read -r files_sec _ < <(find -L $SL_FILES_DIR -printf "%T@ %p\n" | sort -nr || [[ $? == 141 || ${PIPESTATUS[0]} == 32 ]] )
files_sec=${files_sec%%.*}
+ # info_sec is the last time we synced to that host. files_sec is the
+ # greatest mod time out of files we want to sync.
+
if (( files_sec > info_sec )); then
- dorsync=true
+ #echo "d4 $files_sec > $info_sec"
+ do_rsync=true
rm -f $sshinfo
fi
fi
sync_dirname=${SL_FILES_DIR##*/}
if [[ ! $SL_FILES_DIR ]]; then
- echo error: missing '$SL_FILES_DIR' env var >&2
+ echo 'error: missing SL_FILES_DIR env var' >&2
return 1
fi
-
- if $dorsync; then
+ if $do_rsync; then
+ # todo: it would be nice if we did this with -v, but
+ # only showed the output if the command lasted more than
+ # about 4 seconds.
RSYNC_RSH="ssh ${args[*]}" m rsync -rptL --delete $sl_rsync_args $SL_FILES_DIR "$remote":
fi
- if $dorsync || ! $haveinfo; then
- sshinfo=$SL_INFO_DIR/$EPOCHSECONDS$type"$remote"
+ if $do_rsync || ! $haveinfo; then
+ sshinfo=$SL_INFO_DIR/$EPOCHSECONDS-$host_type-"$remote"
[[ -e $SL_INFO_DIR ]] || mkdir -p $SL_INFO_DIR
+ # debug
+ #echo d3: $sshinfo
printf "%s\n" "$extra_info" >$sshinfo
chmod 666 $sshinfo
fi
- if [[ $type == b ]]; then
+ if [[ $host_type == sync ]]; then
if (( ${#@} )); then
- # Theres a couple ways to pass arguments, im not sure whats best,
- # but relying on bash 4.4+ escape quoting seems most reliable.
+ # in the past, I used ${@@Q} passed to a bash subshell. Then
+ # later, I noticed it didn't handle spaces as normal ssh does, and
+ # didn't see any benefit to doing it that way (perhaps forgot what
+ # I originally saw in it).
command ssh "${args[@]}" "$remote" \
- LC_USEBASHRC=t bash -c '.\ '$sync_dirname'/.bashrc\;"\"\$@\""' bash ${@@Q}
+ LC_USEBASHRC=t . $sync_dirname/.bashrc\; "$@"
elif [[ ! -t 0 ]]; then
# This case is when commands are being piped to ssh.
# Normally, no bashrc gets sourced.
fi
else
if [[ -t 0 ]]; then
- LC_USEBASHRC=t command ssh "${args[@]}" "$remote" ${@@Q}
+ LC_USEBASHRC=t command ssh "${args[@]}" "$remote" "$@"
else
command ssh "${args[@]}" "$remote" LC_USEBASHRC=t bash
fi
slr() {
sl --rsync "$@"
}
-sss() { # ssh solo
- sl -oControlMaster=no -oControlPath=/ "$@"
+
+# ssh with multiplexing.
+ssm() {
+ sl -oControlPath=$HOME/.cssh_%u_%h_%p_%r "$@"
+}
+# ssh solo, without multiplexing. # currently the default, but I might switch back.
+sss() {
+ ssh -oControlPath=none "$@"
+}
+
+# ssh that respects -i as the only key to use. Good for debugging.
+#
+# Without ControlMaster/ControlPath, it would reuse an existing ssh
+# connection.
+#
+# Without IdentityAgent, it would send keys in the ssh-agent (could
+# also be fixed by ssh-add -D to delete them).
+#
+# Without -F, we'd get identities specified in our ssh config.
+#
+# Without AddKeysToAgent=no, we'd get keys added to the agent we might
+# not want, and without the ask setting.
+sshi() {
+ ssh -oControlMaster=no -oControlPath=/ -oAddKeysToAgent=no -F none -oIdentityAgent=none "$@"
+}
+
# kill off old shared socket then ssh
ssk() {
m ssh -O exit "$@" || [[ $? == 255 ]]
ccomp ssh sl slr sss ssk
# plain ssh
ssh() {
- if [[ $TERM == alacritty || $TERM == xterm-kitty ]]; then
- TERM=xterm-256color LC_USEBASHRC=t command ssh "$@"
- else
- LC_USEBASHRC=t command ssh "$@"
- fi
+ LC_USEBASHRC=t command ssh "$@"
}
+# log with script. timing is $1.t and script is $1.s
+# -l to save to ~/typescripts/
+# -t to add a timestamp to the filenames
 slog() {
- # log with script. timing is $1.t and script is $1.s
- # -l to save to ~/typescripts/
- # -t to add a timestamp to the filenames
 local logdir do_stamp arg_base
 (( $# >= 1 )) || { echo "arguments wrong"; return 1; }
 logdir="/a/dt/"
+ # without this init, an empty $do_stamp below expands to the null
+ # command, which has status 0, so the timestamp got appended even
+ # without -t.
+ do_stamp=false
 while getopts "lt" option
 do
 case $option in
- l ) arg_base=$logdir ;;
- t ) do_stamp=true ;;
+ l) arg_base=$logdir ;;
+ t) do_stamp=true ;;
+ *)
+ echo error: bad option
+ return 1
+ ;;
 esac
 done
 shift $((OPTIND - 1))
 $do_stamp && arg_base+=$(date +%F.%T%z)
 script -t $arg_base.s 2> $arg_base.t
 }
-splay() { # script replay
+# script replay
+splay() {
#logRoot="$HOME/typescripts/"
#scriptreplay "$logRoot$1.t" "$logRoot$1.s"
scriptreplay "$1.t" "$1.s"
}
+# sudo redo. be aware, this command may not work right on strange
+# distros or earlier software
sr() {
- # sudo redo. be aware, this command may not work right on strange distros or earlier software
if [[ $# == 0 ]]; then
sudo -E bash -c -l "$(history -p '!!')"
else
command srm -ll "$@"
}
+# scp a script then ssh and run it.
 srun() {
 scp $2 $1:/tmp
- ssh $1 /tmp/${2##*/} $(printf "%q\n" "${@:2}")
+ # build one remote command string with space-separated escaped args:
+ # passing the newline-separated "%q\n" output as its own quoted ssh
+ # argument made the remote shell run each later arg as a command.
+ # NOTE(review): "${@:2}" re-sends the local script path as the first
+ # remote arg; ${@:3} may be what's intended — confirm before changing.
+ ssh $1 "/tmp/${2##*/} $(printf '%q ' "${@:2}")"
 }
-
+# swap 2 files
swap() {
local tmp
tmp=$(mktemp)
mv $tmp $2
}
-tclock() { # terminal clock
+# terminal clock
+tclock() {
local x
clear
date +%l:%_M
}
+# test existence / exists
te() {
- # test existence / exists
local ret=0
for x in "$@"; do
[[ -e "$x" || -L "$x" ]] || ret=1
return $ret
}
+# Turn off my prompt hook function (it makes set -x annoyingly verbose).
psoff() {
# normally, i would just execute these commands in the function.
# however, DEBUG is not inherited, so we need to run it outside a function.
# And we want to run set -x afterwards to avoid spam, so we cram everything
# in here, and then it will run after this function is done.
- PROMPT_COMMAND='trap DEBUG; unset PROMPT_COMMAND; PS1="\w \$ "'
+ # shellcheck disable=SC2178 # intentional
+ PROMPT_COMMAND='trap DEBUG; unset PROMPT_COMMAND; PS1=" \w \$ "'
}
+# Turn on my prompt hook function.
pson() {
- PROMPT_COMMAND=prompt-command
+ PROMPT_COMMAND=(prompt-command)
if [[ $TERM == *(screen*|xterm*|rxvt*) ]]; then
- trap 'settitle "$BASH_COMMAND"' DEBUG
+ trap 'auto-window-title "${BASH_COMMAND:0:40}"' DEBUG
fi
}
-tx() { # toggle set -x, and the prompt so it doesnt spam
+# prometheus node curl
+pnodecurl() {
+ local host
+ host=${1:-127.0.0.1}
+ s curl --cert-type PEM --cert /etc/prometheus/ssl/prometheus_cert.pem --key /etc/prometheus/ssl/prometheus_key.pem --cacert /etc/prometheus/ssl/prom_node_cert.pem --resolve prom_node:9100:$host -v https://prom_node:9100/metrics
+}
+
+# toggle set -x and my custom prompt so it doesnt spam
+tx() {
if [[ $- == *x* ]]; then
set +x
pson
fi
}
+# toggle set -u (nounset)
+tsu() {
+ case $- in
+ *u*) set +u ;;
+ *) set -u ;;
+ esac
+}
+
+# show all processes in the network namespace $1.
+# blank entries appear to be subprocesses/threads
psnetns() {
- # show all processes in the network namespace $1.
- # blank entries appear to be subprocesses/threads
local x netns
netns=$1
ps -w | head -n 1
if [[ $x ]]; then echo "$x"; else echo $l; fi;
done
}
-
+# start a shell in a network namespace connected to no networks.
+nonet() {
+ if ! s ip netns list | grep -Fx nonet &>/dev/null; then
+ s ip netns add nonet
+ fi
+ sudo -E env /sbin/ip netns exec nonet sudo -E -u iank /bin/bash
+}
+# print the command (args joined by spaces), then run it
+m() {
+ printf "%s\n" "$*"
+ "$@"
+}
+# like m, but the printed command goes to stderr
+m2() {
+ printf "%s\n" "$*" >&2
+ "$@"
+}
+
+# better uptime
uptime() {
if type -p uprecords &>/dev/null; then
uprecords -B
fi
}
+# stop and delete given vm(s)
virshrm() {
for x in "$@"; do virsh destroy "$x"; virsh undefine "$x"; done
}
+# todo: dunno what this is about.
vm-set-listen(){
local t
t=$(mktemp)
}
myiwscan() {
- # find input, copy to pattern space, when we find the first field, print the copy in different order without newlines.
- # instead of using labels, we could just match a line and group, eg: /signal:/,{s/signal:(.*)/\1/h}
- sudo iw dev wls1 scan | sed -rn "
+ local i
+ interfaces=$(iw dev | awk '$1 == "Interface" {print $2}')
+ for i in $interfaces; do
+ echo "myiwscan: considering $i"
+ # find input, copy to pattern space, when we find the first field, print the copy in different order without newlines.
+ # instead of using labels, we could just match a line and group, eg: /signal:/,{s/signal:(.*)/\1/h}
+ sudo iw dev $i scan | sed -rn "
s/^\Wcapability: (.*)/\1/;Ta;h;b
:a;s/^\Wsignal: -([^.]+).*/\1/;Tb;H;b
# padded to min width of 20
:b;s/\WSSID: (.*)/\1 /;T;s/^(.{20}(.*[^ ])?) */\1/;H;g;s/(.*)\n(.*)\n(.*)/\2 \3 \1/gp;b
"|sort -r
+ done
+}
+
+# Run script by copying it to a temporary location first,
+# and changing directory, so we don't have any open
+# directories or files that could cause problems when
+# remounting.
+zr() {
+ local prog
+ prog=$(type -p "$1")
+ if [[ ! $prog ]]; then
+ # not a file on PATH (function, builtin, or unknown): run unchanged
+ "$@"
+ return
+ fi
+ cd "$(mktemp -d)"
+ cp -a "$prog" .
+ shift
+ ./"${prog##*/}" "$@"
+}
+
+
+# * spark
+# spark 1 5 22 13 53
+# # => ▁▁▃▂▇
+
+# The MIT License
+# Copyright (c) Zach Holman, https://zachholman.com
+# https://github.com/holman/spark
+
+# As of 2022-10-28, I reviewed github forks that had several newer
+# commits, none had anything interesting. I did a little refactoring
+# mostly to fix emacs indent bug.
+
+# Generates sparklines.
+# Portable echo helper: "-n" suppresses the trailing newline; the "X"
+# prefix keeps the comparison safe when $1 is empty or starts with "-".
+_spark_echo()
+{
+ if [ "X$1" = "X-n" ]; then
+ shift
+ printf "%s" "$*"
+ else
+ printf "%s\n" "$*"
+ fi
+}
+
+
+# Print one tick character per input number, scaled between the min and
+# max of the inputs. Numbers may be separated by spaces and/or commas;
+# fractional parts are truncated.
+spark()
+{
+ local f tc
+ local n numbers=
+
+ # find min/max values
+ local min=0xffffffff max=0
+
+ for n in ${@//,/ }
+ do
+ # on Linux (or with bash4) we could use `printf %.0f $n` here to
+ # round the number but that doesn't work on OS X (bash3) nor does
+ # `awk '{printf "%.0f",$1}' <<< $n` work, so just cut it off
+ n=${n%.*}
+ (( n < min )) && min=$n
+ (( n > max )) && max=$n
+ numbers=$numbers${numbers:+ }$n
+ done
+
+ # print ticks
+ local ticks=(▁ ▂ ▃ ▄ ▅ ▆ ▇ █)
+
+ # use a high tick if data is constant
+ (( min == max )) && ticks=(▅ ▆)
+
+ tc=${#ticks[@]}
+ # fixed-point scale factor: <<8 keeps 8 bits of fraction in int math
+ f=$(( ( (max-min) <<8)/( tc - 1) ))
+ # avoid dividing by zero below
+ (( f < 1 )) && f=1
+
+ for n in $numbers
+ do
+ _spark_echo -n ${ticks[$(( ((n-min)<<8)/f ))]}
+ done
+ _spark_echo
+}
+
+pdfwc() { local f; for f; do echo "$f" "$(pdfinfo "$f" | awk '/^Pages:/ {print $2}')"; done }
+
+
+# nvm install script appended this to my .bashrc. I dont want to run it all the time,
+# so put it in a function.
+nvm-init() {
+ export NVM_DIR="$HOME/.nvm"
+ # shellcheck disable=SC1091 # may not exist, & third party
+ [ -s "$NVM_DIR/nvm.sh" ] && source "$NVM_DIR/nvm.sh" # This loads nvm
+ # shellcheck disable=SC1091 # may not exist, & third party
+ [ -s "$NVM_DIR/bash_completion" ] && source "$NVM_DIR/bash_completion" # This loads nvm bash_completion
+}
+
+
+# echo 366 if the current year is a leap year, else 365.
+# (date -d fails when the current year has no february 29.)
+leap-year() {
+ # was leaking year_days as a global; callers use the echoed value
+ local year_days
+ if date -d 'february 29' &>/dev/null; then
+ year_days=366
+ else
+ year_days=365
+ fi
+ echo $year_days
+}
+
+# on-battery
+on-bat() {
+ # succeed exactly when the AC sysfs node exists and reports offline
+ local ac=/sys/class/power_supply/AC/online
+ [[ -e $ac && $(<$ac) == 0 ]]
+}
+
+# make vim work with my light colortheme terminal.
+vim() {
+ local -a theme=()
+ if [[ ! -e ~/.vimrc ]]; then
+ # no vimrc to set colors, so pick a scheme readable on a light bg
+ theme=(-c ':colorscheme peachpuff')
+ fi
+ command vim "${theme[@]}" "$@"
+}
+
+# ls count. usage: pass a directory, get the number of files.
+# https://unix.stackexchange.com/questions/90106/whats-the-most-resource-efficient-way-to-count-how-many-files-are-in-a-director
+lsc() {
+ local count
+ # shellcheck disable=SC2790 disable=SC2012 # intentional
+ count=$(ls -Uq "$@" | wc -l)
+ printf '%s\n' "$count"
+}
+
+# run then notify. close notification after the next prompt.
+rn() {
+ "$@"
+ dunstify -u critical -h string:x-dunst-stack-tag:profanity "$*"
+ _psrun=(dunstctl close-all)
+}
+n() {
+ dunstify -u critical -h string:x-dunst-stack-tag:profanity n
+ _psrun=(dunstctl close-all)
+}
+
+catnew() {
+ local dir file _
+ dir="$1"
+ # shellcheck disable=SC2030
+ inotifywait -m "$dir" -e create -e moved_to | while read -r _ _ file; do
+ hr
+ cat "$dir/$file"
+ done
+}
+# cat mail
+cm() {
+ catnew /m/md/$1/new
+}
+
+
+# Prepend the GNU all-permissive license header to each given file.
+fsf-sv-header() {
+ local f
+ local -a f_maybe
+ if ! type -p sponge &>/dev/null; then
+ # was "$0: ..."; $0 is just the shell's name in a sourced file
+ echo "fsf-sv-header: error: missing dependency: sudo apt install moreutils" >&2
+ return 1
+ fi
+
+ for f; do
+ echo "adding header to $f"
+ if [[ -s $f ]]; then
+ f_maybe=("$f")
+ else
+ # empty or missing file: write just the header
+ f_maybe=()
+ fi
+ cat - "${f_maybe[@]}" <<EOF | sponge "$f"
+The following is the GNU All-permissive License as recommended in
+<https://www.gnu.org/licenses/license-recommendations.en.html>
+
+Copyright (C) $(date +%Y) Free Software Foundation <sysadmin@fsf.org>
+
+Copying and distribution of this file, with or without modification,
+are permitted in any medium without royalty provided the copyright
+notice and this notice are preserved. This file is offered as-is,
+without any warranty.
+
+Contributions are welcome. See <https://savannah.gnu.org/maintenance/fsf/>.
+
+EOF
+ done
+}
+
+# note, there is also the tool gron which is meant for this, but
+# this is good enough to not bother installing another tool
+jq-lines() {
+ # https://stackoverflow.com/questions/59700329/how-to-print-path-and-key-values-of-json-file-using-jq
+ jq --stream -r 'select(.[1]|scalars!=null) | "\(.[0]|join(".")): \(.[1]|tojson)"' "$@"
}
+tsr() { # ts run
+ "$@" |& ts || return $?
+}
+
+
# * misc stuff
if $use_color && type -p tput &>/dev/null; then
+ # according to man ls: run dircolors to get default.
+ # Did that then modified things.
+ #
+ # default too light:
+ # ex=00;32
+ # ln=01;36
+ #
+
+ # related references:
+ # https://www.bigsoft.co.uk/blog/2008/04/11/configuring-ls_colors
+ #
+ # this explains the codes: https://gist.github.com/thomd/7667642
+ #
+ export LS_COLORS='rs=0:di=01;34:ln=01;31:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=00:tw=30;42:ow=34;42:st=37;44:ex=00;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.avif=01;35:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.webp=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:*~=00;90:*#=00;90:*.bak=00;90:*.crdownload=00;90:*.dpkg-dist=00;90:*.dpkg-new=00;90:*.dpkg-old=00;90:*.dpkg-tmp=00;90:*.old=00;90:*.orig=00;90:*.part=00;90:*.rej=00;90:*.rpmnew=00;90:*.rpmorig=00;90:*.rpmsave=00;90:*.swp=00;90:*.tmp=00;90:*.ucf-dist=00;90:*.ucf-new=00;90:*.ucf-old=00;90:';
+
+ # note: this is nice for a dark background terminal:
+ # https://github.com/trapd00r/LS_COLORS
+ # I would like if there was something similar for light.
+
+
+
term_bold="$(tput bold)"
term_red="$(tput setaf 1)"
term_green="$(tput setaf 2)"
+ # shellcheck disable=SC2034 # expected
term_yellow="$(tput setaf 3)"
term_purple="$(tput setaf 5)"
term_nocolor="$(tput sgr0)" # no font attributes
# old enough to no longer condition on $BASH_VERSION anymore
shopt -s autocd
shopt -s dirspell
- PS1='\w'
if [[ $- == *i* ]] && [[ ! $LC_INSIDE_EMACS ]]; then
PROMPT_DIRTRIM=2
bind -m vi-command B:shell-backward-word
if [[ $SSH_CLIENT || $SUDO_USER ]]; then
unset PROMPT_DIRTRIM
- PS1="\h:$PS1"
fi
# emacs terminal has problems if this runs slowly,
# so I've thrown a bunch of things at the wall to speed it up.
# Rebuild PS1 before each prompt (installed via PROMPT_COMMAND below):
# cwd, optional host/user prefixes, history-file hints, background job
# count, and a prompt char colored by last exit status / cwd ownership.
  prompt-command() {
    local return=$? # this MUST COME FIRST
-    local ps_char ps_color
+
+    PS1='\w'
+    if [[ $SSH_CLIENT ]]; then
+      PS1="\h:$PS1"
+    fi
+    if [[ $USER != iank && $USER != ROOT ]]; then
+      PS1="$USER $PS1"
+    fi
+
+    # all usable colors:
+    # black
+    # green   nonzero exit (pri 1)
+    # purple  default
+    # purple bold
+    # red     pwd different owner & group & not writable (pri 2)
+    # red bold  pwd different owner & group & writable (pri 2)
+    # yellow
+
+    local ps_char ps_color col tmp
+
+
+    # note, I tried reading the current column here in order to add a
+    # space if there was non-newline terminated output from the last
+    # command. However, I found that it made ssh escape char not
+    # work. In bash 5.0.17 after a command using <() it failed and
+    # output something like ";21" to the terminal after the prompt. It
+    # did work in 5.1 something. It did not work in emacs. And, one time
+    # it just randomly output to the terminal. Not worth it.
+
    unset IFS
    if [[ $HISTFILE ]]; then
-      history -a # save history
-    fi
+      # save history. if our filesystem went read-only or something, continue on
+      history -a ||:
+
-    # assigned in brc2
-    # shellcheck disable=SC1303
-    if [[ $jr_pid ]]; then
-      if [[ -e /proc/$jr_pid ]]; then
-        kill $jr_pid
+      # if we aren't in ssh, maybe indicate history file
+      if [[ ! $SSH_CLIENT ]]; then
+        if [[ -e $HOME/.iank-stream-on ]]; then
+          if [[ $HISTFILE == $HOME/.bh ]]; then
+            ps_char="H_P "
+          fi
+        elif [[ $HISTFILE == /a/bin/data/stream_hist ]]; then
+          ps_char="H_S "
+        fi
+        if [[ $HISTFILE == $HOME/.bash_history ]]; then
+          ps_char="H_DEFAULT "
+        fi
      fi
-      unset jr_pid
    fi
-    case $return in
-      0) ps_color="$term_purple"
-         ps_char='\$'
-         ;;
-      *) ps_color="$term_green"
-         ps_char="$return \\$"
-         ;;
-    esac
# default: purple prompt char; ownership / exit-status checks below may
# override the color.
+    ps_color="$term_purple"
+    ps_char="$ps_char"'\$'
    if [[ ! -O . ]]; then # not owner
      if [[ -w . ]]; then # writable
        ps_color="$term_bold$term_red"
      else
-        ps_color="$term_bold$term_green"
+        ps_color="$term_red"
      fi
    fi
# a nonzero exit status wins: shown green, prefixed with the status
+    if [[ $return != 0 ]]; then
+      ps_color="$term_green"
+      ps_char="$return \\$"
+    fi
+
    # faster than sourceing the file im guessing
    if [[ -e /dev/shm/iank-status && ! -e /tmp/quiet-status ]]; then
-      eval $(< /dev/shm/iank-status)
+      eval "$(< /dev/shm/iank-status)"
    fi
    if [[ $MAIL_HOST && $MAIL_HOST != "$HOSTNAME" ]]; then
      ps_char="@ $ps_char"
    fi
# j\j shows the number of background jobs, but only when there are any
+    jobs_char=
+    if [[ $(jobs -p) ]]; then
+      jobs_char='j\j '
+    fi
+
+
+    # allow a function to specify a command to run after we run the next
+    # command. Use case: a function makes a persistent notification. If
+    # we happen to be using that terminal, we can just keep working by
+    # entering our next command, even a noop in order to dismiss the
+    # notification, instead of having to explicitly dismiss it.
+    if [[ ${_psrun[*]} ]]; then
+      if (( _psrun_count >= 1 )); then
+
+        "${_psrun[@]}" ||:
+        _psrun_count=0
+        unset _psrun
+      else
+        _psrun_count=$(( _psrun_count + 1 ))
+      fi
+    else
+      _psrun_count=0
+    fi
+
    # We could test if sudo is active with sudo -nv
    # but then we get an email and log of lots of failed sudo commands.
    # We could turn those off, but seems better not to.
    if [[ ! $HISTFILE ]]; then
      ps_char="NOHIST $ps_char"
    fi
# hook: a ps-extra function may be defined elsewhere to adjust prompt
# state; called only when it exists.
+    if [[ $(type -t ps-extra 2>/dev/null) == function ]]; then
+      ps-extra
+    fi
-    PS1="${PS1%"${PS1#*[wW]}"} $psudo\[$ps_color\]$ps_char\[$term_nocolor\] "
+    PS1="$PS1 $jobs_char$psudo\[$ps_color\]$ps_char\[$term_nocolor\] "
+
+    # copy of what is automatically added by guix.
+    # adds [env] to PS1 if GUIX_ENVIRONMENT is set and PS1 contains '$';
+    if [ -n "$GUIX_ENVIRONMENT" ]; then
+      if [[ $PS1 =~ (.*)"\\$" ]]; then
+        PS1="${BASH_REMATCH[1]} [env]\\\$ "
+      fi
+    fi
+
+    # version 211203 does not have this feature, 230805 does. we can't
+    # detect this over ssh. We could copy the var over ssh, but I'd
+    # rather just wait until i'm not running the earlier konsole.
+    if [[ $TERM == xterm-256color && $KONSOLE_VERSION && ! $KONSOLE_VERSION == 2[01]* ]]; then
+      # This is from konsole, copied after pressing ctrl-alt-] .
+      # I figured out what it does from reading git clone https://gitlab.freedesktop.org/Per_Bothner/specifications
+      #
+      #proposals/semantic-prompts.md
+      #
+      # I tried figuring out what they really do from the konsole source code,
+      # but I gave up.
+      #
+      # \[\e]133;L\a\] This makes it so the last command is always
+      # newline terminated. That is kind nice, but I also want to know
+      # when they aren't, and this screws up my we() function, so
+      # removed. The doc notes that Fish and ZSH both show a specific
+      # char to indicate that happened, that sounds nice so I figured
+      # out how to do that on my own.
+      #
+      # \[\e]133;D;$?\]
+      # This is something to try to show the last exit code. I already do that
+      # and colorize it so removed.
+      #
+
+      if [[ ! $PS1 =~ 133 ]] ; then
+        PS1='\[\e]133;A\a\]'"$PS1"'\[\e]133;B\a\]'
+        PS2='\[\e]133;A\a\]'"$PS2"'\[\e]133;B\a\]'
+        # shellcheck disable=SC2034 # false positive
+        PS0='\[\e]133;C\a\]'
+      fi
+    fi
-    # set titlebar. instead, using more advanced
-    # titelbar below
-    #echo -ne "$_title_escape $HOSTNAME ${PWD/#$HOME/~} \007"
  }
-  PROMPT_COMMAND=prompt-command
# array form of PROMPT_COMMAND (supported by newer bash versions)
+  PROMPT_COMMAND=(prompt-command)
  if [[ $TERM == screen* ]]; then
# NOTE(review): the first _title_escape assignment is immediately
# overwritten by the second; looks like dead code kept for reference -
# confirm intent before removing.
    _title_escape="\033]..2;"
    _title_escape="\033]0;"
  fi
-  settitle () {
-    # this makes it so we show the current command if
-    # one is running, otherwise, show nothing
+  # make the titlebar be the last command and the current directory.
# Runs from the DEBUG trap (installed below); the about-to-run command
# is passed as $1.
+  auto-window-title () {
-    if [[ $1 == prompt-command ]]; then
+
+    # These are some checks to help ensure we dont set the title at
+    # times that the debug trap is running other than the case we
+    # want. Some of them might not be needed.
+    if (( ${#FUNCNAME[@]} != 1 || ${#BASH_ARGC[@]} != 2 || BASH_SUBSHELL != 0 )); then
      return 0
    fi
-    if (( ${#BASH_ARGC[@]} == 1 && BASH_SUBSHELL == 0 )); then
-      echo -ne "$_title_escape ${PWD/#$HOME/~} "
-      printf "%s" "$*"
-      echo -ne "\007"
# don't retitle for the prompt hook itself, only for real commands
+    if [[ $1 == prompt-command ]]; then
+      return 0
    fi
+    # note: this could be useful to do something interesting.
+    #_iank_last_cmd="$*"
+    echo -ne "$_title_escape ${PWD/#$HOME/~} "
+    printf "%s" "$*"
+    echo -ne "\007"
  }
  # note, this wont work:
  # condition from the screen man page i think.
  # note: duplicated in tx()
# The DEBUG trap fires before each simple command; used here to put the
# (truncated) command into the terminal/window title.
  if [[ $TERM == *(screen*|xterm*|rxvt*) ]]; then
-    trap 'settitle "$BASH_COMMAND"' DEBUG
+    # 40 chars is plenty.
+    trap 'auto-window-title "${BASH_COMMAND:0:40}"' DEBUG
  else
# clear any previously installed DEBUG trap
    trap DEBUG
  fi
fi
+
# Print viewer counts for the lp22 streams: a grand total plus a
# per-room breakdown, scraped from each streaming host's status page.
#
# No arguments. Output looks like: "total: 12 jupiter: 7 saturn: 5"
lp22viewers() {
  local ip out room i n v
  local -a rooms roomv
  v=0
  rooms=(jupiter saturn)
  # per-room totals, parallel to rooms
  roomv=(0 0)
  for ip in 209.51.188.25 live.fsf.org; do
    # --insecure: one host is addressed by bare IP, so the TLS cert
    # cannot match the requested name
    out=$(curl -sS --insecure https://$ip/)
    # iterate over the real room indexes only. (a previous version
    # looped over a third, nonexistent index; the resulting empty room
    # name made the grep pattern overly broad and inflated the totals)
    for i in "${!rooms[@]}"; do
      room=${rooms[i]}
      # each "<room>...current...<N>" match contributes N viewers
      while read -r n; do
        v=$((v+n))
        roomv[i]=$(( roomv[i] + n ))
      done < <(printf "%s\n" "$out" | grep -Po "$room.*?current[^0-9]*[0-9]*" | grep -o '[0-9]*$' )
    done
  done
  printf "total: %s " "$v"
  for i in "${!rooms[@]}"; do
    # room name is data, not a printf format string
    printf "%s: %s " "${rooms[i]}" "${roomv[i]}"
  done
  echo
}
+
# Flush the kernel neighbor (ARP) cache for whichever interface carries
# the default route.
arpflush() {
  local dev
  # take the word following "dev" on the first default-route line
  dev=$(ip r show default | sed 's/.*dev \([^ ]*\).*/\1/' | head -n1)
  m s ip n flush dev "$dev"
}
+
# Wrapper for dsh (distributed shell) that always passes -c.
# NOTE(review): presumably -c is dsh's concurrent-execution flag (run
# on all hosts at once) - confirm against dsh(1).
+dsh() {
+  command dsh -c "$@"
+}
+
+
+
# Output file(s) like cat, but with syntax highlighting via batcat when
# it is installed; falls back to plain cat otherwise.
v() {
  if ! type -t batcat >/dev/null; then
    cat "$@"
    return
  fi
  # note: another useful useful style is "header"
  batcat --color always --style plain --theme Coldark-Cold -P "$@"
}
+
# Combine files $@ into a single file (~/fcomb) with marker comment
# lines between them which allow splitting them back with fsplit.
#
# Assumes file names do not have newlines in them. Note: an input file
# lacking a trailing newline would merge with the next marker line and
# confuse fsplit.
fcomb() {
  local f comment out
  # random tag so the marker will not collide with real file content.
  # generated with cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c8
  comment='# jvvuyUsq '
  out=~/fcomb
  rm -f -- "$out"
  for f; do
    # the marker line records the path fsplit will write back to
    echo "$comment$f" >>"$out"
    cat -- "$f" >>"$out"
  done
}
# Split ~/fcomb (created by fcomb) back into its original files; each
# marker line holds the path its following section is written to.
fsplit() {
  # fix: comment was previously missing from the local list and leaked
  # into the global shell environment.
  local f fin line fin_lines comment
  fin=~/fcomb
  line=1
  fin_lines=$(wc -l "$fin" | awk '{print $1}')
  # must match the marker written by fcomb
  comment='# jvvuyUsq '
  while (( line <= fin_lines )); do
    # the current line is a marker; stripping it yields the output path
    f=$(sed -n "${line}s/^$comment//p" "$fin")
    # emit everything from here up to the next marker, minus markers
    sed -n "$line,/^$comment/{/^$comment/d;p}" "$fin" >"$f"
    # advance past this marker plus the section just written
    line=$(( line + 1 + $(wc -l "$f" | awk '{print $1}') ))
  done
}
+
+# histignored function
# Transparent wrapper: "h CMD ARGS..." runs CMD unchanged.
# NOTE(review): presumably "h" is listed in HISTIGNORE elsewhere so
# prefixed commands stay out of bash history - confirm; the HISTIGNORE
# setup is not visible in this chunk.
+h() {
+  "$@"
+}
+
# Wrapper forcing tcpdump's -l (line-buffered stdout) so output appears
# immediately when piped or watched interactively.
+tcpdump() {
+  # avoid buffered output when debugging, which will cause confusion.
+  command tcpdump -l "$@"
+}
+
+# Info on Parameter Expansion
+#
+# info cheat sheet:
+# H: see keybinds
+# / search, {, }: next/prev match
+# ctrl/alt-v scroll forward/backward within this node
+# l: go to previous node
# Jump straight to the "Shell Parameter Expansion" node of the bash
# info manual.
+info-pe() {
+  info bash 'Basic Shell Features' 'Shell Expansions' 'Shell Parameter Expansion'
+}
+
# btrfs wrapper: when run as root with a mutating subcommand (subvolume
# delete/create/snapshot/rescue/replace, send, receive), log the
# invocation and its timestamped output under /var/log/btrbk; any other
# use runs btrfs directly.
btrfs() {
  local log_path regex=' sub[^ ]* (del[^ ]*|create|snap|rescue|replace) | send | receive '
  if [[ $EUID == 0 && $* =~ $regex ]]; then
    mkdir -p /var/log/btrbk
    log_path=/var/log/btrbk/$(date +%F_%H_%M_%S%:::z).log
    # fix: this previously piped into the quoted string
    # 'ts "%F %T" >>'$log_path, which bash tried to execute as a single
    # (nonexistent) command name instead of ts plus a redirection.
    printf "running: %s\n" "$*" | ts "%F %T" >>"$log_path"
    # pee (moreutils): show output on the terminal and also append a
    # timestamped copy to the log. pee takes shell command strings, so
    # the quoted-string form is correct here.
    command btrfs "$@" |& pee cat 'ts "%F %T" >>'"$log_path"
  else
    command btrfs "$@"
  fi
}
+
+
+# $1 = snapshot
# Make a read-only btrfs snapshot writable (inverse: btrfs-snapshot-ro).
# Note this goes through the btrfs wrapper function above, but
# "property set" does not match its logging regex.
+btrfs-snapshot-rw() {
+  # -ts = type snapshot
+  # -f = force, required to overcome this error:
+  #
+  # ERROR: cannot flip ro->rw with received_uuid set, use force option
+  # -f if you really want unset the read-only status. The value of
+  # received_uuid is used for incremental send, consider making a
+  # snapshot instead. Read more at btrfs-subvolume(8) and Subvolume
+  # flags.
+  #
+  btrfs property set -f -ts "$1" ro false
+}
+
+# $1 = snapshot
# Make a btrfs snapshot read-only again (inverse of btrfs-snapshot-rw;
# see the -f/-ts notes there).
+btrfs-snapshot-ro() {
+  btrfs property set -f -ts "$1" ro true
+}
+
+
# Load /a/c/brc3 into the current shell.
# NOTE(review): that file is outside this chunk; its contents are
# unknown here.
+brc3() {
+  source /a/c/brc3
+}
+
# * stuff that makes sense to be at the end
# best practice
# restore default word splitting in case anything above changed IFS
unset IFS
-# shellcheck disable=SC1090
-[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
# load RVM (ruby version manager) into the shell, when installed
+if [[ -s "$HOME/.rvm/scripts/rvm" ]]; then
+  # shellcheck disable=SC1091
+  source "$HOME/.rvm/scripts/rvm"
+fi
# I had this idea to start a bash shell which would run an initial
# command passed through this env variable, then continue on