-Misc configs & ~1k loc in bashrc
+~2.7k lines of bash to setup all my computers
+
+Initial os install is also automated using a separate repo called
+automated-distro-installer, also at iankelling.org/git.
+
+This is not meant for other people to run verbatim, but for them to read
+and copy the good parts. It has dependencies on other repos at
+https://iankelling.org/git. Most notably, my bashrc, distro functions,
+and config files repo.
+
+
+The main thing missing for someone else to use things is the expected
+location of repos in the filesystem. So you would need to look out for
+paths starting with /a and adjust them.
+
+Background: reasoning behind using /a: The home directory is typically
+used for local software development, but I use paths like /a instead,
+for the following reasons:
+
+1. I want to run code directly from where I work on it, instead of
+always having to create and use some install process. If that code is in
+a home directory, and you want to run it as root (especially in contexts
+where SUDO_USER is not set, like cron/systemd), you have to hardcode the
+username for /home/username, or create some install process where that
+username is saved somewhere, and then you are stuck with a single
+username. If it ever got packaged for a gnu/linux distro, it would rely
+on a hardcoded path with no username in it, so let's just do that.
+
+2. The home directory is inconvenient. It's filled with a bunch of junk
+you don't care about, which makes directory listing horrible, makes it so
+you can't back it up easily (for example, gvfs mountpoint in it breaks
+lots of things), and has things you don't want to backup. So, you could
+use a subdirectory. But typing /s is much faster than ~/s and in every
+root context, /home/username/s.
+
Part of how I run GNU/Linux. I try fully automate my systems and store
all configs and scripts, except private info at
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# todo: remove old leaf subvols, like keep up to 1 month or something.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+
+# Print this script's header (everything up to the "end command line"
+# marker) as usage text, then exit with status $1 (empty means "exit
+# with the last command's status", effectively 0 on the -h path).
+usage() {
+  echo "top of script file:"
+  sed -n '1,/^[# ]*end command line/{p;b};q' "$0"
+  exit $1
+}
+
+script_dir=$(dirname $(readlink -f "$BASH_SOURCE"))
+
+# note q is owned by root:1000
+# note p/m is owned 1000:1000 and chmod 700
+
+
+mountpoints=()
+
+rsync_mountpoint=/q
+
+conf_only=false
+dry_run=false # mostly for testing
+resume_arg=
+rate_limit=no
+verbose=false
+
+default_args_file=/etc/btrbk-run.conf
+if [[ -s $default_args_file ]]; then
+ set -- $(< $default_args_file) "$@"
+fi
+
+temp=$(getopt -l help cl:m:nprt:vh "$@") || usage 1
+eval set -- "$temp"
+while true; do
+ case $1 in
+ # only creates the config file, does not run btrbk
+ -c) conf_only=true; shift ;;
+ -l) rate_limit=$2; shift 2 ;;
+ -m) IFS=, mountpoints=($2); unset IFS; shift 2 ;;
+ -n) dry_run=true; dry_run_arg=-n; shift ;;
+ -p) progress_arg="--progress"; shift ;;
+ # btrbk arg: Resume only. Skips snapshot creation.
+ -r) resume_arg=-r; shift ;;
+ # empty is valid for just doing local snapshot. we have default hosts
+ # we will populate
+ -t) IFS=, targets=($2); unset IFS; shift 2 ;;
+ -v) verbose=true; verbose_arg=-v; shift ;;
+ -h|--help) usage ;;
+ --) shift; break ;;
+ *) echo "$0: Internal error!" ; exit 1 ;;
+ esac
+done
+
+if [[ -s $default_args_file ]]; then
+ echo "$0: warning: default btrbk-run options set in $default_args_file (sleeping 5 seconds):"
+ cat $default_args_file
+ sleep 5
+fi
+
+echo -e "$0: options: conf_only=$conf_only\ndry_run=$dry_run\nresume_arg=$resume_arg\nrate_limit=$rate_limit\nverbose=$verbose"
+
+# set default targets
+if [[ ! -v targets ]]; then
+ case $HOSTNAME in
+ x2|fz)
+ if [[ $HOSTNAME == "$MAIL_HOST" ]]; then
+ targets=($HOME_DOMAIN)
+ fi
+ ;;
+ tp)
+ targets=(frodo)
+ if [[ $HOSTNAME == "$MAIL_HOST" ]]; then
+ if timeout -s 9 10 ssh x2 :; then
+ targets+=(x2)
+ fi
+ fi
+ ;;
+ frodo)
+ targets=()
+ ;;
+ *)
+ echo "$0: error: no default targets for this host, use -t"
+ exit 1
+ ;;
+ esac
+fi
+
+echo "targets: ${targets[*]}"
+
+
+
+if (( ${#mountpoints[@]} )); then
+ for mp in ${mountpoints[@]}; do
+ if [[ -e /nocow/btrfs-stale/$mp ]]; then
+ echo "$0: warning: $mp stale. Sleeping for 3 seconds in case you want to cancel."
+ sleep 3
+ fi
+ done
+else # set default mountpoints
+ case $HOSTNAME in
+ frodo)
+ prospective_mps=(/i)
+ ;;
+ *)
+ prospective_mps=(/a /q)
+ if [[ $HOSTNAME == "$MAIL_HOST" ]]; then
+ prospective_mps+=(/o)
+ fi
+ ;;
+ esac
+ case ${targets[0]} in
+ kw|kww)
+ prospective_mps=(/a)
+ ;;
+ esac
+
+ for mp in ${prospective_mps[@]}; do # default mountpoints to sync
+ if [[ -e /nocow/btrfs-stale/$mp ]]; then
+ echo "$0: warning: $mp stale, not adding to default mountpoints"
+ continue
+ fi
+ if awk '{print $2}' /etc/fstab | grep -xF $mp &>/dev/null; then
+ mountpoints+=($mp)
+ fi
+ done
+fi
+
+echo "mountpoints: ${mountpoints[*]}"
+
+##### end command line parsing ########
+
+# Push a local path to the same absolute path on a remote host.
+# $1 - remote hostname, $2 - absolute path; --relative recreates the
+# full path remotely and --delete removes extraneous remote files.
+# Honors $dry_run_arg.
+rsync-dirs() {
+  local host=$1
+  local path=$2
+  m rsync $dry_run_arg -ahi --relative --delete "$path" "root@$host:/"
+}
+
+# Append a "volume" stanza for the global $vol to /etc/btrbk.conf.
+vol-conf() {
+  cat >>/etc/btrbk.conf <<EOF
+volume $vol
+EOF
+}
+# Append a "subvolume" stanza for the global $sub to /etc/btrbk.conf.
+sub-conf() {
+  cat >>/etc/btrbk.conf <<EOF
+subvolume $sub
+EOF
+}
+# Append a send-receive target stanza for the globals $tg (host) and
+# $vol (remote volume path) to /etc/btrbk.conf.
+tg-conf() {
+  cat >>/etc/btrbk.conf <<EOF
+target send-receive ssh://$tg$vol/btrbk
+EOF
+}
+# Print a command, prefixed with this script's basename, then run it.
+m() { printf "%s: %s\n" "${0##*/}" "$*"; "$@"; }
+
+
+# Fail fast if btrbk is missing. Previously this only printed the error
+# and carried on, so the script would fail later in a confusing way.
+# command -v is the portable replacement for which.
+if ! command -v btrbk &>/dev/null; then
+  echo "$0: error: no btrbk binary found"
+  exit 1
+fi
+
+cat >/etc/btrbk.conf <<EOF
+ssh_identity /root/.ssh/home
+# Just a guess that local7 is a good facility to pick.
+# It's a bit odd that the transaction log has to be logged to
+# a file or syslog, while other output is sent to std out.
+# The man does not mention a way for them to be together, but
+# I dunno if setting a log level like warn might also output
+# transaction info.
+transaction_syslog local7
+
+# so we only run one at a time
+lockfile /var/lock/btrbk.lock
+
+# default format of short does not accomidate hourly preservation setting
+timestamp_format long-iso
+
+# only make a snapshot if things have changed
+snapshot_create onchange
+# I could make this different from target_preserve,
+# if one disk had less space.
+# for now, keeping them equal.
+snapshot_preserve 36h 14d 8w 24m
+snapshot_preserve_min 4h
+snapshot_dir btrbk
+
+# so, total backups = ~89
+target_preserve 36h 14d 8w 24m
+target_preserve_min 4h
+
+# if something fails and it's not obvious, try doing
+# btrbk -l debug -v dryrun
+
+rate_limit $rate_limit
+EOF
+
+
+# if our mountpoints are from stale snapshots,
+# it doesn't make sense to do a backup.
+check-subvol-stale ${mountpoints[@]} || exit 1
+
+for tg in ${targets[@]}; do
+ # for an initial run, btrbk requires the dir to exist
+ ssh root@$tg mkdir -p /mnt/root/btrbk
+done
+
+
+for m in ${mountpoints[@]}; do
+ # for /i, some special cases. there is just one static target and direction.
+ if [[ $m == /i ]]; then
+ vol=/mnt/iroot
+ vol-conf
+ sub=i
+ sub-conf
+ tg=frodo
+ vol=/mnt/root
+ tg-conf
+ else
+ vol=/mnt/root
+ vol-conf
+ sub=${m##*/}
+ sub-conf
+ for tg in ${targets[@]}; do
+ tg-conf
+ done
+ fi
+done
+
+
+# todo: umount first to ensure we don't have any errors
+# todo: do some kill fuser stuff to make umount more reliable
+
+
+if $conf_only; then
+ exit
+fi
+
+if $dry_run; then
+ m btrbk -v -n $resume_arg run
+else
+ # -q and just using the syslog option seemed nice,
+ # but it doesn't show when a send has a parent and when it doesn't.
+ m btrbk $verbose_arg $progress_arg $resume_arg run
+fi
+
+# if we have it, sync to systems which don't
+if mountpoint $rsync_mountpoint >/dev/null; then
+ for tg in ${targets[@]}; do
+ case $tg in
+ tp|li|lk)
+ for x in /p/c/machine_specific/*.hosts; do
+ if grep -qxF $tg $x; then
+ dir=${x%.hosts}
+ rsync-dirs $tg $dir
+ fi
+ done
+ ;;
+ esac
+ done
+fi
+
+if ! $dry_run; then
+ m $script_dir/mount-latest-remote ${targets[@]}
+fi
+
+
+# todo: move variable data we don't care about backing up
+# to /nocow and symlink it.
+
+
+# background on btrbk timezones. with short/long, timestamps use local time.
+# for long, if your local time moves backwards, by moving timezones or
+# for an hour when daylight savings changes it, you will temporarily get
+# a more aggressive retention policy for the overlapping period, and
+# vice versa for the opposite timezone move. The alternative is using
+# long-iso, which puts timezone info into the timestamp, which means
+# that instead of shifting time, you shift the start of day/week/month
+# which is used for retention to your new local time, which means for
+# example, if you moved forward by 8 hours, the daily/weekly/monthly
+# retention will be 8 hours more aggressive since midnight is at a new
+# time, unless you fake the timezone using the TZ env variable.
+# However, in the short term, there will be no inconsistencies.
+# I don't see any problem with shifting when the day starts for
+# retention, so I'm using long-iso.
+
+# note to create a long-iso timestamp: date +%Y%m%dT%H%M%S%z
--- /dev/null
+#!/bin/bash
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+# inspired from
+# https://github.com/kdave/btrfsmaintenance
+
+
+# Man page says we could also use a range, I suppose it would be
+# logical to use a pattern like 5..10 10..20,
+# but I don't know if this would help us at all.
+dusage="1 5 10 20 30 40 50"
+musage="1 5 10 20 30"
+
+e() { echo "cron: $*"; "$@"; }
+
+check-idle() {
+ export DISPLAY=:0
+ # 3 hours, assume a movie might run that long.
+ idle_limit=$((1000 * 60 * 60 * 3))
+ idle_time=$idle_limit
+ id=999
+ while id $((++id)) &>/dev/null; do
+ new_idle_time=$(sudo -u \#$id xprintidle 2>/dev/null) ||:
+ if [[ $new_idle_time && $new_idle_time -lt $idle_time ]]; then
+ idle_time=$new_idle_time
+ fi
+ done
+ if (( idle_time < idle_limit )); then
+ idle=false
+ else
+ idle=true
+ fi
+}
+
+if [[ $1 == check ]]; then
+ check=true
+else
+ check=false
+fi
+
+check-idle
+
+fnd="findmnt --types btrfs --noheading"
+for x in $($fnd --output "SOURCE" --nofsroot | sort -u); do
+ mnt=$($fnd --output "TARGET" --first-only --source $x)
+ [[ $mnt ]] || continue
+
+ if ! $idle; then
+ btrfs scrub cancel $mnt &>/dev/null ||:
+ continue
+ fi
+ if $check; then
+ continue
+ fi
+
+ # for comparing before and after balance.
+ # the log is already fairly verbose, so commented.
+ # e btrfs filesystem df $mnt
+ # e df -H $mnt
+ if btrfs filesystem df $mnt | grep -q "Data+Metadata"; then
+ for usage in $dusage; do
+ e btrfs balance start -dusage=$usage -musage=$usage $mnt
+ done
+ else
+ e btrfs balance start -dusage=0 $mnt
+ for usage in $dusage; do
+ e btrfs balance start -dusage=$usage $mnt
+ done
+ e btrfs balance start -musage=0 $mnt
+ for usage in $musage; do
+ e btrfs balance start -musage=$usage $mnt
+ done
+ fi
+ # e btrfs filesystem df $mnt
+ # e df -H $mnt
+  # Find when the last *completed* scrub started; empty if a scrub
+  # never finished on this filesystem.
+  date=$(
+    btrfs scrub status $mnt | \
+      sed -rn 's/^\s*scrub started at (.*) and finished.*/\1/p'
+  )
+  if [[ $date ]]; then
+    date=$(date --date="$date" +%s)
+    # Skip if the last scrub started less than 90 days ago. The wiki
+    # recommends 30 days or so, but scrubbing makes the machine lag
+    # badly for a day, so go with 90.
+    # bugfix: the code previously tested 30 days (60*60*24*30), which
+    # contradicted the stated 90-day intent.
+    if (( date > $(date +%s) - 60*60*24*90 )); then
+      echo "cron: skipping scrub of $mnt"
+      continue
+    fi
+  fi
+  e btrfs scrub start -Bd $mnt
+done
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2017 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+domain_user=(
+ pump.iankelling.org pumpio
+ mumble.iankelling.org mumble-server
+)
+
+# Walk (domain, user) pairs; when certbot tells us (via RENEWED_LINEAGE)
+# that one of our domains was renewed, copy the key and chain into that
+# service user's home dir, group-readable by the service.
+for ((i=0; i<${#domain_user[@]}; i+=2)); do
+  domain=${domain_user[i]}
+  user=${domain_user[i+1]}
+
+  d=/etc/letsencrypt/live/$domain
+  # Quote $d: we want a literal comparison, not glob matching.
+  if [[ $RENEWED_LINEAGE == "$d" ]]; then
+    # Look up the home dir from the passwd db rather than eval'ing a
+    # tilde expansion.
+    home=$(getent passwd "$user" | cut -d: -f6)
+    install -m 640 -g $user $d/{privkey.pem,fullchain.pem} "$home"
+    exit 0
+  fi
+done
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# usage: $0 SUBVOL_MOUNTPOINT...
+#
+# In git, this is not executable because it's meant to be installed
+# using ./install-my-scripts
+#
+# If latest subvols $@ are not mounted, exit 1, print message, and touch
+# /nocow/btrfs-stale/$subvol
+#
+# Either SUBVOL_MOUNTPOINT is a snapshot of the latest, or
+# the latest snapshot is snapshot of SUBVOL_MOUNTPOINT.
+
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+shopt -s nullglob
+
+if [[ ! $@ ]]; then
+ echo "$0: error: expected mountpoint argument"
+fi
+
+stale-file() {
+ stale_dir=/nocow/btrfs-stale
+ stale_file=$stale_dir/$vol
+ if $stale; then
+ mkdir -p $stale_dir
+ printf "%s\n" $freshest_snap > $stale_file
+ ret=1
+ else
+ rm -f $stale_file
+ fi
+
+}
+
+ret=0
+for d; do
+  vol=${d##*/}
+  # Device and subvol= option for $d's fstab entry (first match wins).
+  dev=$(sed -rn "s,^\s*([^#]\S*)\s+$d\s.*,\1,p" /etc/fstab|head -n1)
+  # bugfix: character class was [a-zA-A/], which silently excluded most
+  # uppercase letters from subvol names; corrected to [a-zA-Z/].
+  subvol_dir=$(sed -rn "s,^\s*[^#]\S*\s+$d\s.*\bsubvol=([a-zA-Z/]+).*,\1,p" /etc/fstab|head -n1)
+ # note, we need $dev because $d might not be mounted, and we do this loop
+ # because the device in fstab for the rootfs can be different.
+ for devx in $(btrfs fi show $dev| sed -rn 's#.*path (/\S+)$#\1#p'); do
+ root_dir=$(sed -rn "s,^\s*$devx\s+(\S+).*\bsubvolid=0\b.*,\1,p" /etc/fstab|head -n1)
+ if [[ $root_dir ]]; then break; fi
+ done
+ svp=$root_dir/$subvol_dir # subvolume path
+
+ snaps=($root_dir/btrbk/$subvol_dir.20*) # Assumes we are in the 21st century.
+ if [[ ! $snaps ]]; then
+ # no snapshots yet
+ echo "$0: warning. no snapshots found. this is expected for a brand new volume"
+ continue
+ fi
+
+  # Find the last received sub: the snapshot with the highest "Gen at
+  # creation" among those that carry a Received UUID.
+  # bugfix 1: last_received_gen was never updated inside the loop, so
+  # the comparison was always true and we kept whichever qualifying
+  # snapshot happened to sort last in the glob, not the newest one.
+  # bugfix 2: reset last_received/last_received_cgen each pass so a
+  # value from a previous mountpoint's iteration can't leak in.
+  last_received_gen=0
+  last_received=
+  last_received_cgen=
+  for f in ${snaps[@]}; do
+    show="$(btrfs sub show $f)"
+    if echo "$show" | grep -E "Received UUID:\s+[[:alnum:]]" &>/dev/null; then
+      cgen=$(echo "$show" | sed -rn 's,^\s*Gen at creation:\s+([0-9]+).*,\1,p')
+      if [[ $cgen -gt $last_received_gen ]]; then
+        last_received_gen=$cgen
+        last_received_cgen=$cgen
+        last_received=$f
+      fi
+    fi
+  done
+
+ # Get last_snap by date.
+ # when a btrbk bugfix makes it into the distro,
+ # we might replace this with btrbk list latest /mnt/root/$vol | ...
+ last_snap=$(
+ for s in ${snaps[@]}; do
+ f=${s##*/}
+ unix_time=$(date -d $(sed -r 's/(.{4})(..)(.{5})(..)(.*)/\1-\2-\3:\4:\5/' <<<${f#$vol.}) +%s)
+ printf "%s %s\n" $unix_time $s
+ done | sort -r | head -n 1 | awk '{print $2}'
+ )
+ if [[ ! $last_snap ]]; then
+ # should not happen.
+ echo "$0: error: could not find latest snapshot for $svp among ${snaps[*]}"
+ ret=1
+ continue
+ fi
+
+ if [[ ! -e $svp ]]; then
+ echo "$0: warning: subvol does not exist: $svp"
+ echo "$0 assuming this host was just for receiving and latest snap is freshest"
+ freshest_snap=$last_snap
+ stale=true
+ stale-file
+ continue
+ fi
+
+
+ # if there is a last_received, we can assume stale or fresh if we are newer/older
+ if [[ $last_received ]]; then
+ svp_cgen=$(btrfs sub show $svp | sed -rn 's,^\s*Gen at creation:\s+([0-9]+).*,\1,p')
+ if [[ $svp_cgen -ge $last_received_cgen ]]; then
+ stale=false
+ else
+ echo "$svp stale: it's gen at creation, $svp_cgen, is earlier than the last received snapshot, $last_received's gen at creation: $last_received_cgen"
+ freshest_snap=$last_received
+ stale=true
+ fi
+ stale-file
+ continue
+ fi
+
+ # fallback to using last_snap as the freshest
+ freshest_snap=$last_snap
+ stale=true
+ # fresh if $svp has $last_snap as a snapshot,
+ if btrfs sub show $svp 2>/dev/null | sed '0,/^\s*Snapshot(s):/d;s/^\s*//' | \
+ grep -xF btrbk/$last_snap &>/dev/null; then
+ stale=false
+ else # or else $svp is a snapshot of $last_snap. we use a uuid
+ # comparison, which if I remember from the docs, is a bit more
+ # robust, perhaps to renames.
+ last_snap_uuid=$(btrfs sub show $last_snap| awk '$1 == "UUID:" {print $2}')
+ if btrfs sub show $svp| grep "^\s*Parent UUID:\s*$last_snap_uuid$" &>/dev/null; then
+ stale=false
+ fi
+ fi
+
+ stale-file
+
+done
+exit $ret
--- /dev/null
+#!/bin/bash
+
+source /a/bin/errhandle/errcatch-function
+source /a/bin/errhandle/bash-trace-function
+
+errcatch
+
+m() {
+ echo "$*"
+ "$@"
+}
+s() { sudo "$@"; }
+lnf() { /a/exe/lnf "$@"; }
+
+
+shopt -s nullglob
+shopt -s extglob # note, already set with bash -l
+
+# If we make a link back to the root, we stop going deeper into subdir_files.
+# This makes it so we can do subdir directories.
+#
+# Also note, under filesystem/, symlinks are expanded.
+
+# Recursively symlink a subdir_files tree into $HOME.
+# $1 - the subdir_files root; $2 (optional, recursion only) - the
+# directory currently being descended into.
+# Files are linked individually. A directory is linked as a whole when
+# its resolved parent is the parent of the root (a link back toward the
+# root, which stops the recursion — see the note above about allowing
+# subdir directories); otherwise we recurse into it.
+# Relies on extglob (!(...)) and nullglob being set by the caller.
+subdir-link-r() {
+  local root="$1"
+  local targets=()
+  if [[ $2 ]]; then
+    targets=( "$2"/!(.git|..|.) )
+  else
+    # Top-level call: only descend into directories at the root.
+    for f in "$1"/!(.git|..|.); do
+      [[ -d $f ]] && targets+=("$f") ||:
+    done
+  fi
+  local below="$( readlink -f "$root/..")"
+  for path in "${targets[@]}"; do
+    local fullpath="$(readlink -f "$path")"
+    #e $fullpath $below # debug
+    if [[ -f $path || $(dirname $(readlink -f "$fullpath")) == "$below" ]]; then
+      # Link into $HOME at the path relative to the root.
+      m lnf -T "$path" "$HOME/${path#$root/}"
+    elif [[ -d "$path" ]]; then
+      subdir-link-r "$root" "$path"
+    fi
+  done
+}
+
+
+
+# Install the config dirs given as arguments. For each dir:
+#   filesystem/    - copied to / as root via tar (symlinks resolved),
+#                    only for users matching ^iank?$
+#   subdir_files/  - recursively symlinked into $HOME
+#   anything else  - symlinked directly into $HOME
+# (binds/ and machine_specific/ are intentionally excluded.)
+common-file-setup() {
+  local dir fs x bdir f dst
+  for dir in "$@"; do
+    fs=$dir/filesystem
+    if [[ -e $fs && $USER =~ ^iank?$ ]]; then
+      # note, symlinks get resolved, not copied.
+      s tar --mode=g-s --owner=0 --group=0 -cz -C $fs . | s tar -xz -C /
+    fi
+
+    if [[ -e $dir/subdir_files ]]; then
+      m subdir-link-r $dir/subdir_files
+    fi
+    # Remaining top-level entries, excluding the special dirs above.
+    local x=( $dir/!(binds|subdir_files|filesystem|machine_specific|..|.) )
+    (( ${#x[@]} >= 1 )) || continue
+    m lnf ${x[@]} ~
+  done
+}
+
+all_dirs=({/a/c,/p/c}{,/machine_specific/$HOSTNAME})
+# note, we assume a group of hosts does not have the
+# same name as a single host, which is no problem on our scale.
+for x in /p/c/machine_specific/*.hosts; do
+ if grep -qxF $HOSTNAME $x; then all_dirs+=( ${x%.hosts} ); fi
+done
+
+c_dirs=(/a/c{,/machine_specific/$HOSTNAME})
+case $USER in
+ ian|iank)
+ # p needs to go first so .ssh link is created, then config link inside it
+ m common-file-setup ${all_dirs[@]}
+
+ #### begin special extra stuff ####
+ install -d -m700 ~/gpg-agent-socket
+ if [[ -d /etc/bind/bind-writable ]]; then
+ # need bind writable dir for nsupdate, or else we get
+ # named[20823]: /etc/bind/db.iank.pw.jnl: create: permission denied
+ m s chgrp bind /etc/bind/bind-writable
+ fi
+ files=(/etc/bind/[Kk]*)
+ if [[ -e $files ]]; then
+ m s chgrp bind ${files[@]}
+ fi
+ if [[ -e /etc/davpass ]] && getent group www-data &>/dev/null; then
+ s chgrp www-data /etc/davpass
+ fi
+ ##### end special extra stuff #####
+
+
+
+ m sudo -H -u traci "$BASH_SOURCE"
+ ;;
+ traci)
+ m common-file-setup ${c_dirs[@]}
+ ;;
+ *)
+ echo "$0: error: unexpected user"; exit 1
+ ;;
+esac
--- /dev/null
+#!/bin/bash -l
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+
+
+
+
+ip=$(host demohost | sed -rn 's/^\S+ has address //p;T;q')/32
+cedit demohost /etc/exports <<EOF || exportfs -ra
+/a $ip(async,rw,no_subtree_check,no_root_squash)
+/q $ip(async,rw,no_subtree_check,no_root_squash)
+EOF
+
+
+ssh root@demohost bash -s <<'EOFOUTER'
+set -eE -o pipefail
+
+apt-get install -y nfs-common
+
+sed -ri '\%^(faiserver:/[aq]|/q/p)\s%d' /etc/fstab
+cat >>/etc/fstab <<'EOF'
+faiserver:/a /a nfs defaults 0 0
+faiserver:/q /q nfs defaults 0 0
+/q/p /p none bind 0 0
+EOF
+
+mkdir -p /a /q /p
+mount /a
+mount /q
+mount /p
+EOFOUTER
--- /dev/null
+#!/bin/bash -l
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# first 2 alternatives showed under ubuntu 14.04, second 2 under arch at 11/2015
+
+if [[ $1 ]]; then
+ right_monitor_rotation=left
+else
+ right_monitor_rotation=normal
+fi
+
+
+xout="$(xrandr)"
+xe() { echo "$xout"; }
+x=$(xe | grep -E '^(DisplayPort-[0123]|DVI-0|DP-[1234]|DVI-I-1) connected' | wc -l)
+if (( x > 2 )); then
+ left=$(xe | sed -rn 's/^(DVI[^ ]+) connected .*/\1/p')
+ dps=( $(xe | sed -rn 's/^(DP-[01234]|DisplayPort-[01234]) connected .*/\1/p') )
+
+ middle=${dps[1]}
+ right=${dps[0]}
+ # on older distros, i needed to swap middle and right.
+
+
+ xrandr --output $left --mode 2560x1600 --pos 0x0 --rotate left \
+ --output $middle --mode 2560x1600 --pos 1600x0 --rotate left \
+ --output $right --mode 2560x1600 --pos 3200x0 --rotate $right_monitor_rotation
+elif (( x == 2 )); then
+ # 3rd monitor not working atm, so doing this.
+ left=$(xe | sed -rn 's/^(DVI[^ ]+) connected .*/\1/p')
+ middle=$(xe | sed -rn 's/^(DP-[01234]|DisplayPort-[01234]) connected .*/\1/p')
+ xrandr --output $left --mode 2560x1600 --pos 0x0 --rotate left \
+ --output $middle --mode 2560x1600 --pos 1600x0 --rotate left
+
+fi
+/a/bin/distro-setup/input-setup
+if isarch; then
+ pulseaudio --start
+fi
+#indicator-kdeconnect
+date "+%A, %B %d, %r, %S seconds" > /tmp/desktop-20-autostart-log
--- /dev/null
+#!/bin/bash
+
+if ! pgrep -u $EUID emacs; then
+ emacs --daemon &
+fi
+pidgin &
+linphone &
+if ! pgrep -u $EUID -f "firefox -P sfw"; then
+ firefox -P sfw &
+fi
+#nagstamon &
--- /dev/null
+Things I stopped using and will become broken over time, but better to
+have here than just in git history.
--- /dev/null
+#!/bin/bash
+## bitcoin disabled. fees too high
+
+if [[ $HOSTNAME == frodo ]]; then
+ case $distro in
+ debian)
+ if [[ `debian-archive` == testing ]]; then
+ # has no unstable dependencies
+ pi bitcoind/unstable
+ src=/a/opt/bitcoin/contrib/init/bitcoind.service
+ s cp $src /etc/systemd/system
+ p=/etc/bitcoin/bitcoin
+ dst=/etc/systemd/system/bitcoinjm.service
+ # jm for joinmarket
+ $sed -r "/^\s*ExecStart/s,${p}.conf,${p}jm.conf," $src \
+ >/etc/systemd/system/bitcoinjm.service
+
+ d=jm; jm=d # being clever for succinctness
+ for s in d jm; do
+ s $sed -ri "/^\s*\[Unit\]/a Conflicts=bitcoin${!s}.service" \
+ /etc/systemd/system/bitcoin${s}.service
+ done
+
+ ser daemon-reload
+
+ dir=/nocow/.bitcoin
+ s mkdir -p $dir
+ s chown -R bitcoin:bitcoin $dir
+ dir=/etc/bitcoin
+ s mkdir -p $dir
+ s chown -R root:bitcoin $dir
+ s chmod 750 $dir
+
+ # pruning decreases the bitcoin dir to 2 gb, keeps
+ # just the recent blocks. can\'t do a few things like
+ # import a wallet dump.
+ # pruning works, but people had to do
+ # some manual stuff in joinmarket. I dun need the
+ # disk space, so not bothering yet, maybe in a year or so.
+ # https://github.com/JoinMarket-Org/joinmarket/issues/431
+ #https://bitcoin.org/en/release/v0.12.0#wallet-pruning
+ #prune=550
+
+ f=$dir/bitcoin.conf
+ s dd of=$f <<EOF
+ server=1
+ # necessary for joinmarket, not bad in general
+ rpcpassword=$(openssl rand -base64 32)
+ rpcuser=$(openssl rand -base64 32)
+ EOF
+
+ # dunno about sharing a wallet between multiple instances
+ # manually did, wallet.dat symlinked in /nocow/.bitcoin
+ sgo bitcoind
+ fi
+ ;;
+ # other distros unknown
+ esac
+
+
+
+ ## disabling joinmarket, its too expensive
+ ### begin joinmarket setup ###
+
+ case $distro in
+ debian)
+ f=$dir/bitcoin.conf
+ f2=$dir/bitcoinjm.conf
+ s cp $f $f2
+ s tee -a $f2 >/dev/null <<EOF
+ # Joinmarket
+ walletnotify=curl -sI --connect-timeout 1 http://localhost:62602/walletnotify?%s
+ alertnotify=curl -sI --connect-timeout 1 http://localhost:62602/alertnotify?%s
+ wallet=joinmarket.dat
+ EOF
+
+ ;;
+ # other distros unknown
+ esac
+
+ pi libsodium-dev python-pip
+ cd /a/opt/joinmarket
+ # using develop branch, as it seems to be mostly bug fixes,
+ # and this is quite new software.
+ # note: python3 does not work.
+ # has seg fault error due to some bug, but it still works
+ pip install -r requirements.txt || [[ $? == 139 ]]
+ # note, the target must exist ahead of time, or bitcoin
+ # just overwrites the link, and it\'s not happy with an empty file,
+ # so we have to create the wallet, then move and link it.
+ s lnf -T /q/bitcoin/wallet.dat /nocow/.bitcoin/wallet.dat
+ s lnf -T /q/bitcoin/joinmarket.dat /nocow/.bitcoin/joinmarket.dat
+ # not technically needed, but seems cleaner not to have
+ # symlinks be root owned unlike everything else
+ s chown -h bitcoin:bitcoin /nocow/.bitcoin/*
+
+ for var in rpcuser rpcpassword; do
+ u="$(s sed -rn "s/^$var=(.*)/\1/p" /etc/bitcoin/bitcoin.conf)"
+ # escape backslashes
+ u="${u//\\/\\\\\\\\}"
+ # escape commas
+ u="${u//,/\\,}"
+ sed -ri "s,^(rpc_${var#rpc}\s*=).*,\1 $u," joinmarket.cfg
+ done
+ sed -ri "s/^\s*(blockchain_source\s*=).*/\1 bitcoin-rpc/" joinmarket.cfg
+ ### end joinmarket setup ###
+fi
--- /dev/null
+#!/bin/bash -l
+
+# this is from distro-end
+
+pi kodi
+
+# based on https://wiki.debian.org/SecuringNFS
+# but the quota stuff is either outdated or optional,
+# i guessed that it was not needed and it worked fine.
+s dd of=/etc/sysctl.d/nfs-static-ports.conf <<'EOF'
+fs.nfs.nfs_callback_tcpport = 32764
+fs.nfs.nlm_tcpport = 32768
+fs.nfs.nlm_udpport = 32768
+EOF
+s sysctl --system
+s $sed -ri -f - /etc/default/nfs-common <<'EOF'
+/^\s*STATDOPTS=/d
+$a STATDOPTS="--port 32765 --outgoing-port 32766"
+EOF
+
+s $sed -ri -f - /etc/default/nfs-kernel-server <<'EOF'
+/^\s*RPCMOUNTDOPTS=/d
+$a RPCMOUNTDOPTS="--manage-gids --port 32767"
+EOF
+ser restart nfs-kernel-server
+
+if [[ $HOSTNAME == treetowl ]]; then
+ # persistent one time steps for webdav:
+ # create persistent password, put it in ~/.kodi/userdata/advancedsettings.xml,
+ # per http://kodi.wiki/view/MySQL/Sync_other_parts_of_Kodi
+ # htpasswd -c /p/c/filesystem/etc/davpass dav
+ # chmod 640 /p/c/filesystem/etc/davpass
+ # in conflink, set group to www-data.
+ # In kodi, i set the music source, server address: my domain,
+ # path: k/music. Then copied the file
+ # /p/c/subdir_files/.kodi/userdata/sources.xml to save that setting.
+ s a2enmod dav dav_fs
+ web-conf -r /a/c/playlists - apache2 dav.$HOME_DOMAIN <<'EOF'
+<Directory /a/c/playlists>
+ DAV On
+ AuthType Basic
+ AuthName "Authentication Required"
+ AuthUserFile "/etc/davpass"
+ Require valid-user
+
+# outside the standard /var/www, so use this:
+ Order allow,deny
+ Allow from all
+</Directory>
+EOF
+ s mkdir -p /var/www/davlock
+ s chown www-data:www-data /var/www/davlock
+ s sed -i "1i DavLockDB /var/www/davlock/davlock" /etc/apache2/sites-enabled/dav.$HOME_DOMAIN.conf
+ ser reload apache2
+
+ teeu /etc/exports "/k/music *(ro,nohide,async,no_subtree_check,insecure)"
+ exportfs -ra
+
+ # kodi uses sqlite by default, but supports mysql.
+ pi mariadb-server
+
+ # see ofswiki.org for explanation.
+ dbpass="$(cat /p/mysql-root-pass)"
+ if ! echo exit|mysql -uroot "-p$dbpass"; then
+ echo -e "\n\n$dbpass\n$dbpass\n\n\n\n\n" | mysql_secure_installation
+ fi
+ mysql -uroot "-p$dbpass" <<EOF
+GRANT ALL PRIVILEGES ON *.* TO 'kodi' IDENTIFIED BY '$(</p/mysql-kodi-pass)';
+EOF
+ s sed -ri 's/^(\s*bind-address\s*=).*/\1 0.0.0.0/' /etc/mysql/mariadb.conf.d/50-server.cnf
+ ser restart mariadb
+
+fi
--- /dev/null
+#!/bin/bash
+
+# not caring enough about having a new firefox at the moment, so
+# disabled.
+
+# this was just under the comment "basic needed packages" in
+# distro-begin.
+
+case $(distro-name) in
+ debian)
+ if has_x; then
+ if isdebian-stable; then
+ pi firefox/$codename-backports
+ else
+ # for a while, firefox/unstable did not have
+ # dependencies satisfied by testing packages, and i hit
+ # a conflict, it wanted a newer libfontconfig1, but
+ # emacs build-deps wanted an older one. In this case,
+ # I switch to using firefox-esr. note: They seem
+ # to release a new esr version every 9 months or so.
+ pi firefox/unstable
+ s dd of=/etc/apt/preferences.d/firefox <<'EOF'
+Package: firefox
+Pin: release a=unstable
+Pin-Priority: 500
+EOF
+ fi
+ fi
+ # # no hosts have nonfree firmware anymore, yay. but leaving commented,
+ # # as i might run into one for a little while still.
+ # p=firmware-linux-nonfree
+ # if apt-cache show $p &>/dev/null; then
+ # pi $p
+ # fi
+ ;;&
+ trisquel|ubuntu)
+ if has_x; then
+ pi abrowser
+ fi
+ ;;
+esac
--- /dev/null
+#!/bin/bash -l
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
+# Automated phabricator setup. Not currently using it,
+# but it worked last time I tried it.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?"' ERR
+
+set -x
+
+
+# lj is test server
+case $HOSTNAME in
+ lj)
+ domain=phab.iank.bid
+ alt_domain=fastmail.wiki
+ ;;
+ lk)
+ domain=phab.iankelling.org
+ alt_domain=iankellingusercontent.org
+ ;;
+esac
+
+
+pass=`cat /p/c/machine_specific/$HOSTNAME/phabricator_admin`
+webroot=/usr/share/phabricator/webroot
+user=iank
+name="Ian Kelling"
+email=ian@iankelling.org
+ssh_port=222
+
+fbin() { bin=$1; shift; sudo /usr/share/phabricator/bin/$bin "$@"; }
+fsetd() { fbin config set --database "$@"; }
+
+# phabricator complained about wanting arcanist first
+pi arcanist/unstable mercurial
+
+# duplicated in mediawiki setup. todo fix that.
+s DEBIAN_FRONTEND=noninteractive pi mysql-server
+cd # mysql_secure_installation writes some temp files to the current dir,
+# so we need to make sure it's writable.
+if echo exit|mysql -u root -p"$dbpass"; then
+ echo -e "$dbpass\nn\n\n\n\n" | mysql_secure_installation
+else
+ echo -e "\n\n$dbpass\n$dbpass\n\n\n\n\n" | mysql_secure_installation
+fi
+
+mysql -u root -p$dbpass <<EOF
+grant all privileges on \`phabricator\\_%\`.* to 'phabricator'@localhost identified by '$pass';
+EOF
+
+# Preseed debconf answers so the phabricator package installs
+# noninteractively. "webserver None" because we configure apache
+# ourselves with web-conf below.
+phab-sel() {
+  s debconf-set-selections<<EOF
+phabricator phabricator/pwd_check password $pass
+phabricator phabricator/phabricator_mysql_pwd password $pass
+phabricator phabricator/webserver select None
+phabricator phabricator/phabricator_mysql_user string phabricator
+phabricator phabricator/mysql_host string localhost
+# Domain name or subdomain name used by phabricator:
+phabricator phabricator/domain_name string $domain
+EOF
+}
+phab-sel
+
+pi phabricator/unstable
+
+# debian sets http, but we want https
+s sed -i --follow-symlinks 's/http:/https:/' /usr/share/phabricator/conf/local/local.json
+
+
+acme-tiny-wrapper $domain
+acme-tiny-wrapper $alt_domain
+
+for x in $domain $alt_domain; do
+ web-conf -r $webroot - $x <<EOF
+RewriteEngine on
+RewriteRule ^/rsrc/(.*) - [L,QSA]
+RewriteRule ^/favicon.ico - [L,QSA]
+RewriteRule ^/php5-fcgi - [L]
+RewriteRule ^(.*)\$ /index.php?__path__=\$1 [B,L,QSA]
+<Directory "$webroot">
+ Require all granted
+</Directory>
+EOF
+done
+
+
+# Before I figured out how to setup the admin in the script,
+# this would limit the site to localhost,
+# and access it through an ssh tunnel until its secure.
+#phab-site -p 127.0.0.1:443
+
+# settings are stored in conf/local/local.json.
+# some settings could also be stored in the database with
+# --database arg. database has higher priority than
+# the config file.
+
+# if you need to restart phabricator, just ser restart apache2
+# https://secure.phabricator.com/book/phabricator/article/restarting/
+
+# to reset things, you can do.
+# fbin storage destroy; pu phabricator; phab-sel; pi phabricator/unstable
+# # but under debian, prolly better to purge, cause db gets created on install
+
+
+# On first run went to the website, registered manually, then
+# went through the gui setup items to get the configuration below.
+
+
+#expect "*"
+#sleep 1
+
+# expect exits with 0 by default when an expect command times out.
+# You can modify this, but it was simpler to use an irregular exit code
+# to detect actual success.
+sudo expect -d <<EOF
+# The expect lines use shell type globbing. They are not actually
+# needed, but they make the script likely to fail if the questions
+# content changes drastically, and make the script self documenting.
+
+# adds a short delay after each send for more reliable operation
+# (reference: comment in any autoexpect generated script)
+set force_conservative 0
+spawn "/usr/share/phabricator/bin/accountadmin"
+# If we've already set our user, detect different prompt and exit
+# expect basics: when the last alternative matches, there is no need
+# to specify an action, we just continue.
+expect {
+ timeout {exit 1}
+ -nocase "enter a username" exit
+ -nocase "y/n"
+}
+send "y\r"
+expect -nocase timeout {exit 1} "username"
+send "$user\r"
+expect -nocase timeout {exit 1} "create*y/n"
+send "y\r"
+expect -nocase timeout {exit 1} "name"
+send "$name\r"
+expect -nocase timeout {exit 1} "email"
+send "$email\r"
+expect -nocase timeout {exit 1} "password"
+send "$pass\r"
+expect -nocase timeout {exit 1} "bot"
+send "n\r"
+expect -nocase timeout {exit 1} "admin"
+send "y\r"
+expect -nocase timeout {exit 1} "save"
+send "y\r"
+expect eof
+exit
+EOF
+
+
+
+# this tipped me over to using a debian package
+# https://secure.phabricator.com/T4181
+
+fsetd auth.require-approval false
+
+# phabricator recommends going from 16 to at least 32
+sudo sed -ri 's/(^\s*max_allowed_packet)[[:space:]=].*/\1 = 100M/' /etc/mysql/my.cnf
+
+
+# setini KEY VALUE SECTION FILE
+# Idempotently set "KEY = VALUE" under [SECTION] in an ini-style FILE:
+# within the range from the [SECTION] header to the next [header], delete
+# any existing KEY lines, then append the new assignment right after the
+# [SECTION] header line.
+# NOTE(review): the assignments below are global, not local.
+setini() {
+  key="$1" value="$2" section="$3" file="$4"
+  sudo sed -ri "/ *\[$section\]/,/^ *\[[^]]+\]/{/^\s*$key[[:space:]=]/d};/ *\[$section\]/a $key = $value" "$file"
+}
+
+# setd KEY VALUE: set a key in the [mysqld] section of my.cnf
+setd() { setini "$@" mysqld /etc/mysql/my.cnf; }
+
+# error instead of data corruption:
+setd sql_mode STRICT_ALL_TABLES
+setd ft_stopword_file /usr/share/phabricator/resources/sql/stopwords.txt
+setd ft_min_word_len 3
+# mysql full text search for word1 word2 will and them instead of or them:
+setd ft_boolean_syntax "' |-><()~*:\"\"&^'"
+# default is 128M. recommended starting point is 40% of ram.
+setd innodb_buffer_pool_size 1600M
+
+# make the stopword file and ft_min_word_len settings take effect on this table
+mysql -u root -p$dbpass <<'EOF'
+REPAIR TABLE phabricator_search.search_documentfield;
+EOF
+
+fsetd pygments.enabled true
+fbin config set security.alternate-file-domain https://$alt_domain
+
+setini opcache.validate_timestamps '"0"' opcache /etc/php5/apache2/php.ini
+setini post_max_size 100M PHP /etc/php5/apache2/php.ini
+
+fsetd metamta.default-address phabricator@$domain
+fsetd metamta.domain $domain
+
+
+ser restart mysql
+
+# Not sure if this is needed. while developing this script, mysql went down
+# for a bit and the daemons died.
+
+
+# todo, setup inbound email:
+# https://secure.phabricator.com/book/phabricator/article/configuring_inbound_email/
+
+
+# https://secure.phabricator.com/book/phabricator/article/diffusion_hosting/
+# unmatchable password, allows login only via ssh, sudo, etc.
+# this is standard.
+# I tried having no home dir, (-d /nonexistent),
+# but I got an error message on test sshing,
+sudo useradd -p '*' -m --system -s /bin/sh vcs || [[ $? == 9 ]]
+
+# you'd think the debian package would set this. todo: check on a fresh
+# machine
+fbin config set phd.user phabricator
+fbin config set diffusion.ssh-user vcs
+
+option="ALL=(phabricator) SETENV: NOPASSWD:"
+www_files=$(which git hg|sed ':a;N;s/\n/, /;ta')
+vcs_files=$(which git git-upload-pack git-receive-pack hg|sed ':a;N;s/\n/, /;ta')
+[[ $www_files && $vcs_files ]] || exit 1
+www_files="$www_files, /usr/lib/git-core/git-http-backend"
+sudo dd of=/etc/sudoers.d/phabricator <<EOF
+www-data $option $www_files
+vcs $option $vcs_files
+EOF
+
+# Found this due to red x in the ui after setting up a test repo.
+# todo: debian package should do this for us. see also:
+# https://phab.iank.bid/config/edit/environment.append-paths/
+sudo lnf /usr/lib/git-core/git-http-backend /usr/share/phabricator/support/bin
+
+fbin config set diffusion.allow-http-auth true
+
+# Install the AuthorizedKeysCommand helper used by the dedicated
+# phabricator sshd instance configured below.
+# couldn't find a really appropriate place for it. It needs parent dir
+# permissions to be root:root.
+file=/usr/share/phabricator-local-ssh-hook.sh
+# from /usr/share/phabricator/resources/sshd/phabricator-ssh-hook.sh
+# NOTE(review): the generated script passes $@ unquoted to ssh-auth.
+# sshd passes a single username token here so it works, but "$@" would
+# be safer — confirm against the upstream hook before changing.
+sudo dd of=$file <<'EOF'
+#!/bin/sh
+# For debugging, you can temporarily do:
+# exec >/tmp/plog 2>&1
+# This script executes as the vcs user
+if [ "$1" != vcs ]; then exit 1; fi
+exec "/usr/share/phabricator/bin/ssh-auth" $@
+EOF
+sudo chmod 755 $file
+
+sudo dd of=/etc/ssh/sshd_config.phabricator <<EOF
+AuthorizedKeysCommand $file
+AuthorizedKeysCommandUser vcs
+AllowUsers vcs
+
+Port $ssh_port
+Protocol 2
+PermitRootLogin no
+AllowAgentForwarding no
+AllowTcpForwarding no
+PrintMotd no
+PrintLastLog no
+PasswordAuthentication no
+AuthorizedKeysFile none
+
+PidFile /var/run/sshd-phabricator.pid
+EOF
+
+sudo dd of=/etc/systemd/system/phabricator-ssh.service <<'EOF'
+[Unit]
+Description=OpenBSD Secure Shell server for phabricator repos
+After=network.target auditd.service
+ConditionPathExists=!/etc/ssh/sshd_not_to_be_run
+
+[Service]
+ExecStart=/usr/sbin/sshd -f /etc/ssh/sshd_config.phabricator
+ExecReload=/bin/kill -HUP $MAINPID
+KillMode=process
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+sudo systemctl daemon-reload
+
+# got this error upon ssh, figured out a solution.
+# [2016-06-10 06:40:15] EXCEPTION: (AphrontInvalidCredentialsQueryException) #1045: Access denied for user 'root'@'localhost' (using password: NO) at [<phutil>/src/aphront/storage/connection/mysql/AphrontBaseMySQLDatabaseConnection.php:306]
+# arcanist(), phabricator(), phutil()
+
+s usermod -a -G vcs www-data
+s usermod -a -G vcs iank
+s usermod -a -G vcs phabricator
+s chown root:vcs /usr/share/phabricator/conf/local/local.json
+fbin config set diffusion.ssh-port $ssh_port
+
+fsetd policy.allow-public true
+
+sgo phabricator-ssh
+
+ser restart apache2
+sgo phabricator
+
+
+# todo, finish next steps here:
+# notably, backup/restore
+# https://secure.phabricator.com/book/phabricator/article/configuration_guide/
+
+
+fbin auth recover iank
+
+cat <<EOF
+# go to link above, then
+# https://$domain/auth/config/new/
+# and add username/pass auth provider.
+EOF
+
+
+
+# beginnings of automating those last manual steps:
+
+
+# for setting the auth provider, we can use the api.
+#arc set-config default https://$domain
+#
+# but first we have to generate an api key by getting
+# https://phab.iank.bid/conduit/login/
+# to do that, we've got to login to the url login.
+# We've got to post to a url on the login page,
+# then record 2 cookies: phuser and phsid
+# It also does a 302 for us to do 2 more pages related to auth/login.
+
+# we need to post to the right url (didn't record it, with these params)
+#allowLogin:"1"
+#allowRegistration:"1"
+#allowLink:"1"
+#allowUnlink:"1"
+
+
+#Serve over HTTP
+#
+#
+# phabricator/ $ ./bin/repository edit rT --as iank --local-path ...
+
+#
--- /dev/null
+#!/bin/bash
+
+# this is from distro-end
+
+# Samba file sharing, only on the treetowl file server.
+if [[ $HOSTNAME == treetowl ]]; then
+  pi samba
+  # note samba re-reads its config every 1 minute
+  case $distro in
+    arch) s cp /etc/samba/smb.conf.default /etc/samba/smb.conf ;;
+  esac
+
+  # Normalize auth settings: delete any existing "encrypt passwords" /
+  # "map to guest" lines, then re-add both right after the workgroup line.
+  # add 2 lines after workgroup option
+  s sed -ri --follow-symlinks '/^\s*encrypt passwords\s*=/d' /etc/samba/smb.conf
+  s sed -ri --follow-symlinks '/^\s*map to guest\s*=/d' /etc/samba/smb.conf
+  s sed -i --follow-symlinks 's/\(\s*workgroup\s*=\).*/\1 WORKGROUP\n\tencrypt passwords = yes\n\tmap to guest = bad password/' /etc/samba/smb.conf
+  # remove default homes section. not sharing that.
+  s sed -ri --follow-symlinks '/^\s*\[homes\]/,/\s*\[/d' /etc/samba/smb.conf
+
+  # append the guest-writable share once (grep -xF: whole-line literal match)
+  if ! grep -xF '[public]' /etc/samba/smb.conf &>/dev/null; then
+    s tee -a /etc/samba/smb.conf <<'EOF'
+[public]
+  guest ok = yes
+  read only = no
+  path = /kr
+EOF
+  fi
+
+  case $distro in
+    debian|trisquel|ubuntu)
+      # systemd claims it generates units from /etc/init.d, but it
+      # clearly doesn't in debian. I have no idea how they are
+      # related. fuck debian right now. It's not documented. samba
+      # has a systemd init file linked to /dev/null. There's this
+      # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=769714 which
+      # claims samba's sub-services will be started automatically by
+      # systemd... it didn't on install, wonder if it will on
+      # boot. It clued me in how to start it manually though. Nothing
+      # in /usr/share/doc/samba, debian admin guide says nothing about
+      # any of this. (this is in debian testing as of 4/2016).
+
+      s /etc/init.d/samba start
+      ;;
+    arch)
+      sgo samba
+      ;;
+  esac
+fi
--- /dev/null
+#!/bin/bash -l
+# Copyright (C) 2016 Ian Kelling
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
+
+# for bootstrapping a new machine
+
+# in case we need it,
+# to make ssh interactive shell run better, we run this first.
+sudo bash -c 'source /a/c/.bashrc && source /a/exe/ssh-emacs-setup'
+
+
+# usage: $0 [-r] HOSTNAME
+
+# tips:
+# run any sudo command first so your pass is cached
+# set the scrollback to unlimited in case something goes wrong
+
+if [[ $EUID == 0 ]]; then
+ if getent passwd iank || getent passwd ian ; then
+ echo "$0: error: running as root. unprivileged user exists. use it."
+ exit 1
+ else
+ echo "$0: warning: running as root. I will setup users then exit"
+ fi
+fi
+
+interactive=true # set this to false to force set -x
+[[ $- == *i* ]] || interactive=false
+
+if ! $interactive; then
+ set -x
+ set -e -o pipefail
+fi
+set -E
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?"' ERR
+
+exec &> >(sudo tee -a /var/log/distro-begin)
+echo "$0: $(date): starting now)"
+
+# headless=false # unused atm
+recompile=false
+# for copying to a new data fs
+bootstrapfs=false # old flag, needs new look before using.
+while [[ $1 == -* ]]; do
+  case $1 in
+    -r) recompile=true; shift ;;
+    # previously an unrecognized flag was never shifted, so this loop
+    # spun forever; fail fast instead
+    *) echo "$0: error: unknown option: $1" >&2; exit 1 ;;
+  esac
+done
+
+# optional positional arg overrides the hostname used by the predicates below
+if [[ $1 ]]; then
+  export HOSTNAME=$1
+fi
+
+# Define one predicate function per machine, e.g. `frodo` succeeds only
+# on frodo. Note $HOSTNAME expands when the function is *defined* (the
+# eval string is double quoted), so a later HOSTNAME change would not be
+# picked up.
+for f in iank-dev htpc treetowl x2 frodo tp li lj demohost kw fz; do
+  eval "$f() { [[ $HOSTNAME == $f ]]; }"
+done
+# capability predicates, mostly "everything except the linode VPSes"
+has_p() { ! linode; } # when tp is tracis, then not tp either
+has_x() { ! linode; }
+linode() { lj || li; }
+has_btrfs() { ! linode; }
+home_network() { ! linode; }
+encrypted() { has_p; }
+
+shopt -s extglob
+# keep globs from matching the . and .. entries
+export GLOBIGNORE=*/.:*/..
+# group-writable files by default
+umask 0002
+
+
+####### end command line parsing
+
+PATH="/a/exe:$PATH"
+sed="sed --follow-symlinks"
+
+##### begin setup encryption scripts ######
+if encrypted; then
+ # I tried making a service which was dependent on reboot.target,
+ # but it happened too late in the shutdown process.
+ sudo dd of=/etc/systemd/system/keyscripton.service <<'EOF'
+[Unit]
+Description=Turn on automatic decryption of drives on boot
+# tried using graphical.target, but it made my display manager restart before rebooting.
+# generally, I don't think targets order shutdown like they do startup.
+# So, I did systemd-analyze plot > something.svg, and picked a reliably started
+# service that happens late in the game.
+After=ntp.service
+DefaultDependencies=no
+# not sure if needed, makes sure we shut down before reboot.target
+Conflicts=reboot.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/bin/true
+ExecStop=/a/exe/keyscript-on
+
+[Install]
+WantedBy=keyscriptoff.service
+EOF
+ sudo systemctl daemon-reload # needed if the file was already there
+ sudo systemctl stop keyscripton.service
+ # sudo systemctl start keyscripton.service
+ sudo systemctl enable keyscripton.service
+
+ sudo dd of=/etc/systemd/system/keyscriptoff.service <<'EOF'
+[Unit]
+Description=Turn off automatic decryption of drives on boot
+
+[Service]
+Type=oneshot
+ExecStart=/a/exe/keyscript-off
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ sudo systemctl daemon-reload # needed if the file was already there
+ sudo systemctl enable keyscriptoff.service
+ sudo systemctl start keyscriptoff.service
+fi
+##### end setup encryption scripts ######
+
+
+install-myqueue
+
+# this script has been designed to be idempotent
+# todo, it would be nice to cut down on some of the output
+
+
+for x in /a/bin/errhandle/*-function; do
+ source $x
+done
+
+
+set +e
+$interactive || errcatch
+set +x
+source /a/bin/distro-functions/src/identify-distros
+$interactive || set -x
+
+if isfedora; then
+  # comment out line disallowing calling sudo in scripts
+  sudo $sed -i 's/^Defaults *requiretty/#\0 # ian commented/' /etc/sudoers
+  # turn on magic sysrq commands for this boot cycle.
+  # (fixed: was `echo 1 > sudo dd of=...`, which redirected into a file
+  # literally named "sudo" instead of writing to /proc)
+  echo 1 | sudo dd of=/proc/sys/kernel/sysrq
+  # persist across boots; writing under /etc needs root
+  echo "kernel.sysrq = 1" | sudo dd of=/etc/sysctl.d/90-sysrq.conf
+  # selinux is not user friendly. Like, you enable samba, but you haven't run the magic selinux commands so it doesn't work
+  # and you have no idea why.
+  sudo $sed -i 's/^\(SELINUX=\).*/\1disabled/' /etc/selinux/config
+  selinuxenabled && sudo setenforce 0
+fi
+
+
+# already ran for pxe installs, but used for vps & updates
+distro=$(distro-name)
+case $distro in
+ ubuntu|debian|trisquel)
+ sudo bash -c ". /a/bin/fai/fai-wrapper && /a/bin/fai/fai/config/scripts/GRUB_PC/11-iank"
+ ;;
+ *)
+ sudo bash -c ". /a/bin/fai/fai-wrapper &&
+/a/bin/fai/fai/config/distro-install-common/end"
+ ;;
+esac
+
+sudo $sed -i '/^127\.0\.1\.1/d' /etc/hosts
+echo "127.0.1.1 $HOSTNAME.b8.nz $HOSTNAME" | sudo tee -a /etc/hosts
+
+
+if [[ $EUID == 0 ]]; then
+ echo "$0: running as root. exiting now that users are setup"
+ exit 0
+fi
+
+
+#### begin link bashrc for root ######
+for x in /a/c/{.bashrc,brc,.bash_profile,.profile,.inputrc,path_add_function}; do
+ sudo -i <<EOF
+PATH="/a/exe:$PATH"
+lnf $x /root
+EOF
+done
+#### end link bashrc repo for root ######
+
+# li needs the bind group before conflink
+if [[ $HOSTNAME == li ]]; then
+ getent group bind &>/dev/null || sudo groupadd -r bind
+fi
+# this needs to be before installing pacserve so we have gpg conf.
+conflink
+
+set +x
+errallow
+source /etc/profile.d/environment.sh
+source ~/.bashrc
+$interactive || errcatch
+$interactive || set -x
+
+
+# passwordless sudo
+tu /etc/sudoers <<EOF
+$USER ALL=(ALL) NOPASSWD: ALL
+Defaults env_keep += SUDOD
+# makes ubuntu be like debian
+# https://unix.stackexchange.com/a/91572
+Defaults always_set_home
+EOF
+
+
+# enable magic sysrq keys. debian docs say it is already enabled by default
+isfedora && tu /etc/sysctl.conf 'kernel.sysrq = 1'
+
+
+if isdeb; then
+ codename=$(debian-codename)
+ ## ian: disabled. backports are not being published atm due to rust packaging issue
+ # if isdebian-stable && has_x; then
+ # s dd of=/etc/apt/sources.list.d/mozilla-iceweasel.list <<EOF
+ # deb http://mozilla.debian.net/ $codename-backports firefox-release
+ # deb-src http://mozilla.debian.net/ $codename-backports firefox-release
+ # EOF
+ # p update
+ # # take care of mozilla signing errors in previous command
+ # pi pkg-mozilla-archive-keyring
+ # fi
+ p update
+
+fi
+
+
+###### begin arch aur wrapper ########
+if isarch; then
+ #https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages
+ sudo pacman -S --noconfirm --needed base-devel jq
+ # pacaur seems to be the best, although it + cower has a few minor bugs,
+ # its design goals seem good, so, going for it.
+
+  # Build and install each given AUR package from a throwaway build dir.
+  aurpi() {
+    for pkg in "$@"; do
+      builddir=$(mktemp -d)
+      pushd "$builddir"
+      aurex "$pkg"
+      makepkg -sri --skippgpcheck --noconfirm
+      popd
+      rm -rf "$builddir"
+    done
+  }
+ aurpi cower pacaur
+
+ pi pacserve
+
+ x=$(mktemp); /usr/bin/pacman.conf-insert_pacserve >$x
+ sudo dd of=/etc/pacman.conf if=$x; rm $x
+ sudo systemctl enable pacserve.service
+ sudo systemctl start pacserve.service
+
+fi
+###### end arch aur wrapper ########
+
+pup
+
+
+###### begin trash cli install ######
+if isarch; then
+ # strange error if just installing trash-cli: "pyalpm requires python",
+ # so I see that it requires python2, and installing that manually fixes it.
+ # I didn\'t see this on earlier installation, main thing which changed was
+ # pacserve, so not sure if it\'s related.
+ pi python2
+fi
+pi trash-cli
+###### end trash cli install ######
+
+
+######## begin fix evbug bug ######
+case $distro in
+ trisquel|ubuntu)
+ # noticed in flidas.
+ #https://bugs.launchpad.net/ubuntu/+source/module-init-tools/+bug/240553
+ #https://wiki.debian.org/KernelModuleBlacklisting
+ #common advice when searching is to use /etc/modprobe.d/blacklist.conf,
+ #but that file won't work and will get automatically reverted
+ sudo rmmod evbug ||: # might not be loaded yet
+ file=/etc/modprobe.d/evbug.conf
+ line="blacklist evbug"
+ if ! grep -xFq "$line" $file; then
+ sudo dd of=$file 2>/dev/null <<<"$line"
+ sudo depmod -a
+ sudo update-initramfs -u
+ fi
+ ;;
+esac
+######## end fix evbug bug ######
+
+
+
+###### begin link files ###########
+# convenient to just do all file linking in one place
+
+
+s lnf -T /a/bin /b
+s lnf -T /nocow/t /t
+
+if has_p; then
+ lnf -T /p/News ~/News
+fi
+
+s lnf /q/root/.editor-backups /q/root/.undo-tree-history \
+ /a/opt /a/c/.emacs.d $HOME/mw_vars /k/backup /root
+
+rootsshsync
+
+s lnf /a/c/.vim /a/c/.vimrc /a/c/.gvimrc /root
+
+# machine is going away
+# if [[ $HOSTNAME == htpc ]]; then
+# lnf -T /i/Videos ~/Downloads
+# fi
+
+if has_p; then
+ # for dovecot
+ lnf -T /i/k/mboxes ~/mail
+fi
+
+###### end link files ###########
+
+
+##### basic needed packages
+
+### begin setup for keyboard and redshift ###
+case $(distro-name) in
+
+ trisquel|ubuntu|debian)
+ if has_x; then
+ if isdebian-testing; then
+ pi xmacro/unstable # has no unstable deps
+ else
+ pi xmacro
+ fi
+ pi xinput
+ # recommends gets us geoclue (for darkening automatically at night i assume),
+ # which recommends modemmanager, which is annoying to fix for the model01 keyboard.
+ pi --no-install-recommends gtk-redshift
+ fi
+ ;;&
+ fedora)
+ p -y groupinstall development-tools c-development books admin-tools
+ pi wget man-pages
+ if has_x; then
+ pi redshift-gtk
+ # debian has this package patched to work, upstream is dead
+ # tried using alien, pi alien, alien -r *.deb, rpm -Uhv *.rpm, got this error, so fuck it
+ # file /usr/bin from install of xmacro-0.3pre_20000911-7.x86_64 conflicts with file from package filesystem-3.2-19.fc20.x86_64
+ # http://packages.debian.org/source/sid/xmacro
+ pi patch libXtst-devel
+ cd $(mktemp -d)
+ wget http://ftp.de.debian.org/debian/pool/main/x/xmacro/xmacro_0.3pre-20000911.orig.tar.gz
+ wget http://ftp.de.debian.org/debian/pool/main/x/xmacro/xmacro_0.3pre-20000911-6.diff.gz
+ ex *.gz
+ patch -p0 < xmacro_0.3pre-20000911-6.diff
+ cd xmacro-0.3pre-20000911.orig
+ make
+ sleep 1 # not sure why the following command couldn\'t find, so trying this
+ # no make install target
+ s cp -f xmacroplay xmacrorec xmacrorec2 /usr/local/bin
+ fi
+ ;;&
+ arch)
+ # like apt-cache
+ pi pkgfile
+ s pkgfile --update
+ if has_x; then
+ # libxtst is missing dep https://aur.archlinux.org/packages/xmacro/#news
+ pi xorg-server redshift xorg-xinput libxtst xmacro
+
+ # background:
+ # https://aur.archlinux.org/packages/xkbset/#comment-545419
+ cert=$(mktemp)
+ cat >$cert <<'EOF'
+-----BEGIN CERTIFICATE-----
+MIIJADCCB+igAwIBAgIRAIVAhZ0TMbQ5jTm0koI8X6YwDQYJKoZIhvcNAQELBQAw
+djELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1JMRIwEAYDVQQHEwlBbm4gQXJib3Ix
+EjAQBgNVBAoTCUludGVybmV0MjERMA8GA1UECxMISW5Db21tb24xHzAdBgNVBAMT
+FkluQ29tbW9uIFJTQSBTZXJ2ZXIgQ0EwHhcNMTUxMjA4MDAwMDAwWhcNMTgxMjA3
+MjM1OTU5WjCBsTELMAkGA1UEBhMCVVMxDjAMBgNVBBETBTY1MjExMREwDwYDVQQI
+EwhNaXNzb3VyaTERMA8GA1UEBxMIQ29sdW1iaWExHzAdBgNVBAkTFjExMDAgQ2Fy
+cmllIEZyYW5ja2UgRHIxHzAdBgNVBAoTFlVuaXZlcnNpdHkgb2YgTWlzc291cmkx
+CzAJBgNVBAsTAk1VMR0wGwYDVQQDExRmYWN1bHR5Lm1pc3NvdXJpLmVkdTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN8Kap8hASpxQeqjHibGsCR1PBkh
+nW9p5FkuhGpMW/3ko8QfxH0W1Hq2y2DTFUmq17kH3GfT3h9a7HcmUrC3q15PciOB
+WR3j8u0bDfVppyAZXiHJzYGN7xHiPrZtFEGgwZd28+sW80WXTbGl+zKkmeZguGdH
+AVGeWJEFK44ctLbpjHWCy+xNuhxJuL4olwPoV7WX9IUhceC0rxYQANhLGOJhbchj
+Z76MA8dc2K3CZI5m7VqQwl09QSnCfz00afUr88ny9vj1S5k2ADS46gaE9O0lM6EY
+z/uZvMizXN/4ko+hFBjCSt0Vhxjx0kYDSP15btiwh700ywBEubpvLROmd48CAwEA
+AaOCBUswggVHMB8GA1UdIwQYMBaAFB4Fo3ePbJbiW4dLprSGrHEADOc4MB0GA1Ud
+DgQWBBTTNWrSb+V/Ayy0i8W2LExMUisQMzAOBgNVHQ8BAf8EBAMCBaAwDAYDVR0T
+AQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwZwYDVR0gBGAw
+XjBSBgwrBgEEAa4jAQQDAQEwQjBABggrBgEFBQcCARY0aHR0cHM6Ly93d3cuaW5j
+b21tb24ub3JnL2NlcnQvcmVwb3NpdG9yeS9jcHNfc3NsLnBkZjAIBgZngQwBAgIw
+RAYDVR0fBD0wOzA5oDegNYYzaHR0cDovL2NybC5pbmNvbW1vbi1yc2Eub3JnL0lu
+Q29tbW9uUlNBU2VydmVyQ0EuY3JsMHUGCCsGAQUFBwEBBGkwZzA+BggrBgEFBQcw
+AoYyaHR0cDovL2NydC51c2VydHJ1c3QuY29tL0luQ29tbW9uUlNBU2VydmVyQ0Ff
+Mi5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVzZXJ0cnVzdC5jb20wggOg
+BgNVHREEggOXMIIDk4IUZmFjdWx0eS5taXNzb3VyaS5lZHWCGmFkdmlzaW5nLmNv
+YXMubWlzc291cmkuZWR1ghBhaGEubWlzc291cmkuZWR1ghZhbGwtY3JhZnQubWlz
+c291cmkuZWR1gh1hbWVyaWNhbmJhc2tldHJ5Lm1pc3NvdXJpLmVkdYIXYW5kcmVh
+cmlldy5taXNzb3VyaS5lZHWCFWFydGdyYWRzLm1pc3NvdXJpLmVkdYIYYmFja3Vw
+LmNvYXMubWlzc291cmkuZWR1ghBiaWMubWlzc291cmkuZWR1ghZibG9nLmNvYXMu
+bWlzc291cmkuZWR1ghVjb3dhbmxhYi5taXNzb3VyaS5lZHWCFWRhZS5zdGF0Lm1p
+c3NvdXJpLmVkdYIRZGljZS5taXNzb3VyaS5lZHWCIGRpZ2l0YWxzdG9yeXRlbGxp
+bmcubWlzc291cmkuZWR1gg9lYS5taXNzb3VyaS5lZHWCG2Vib29rLWRldi5tYXRo
+Lm1pc3NvdXJpLmVkdYIXZWJvb2suZWNvbi5taXNzb3VyaS5lZHWCGGVuZ2xpc2g4
+MDA2Lm1pc3NvdXJpLmVkdYIZZXVnZW5lZml0c2NoLm1pc3NvdXJpLmVkdYIYZXVy
+b2t1bHR1cmUubWlzc291cmkuZWR1ghNmY2RsYWIubWlzc291cmkuZWR1ghZnZW9t
+dXNldW0ubWlzc291cmkuZWR1ghRoYXJzdGFkLm1pc3NvdXJpLmVkdYITbHVkd2ln
+Lm1pc3NvdXJpLmVkdYIYbWFjaGluZXNob3AubWlzc291cmkuZWR1ghNtYWpvcnMu
+bWlzc291cmkuZWR1ghBtZ2EubWlzc291cmkuZWR1ghdvcmdhbnByaW50Lm1pc3Nv
+dXJpLmVkdYIUcGh5c2ljcy5taXNzb3VyaS5lZHWCFHBtLmNoZW0ubWlzc291cmku
+ZWR1ghxyZWNydWl0aW5nLmVjb24ubWlzc291cmkuZWR1ghdyZXBlYy5lY29uLm1p
+c3NvdXJpLmVkdYIUc2NhbmxhYi5taXNzb3VyaS5lZHWCFnNzc2MuY29hcy5taXNz
+b3VyaS5lZHWCF3RlYWNoLmNvYXMubWlzc291cmkuZWR1ghd0b3B0ZWFjaGVyLm1p
+c3NvdXJpLmVkdYIQdnNmLm1pc3NvdXJpLmVkdYIid2hpdGVwYXBlci5ncmFkc2No
+b29sLm1pc3NvdXJpLmVkdTANBgkqhkiG9w0BAQsFAAOCAQEAQutYVAqG7MpmG2Nu
+Z/UypjYkN4JvwRbKBpTrce2IT/Sy29x6chBbyD+0WE6QORBtaUHuzE1KoXqpnF4M
+QrkKw0oBAC6x9dISoomq0DkIndtoBYYLaxSoII6F4OGWgF7pQ/7MiCBYzsKQpn9t
+aofMcTfvnCjq+MCIaeYnUKBVww0lOJlUxZGKxFJvRpf78HfbBauojjRO2zXLZD/u
+KMspbTfDaj5etIgWGShY2eml3N/SjAENmZYkcgDBYFyi8CckcEBAVzpH1+D+7Anz
+txHSYDNHAYLv83MwbegApa1FwPqlG/4SdEU8G6e6Xf5GLC/6GPGVTUpr7o348OOO
+lzGQzw==
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF+TCCA+GgAwIBAgIQRyDQ+oVGGn4XoWQCkYRjdDANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTQx
+MDA2MDAwMDAwWhcNMjQxMDA1MjM1OTU5WjB2MQswCQYDVQQGEwJVUzELMAkGA1UE
+CBMCTUkxEjAQBgNVBAcTCUFubiBBcmJvcjESMBAGA1UEChMJSW50ZXJuZXQyMREw
+DwYDVQQLEwhJbkNvbW1vbjEfMB0GA1UEAxMWSW5Db21tb24gUlNBIFNlcnZlciBD
+QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJwb8bsvf2MYFVFRVA+e
+xU5NEFj6MJsXKZDmMwysE1N8VJG06thum4ltuzM+j9INpun5uukNDBqeso7JcC7v
+HgV9lestjaKpTbOc5/MZNrun8XzmCB5hJ0R6lvSoNNviQsil2zfVtefkQnI/tBPP
+iwckRR6MkYNGuQmm/BijBgLsNI0yZpUn6uGX6Ns1oytW61fo8BBZ321wDGZq0GTl
+qKOYMa0dYtX6kuOaQ80tNfvZnjNbRX3EhigsZhLI2w8ZMA0/6fDqSl5AB8f2IHpT
+eIFken5FahZv9JNYyWL7KSd9oX8hzudPR9aKVuDjZvjs3YncJowZaDuNi+L7RyML
+fzcCAwEAAaOCAW4wggFqMB8GA1UdIwQYMBaAFFN5v1qqK0rPVIDh2JvAnfKyA2bL
+MB0GA1UdDgQWBBQeBaN3j2yW4luHS6a0hqxxAAznODAOBgNVHQ8BAf8EBAMCAYYw
+EgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
+AwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgGBmeBDAECAjBQBgNVHR8ESTBHMEWgQ6BB
+hj9odHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVNFUlRydXN0UlNBQ2VydGlmaWNh
+dGlvbkF1dGhvcml0eS5jcmwwdgYIKwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNo
+dHRwOi8vY3J0LnVzZXJ0cnVzdC5jb20vVVNFUlRydXN0UlNBQWRkVHJ1c3RDQS5j
+cnQwJQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVzZXJ0cnVzdC5jb20wDQYJKoZI
+hvcNAQEMBQADggIBAC0RBjjW29dYaK+qOGcXjeIT16MUJNkGE+vrkS/fT2ctyNMU
+11ZlUp5uH5gIjppIG8GLWZqjV5vbhvhZQPwZsHURKsISNrqOcooGTie3jVgU0W+0
++Wj8mN2knCVANt69F2YrA394gbGAdJ5fOrQmL2pIhDY0jqco74fzYefbZ/VS29fR
+5jBxu4uj1P+5ZImem4Gbj1e4ZEzVBhmO55GFfBjRidj26h1oFBHZ7heDH1Bjzw72
+hipu47Gkyfr2NEx3KoCGMLCj3Btx7ASn5Ji8FoU+hCazwOU1VX55mKPU1I2250Lo
+RCASN18JyfsD5PVldJbtyrmz9gn/TKbRXTr80U2q5JhyvjhLf4lOJo/UzL5WCXED
+Smyj4jWG3R7Z8TED9xNNCxGBMXnMete+3PvzdhssvbORDwBZByogQ9xL2LUZFI/i
+eoQp0UM/L8zfP527vWjEzuDN5xwxMnhi+vCToh7J159o5ah29mP+aJnvujbXEnGa
+nrNxHzu+AGOePV8hwrGGG7hOIcPDQwkuYwzN/xT29iLp/cqf9ZhEtkGcQcIImH3b
+oJ8ifsCnSbu0GB9L06Yqh7lcyvKDTEADslIaeSEINxhO2Y1fmcYFX/Fqrrp1WnhH
+OjplXuXE0OPa0utaKC25Aplgom88L2Z8mEWcyfoB7zKOfD759AN7JKZWCYwk
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+EOF
+ cat /etc/ssl/certs/ca-certificates.crt >> $cert
+ CURL_CA_BUNDLE=$cert pi xkbset
+ fi
+
+ ;;&
+ ubuntu|trisquel|debian|fedora)
+ if has_x; then
+ if isdebian-testing; then
+ # xkbset was in testing for quite a while, dunno
+ # why it\'s not anymore. Sometime I should check and
+ # see if it\'s back in testing, but the unstable package
+ # doesn\'t upgrade anything form testing, and it\'s tiny
+ # so I\'m not bothering to automate it.
+ pi xkbset/unstable
+ else
+ pi xkbset
+ fi
+ fi
+ ;;&
+esac
+if has_x; then
+ pi xbindkeys
+fi
+### end setup for keyboard and redshift ###
+
+
+pi cryptsetup lvm2
+# enables trim for volume delete, other rare commands.
+sudo $sed -ri 's/( *issue_discards\b).*/\1 = 1/' /etc/lvm/lvm.conf
+
+if encrypted; then
+ if isdeb; then
+ sudo cp /usr/share/doc/util-linux/examples/fstrim.{service,timer} /etc/systemd/system
+ fi
+ # does weekly trim
+ sudo systemctl enable fstrim.timer
+fi
+
+dirs=(/mnt/{1,2,3,4,5,6,7,8,9} /nocow/t)
+s mkdir -p "${dirs[@]}"
+s chown $USER:$USER "${dirs[@]}"
+
+
+tu /etc/fstab <<'EOF'
+/i/w /w none bind,noauto 0 0
+/i/k /k none bind,noauto 0 0
+EOF
+
+
+# create the /kr mount dir if nothing is mounted there yet.
+# &>/dev/null matches mountpoint usage elsewhere in this script and
+# keeps its status chatter out of the log.
+if ! mountpoint /kr &>/dev/null; then
+  s mkdir -p /kr
+  s chown $USER:traci /kr
+fi
+
+if home_network; then
+ if [[ $HOSTNAME == frodo ]]; then
+ tu /etc/fstab <<'EOF'
+/k /kr none bind,noauto 0 0
+EOF
+ else
+ tu /etc/fstab <<'EOF'
+frodo:/k /kr nfs noauto 0 0
+EOF
+ fi
+fi
+
+s mkdir -p /q /i/{w,k}
+# ensure /i /w /k exist with user ownership, skipping mounted ones.
+for dir in /{i,w,k}; do
+  # &>/dev/null: mountpoint otherwise prints status text into the log
+  if mountpoint $dir &>/dev/null; then continue; fi # already mounted
+  s mkdir -p $dir
+  s chown $USER:$USER $dir
+done
+
+# not needed for all hosts, but rather just keep it uniform
+s mkdir -p /mnt/iroot
+
+# debian auto mounting of multi-disk encrypted btrfs is busted. It is
+# in jessie, and in stretch as of 11/26/2016 I have 4 disks in cryptab,
+# based on 3 of those, it creates .device units for /dev/mapper/dev...
+# then waits endlessly for them on bootup, after the /dev/mapper disks
+# have already been created and exist. todo: create a simple repro
+# for this in a vm and report it upstream.
+if has_btrfs || home_network; then
+ pi nfs-common
+  # Generate /root/imount: mounts /i and friends when they appear in
+  # fstab and are not yet mounted (run by imount.service below).
+  s dd of=/root/imount <<'EOF'
+#!/bin/bash
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+for dir in /i /mnt/iroot /k /kr /w; do
+  if ! mountpoint $dir &>/dev/null && \
+       awk '{print $2}' /etc/fstab | grep -xF $dir &>/dev/null; then
+    # Check this entry's own fstype. The previous version matched *any*
+    # nfs line anywhere in fstab, which made every mount failure
+    # non-fatal once a single nfs entry existed.
+    if [[ $(awk -v d="$dir" '$2 == d {print $3}' /etc/fstab) == nfs ]]; then
+      # the nfs server may be unreachable (e.g. away from home); warn only
+      mount $dir || echo "warning: failed to mount nfs on $dir"
+    else
+      mount $dir
+    fi
+  fi
+done
+EOF
+  s chmod +x /root/imount
+
+ s dd of=/etc/systemd/system/imount.service <<EOF
+[Unit]
+Description=Mount /i and related mountpoints
+Before=syncthing@$USER.service
+
+[Service]
+Type=oneshot
+ExecStart=/root/imount
+
+[Install]
+RequiredBy=syncthing@$USER.service
+# note /kr needs networking, this target is the simplest way to
+# time it when the network should be up, but not do something
+# dumb like delay startup until the network is up. It happens
+# at some time after network.target
+WantedBy=multi-user.target
+EOF
+ sudo systemctl daemon-reload # needed if the file was already there
+ sudo systemctl enable imount.service
+ sudo systemctl start imount.service
+fi
+
+# /nocow: a btrfs subvolume with chattr +C (copy-on-write disabled),
+# presumably for files that fragment badly under cow — TODO confirm use.
+dir=/nocow
+if has_btrfs; then
+  if ! mountpoint $dir; then
+    subvol=/mnt/root/nocow
+    if [[ ! -e $subvol ]]; then
+      s btrfs subvolume create $subvol
+      # 1000 is presumably the primary user's gid — confirm
+      s chown root:1000 $subvol
+      s chattr +C $subvol
+    fi
+
+    # device mounted at /, e.g. the dm-crypt mapping; reuse it as the
+    # fstab source for the subvolume mount
+    first_root_crypt=$(awk '$2 == "/" {print $1}' /etc/mtab)
+    tu /etc/fstab <<EOF
+$first_root_crypt /nocow btrfs noatime,subvol=nocow 0 0
+EOF
+    s mkdir -p $dir
+    s chown $USER:$USER $dir
+    s mount $dir
+  fi
+else
+  # no btrfs: plain directory so the path exists uniformly on all hosts
+  sudo mkdir -p $dir
+fi
+
+# ssh and probably some other things care about parent directory
+# ownership, and ssh doesn\'t allow any group writable parent
+# directories, so we are forced to use a directory structure similar
+# to home directories
+s chown root:$USER /q
+s chmod 755 /q
+
+
+# it comes with stretch and arch, but not jessie.
+# propogate /etc/udev/hwdb.d
+if which systemd-hwdb; then
+ s systemd-hwdb update
+ ser restart systemd-udev-trigger
+fi
+
+
+# mail transfer agent: exim4 on debian-family, postfix elsewhere
+if isdeb; then
+  # I've had problems with postfix on debian:
+  # on stretch, a startup ordering issue caused all mail to fail.
+  # postfix changed defaults to only use ipv6 dns, causing all my mail to fail.
+  # exim4 is default on debian, so I assume it would
+  # be packaged better to avoid these types of things.
+  # I haven't gotten around to getting a non-debian exim
+  # setup.
+  mail-setup exim4
+else
+  mail-setup postfix
+fi
+
+if isubuntu; then
+  # disable crash report annoying dialogs.
+  s dd of=/etc/default/apport <<<'enabled=0'
+fi
+
+# fai sets this an old way that doesn't work for stretch.
+# no harm in setting it universally here.
+# using debconf-set-selection, the area gets reset to ETC
+# on my linode test machine after doing a dpkg-reconfigure, or a reinstall,
+# so we are using expect :(
+# I got a random error when running this, so I added a sleep
+# rather than trying to write a whole detect and wait loop.
+# E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)
+# E: Unable to lock the administration directory (/var/lib/dpkg/), is another process using it?
+sleep 1
+# todo: this is not idempotent, it fails when running twice, due to prepopulated values.
+# check into unsetting them using debconf-set-selection.
+s apt-get -y install --no-install-recommends expect
+# drive dpkg-reconfigure tzdata's readline prompts: \025 is octal for
+# ctrl-u (clear the input line), then the menu item number is typed.
+# 12 and 5 presumably pick the US/eastern zone — TODO confirm.
+s expect <<EOF ||:
+set force_conservative 0
+spawn dpkg-reconfigure tzdata -freadline
+expect -nocase timeout {exit 1} "Geographic area:"
+send "\02512\r"
+expect -nocase timeout {exit 1} "Time zone:"
+send "\0255\r"
+expect eof
+exit
+EOF
+
+
+# X / desktop-only setup starts here
+if has_x; then
+  if isarch; then
+    # install it so its build dependencies don't get removed.
+
+    # emacs git build is currently broken, aur path disabled for now
+    if false; then
+      x=$(mktemp -d)
+      pushd $x
+      aurex emacs-git
+      makepkg -si --noconfirm
+      popd
+      rm -rf $x
+    else
+      pi emacs
+    fi
+    # spell checking dictionaries
+    pi hunspell hunspell-en
+  else
+    # non-arch: build emacs from my build script. --no-r presumably
+    # skips a reconfigure/rebuild step; fall back to a full build if
+    # the fast path fails — TODO confirm flag meaning.
+    if $recompile; then
+      /a/bin/buildscripts/emacs
+    else
+      /a/bin/buildscripts/emacs --no-r || /a/bin/buildscripts/emacs
+    fi
+  fi
+
+  # todo, figure this out for arch if we ever try out gnome.
+  if ! isarch; then
+    # install for multiple display managers in case we use one
+    if isdeb; then
+      dir=/etc/gdm3
+    elif isfedora; then
+      # fedora didn't have the 3.
+      dir=/etc/gdm
+    fi
+    # NOTE(review): if neither isdeb nor isfedora matches, $dir keeps
+    # its previous value from earlier in the script — confirm every
+    # non-arch distro reaching here is deb- or fedora-based.
+    s mkdir -p $dir/PostLogin
+    s command cp /a/bin/distro-setup/desktop-20-autostart.sh $dir/PostLogin/Default
+    # -p so a rerun doesn't abort when the directory already exists
+    s mkdir -p /etc/lightdm/lightdm.conf.d
+    s dd of=/etc/lightdm/lightdm.conf.d/12-iank.conf <<'EOF'
+[SeatDefaults]
+session-setup-script=/a/bin/distro-setup/desktop-20-autostart.sh
+EOF
+  fi
+
+
+  pi ghc sakura
+  # todo, also note for work comp, scp opt/org-mode bin/build-scripts
+
+  # use the package manger version to install the cabal version
+  pi cabal-install
+  cabal update
+  PATH="$PATH:$HOME/.cabal/bin"
+
+  # todo, on older ubuntu I used cabal xmonad + xfce,
+  # see /a/bin/old-unused/xmonad-cabal.sh
+
+  # trying out the distros versions newer distros
+  pi xmonad
+  if isarch; then
+    # for displaying error messages.
+    # optional dependency in arch, standard elsewhere.
+    pi xorg-xmessage xmonad-contrib xorg-xsetroot xorg-xinit
+
+    # set up a per-user xinitrc that starts xmonad instead of twm
+    # https://wiki.archlinux.org/index.php/Xinitrc
+    for homedir in /home/*; do
+      cp /etc/X11/xinit/xinitrc $homedir/.xinitrc
+      # drop the stock twm session block and everything after it
+      $sed -ri '/^ *twm\b/,$d' $homedir/.xinitrc
+      tee -a $homedir/.xinitrc <<'EOF'
+/a/bin/desktop-20-autostart.sh
+xsetroot -cursor_name left_ptr
+exec xmonad
+EOF
+    done
+  else
+    pi suckless-tools
+  fi
+  pi dmenu
+
+  # laptop-only packages (tp and x2 are the laptops)
+  if tp || x2; then
+    case $distro in
+      debian)
+        pi task-laptop
+        ;;
+      ubuntu|trisquel)
+        # the exact packages that task-laptop would install, since ubuntu
+        # doesn't have this virtual in practice package.
+        pi avahi-autoipd bluetooth powertop iw wireless-tools wpasupplicant
+        ;;
+      # todo: other distros unknown
+    esac
+  fi
+fi
+
+# the first pup command can kill off our /etc/ mod, so rerun this
+/a/exe/ssh-emacs-setup
+echo "$0: $(date): ending now"
+exit 0
--- /dev/null
+#!/bin/bash -l
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# errcatch presumably installs the error trap; defined in the login
+# bashrc pulled in by the -l shebang — TODO confirm.
+errcatch
+
+set -x
+
+# append all output to a persistent log while keeping it on the terminal
+exec &> >(sudo tee -a /var/log/distro-end)
+# fixed: message had a stray trailing ")"
+echo "$0: $(date): starting now"
+
+# directory containing this script
+src="${BASH_SOURCE%/*}"
+
+# Read an entire heredoc from stdin and accumulate it into the global
+# end_msg_var, which is printed once the script finishes.
+# see example of usage to understand.
+end_msg() {
+  local msg_chunk
+  # read until EOF (no NUL delimiter ever arrives); read then returns
+  # nonzero, so mask the status to survive the error trap
+  IFS= read -r -d '' msg_chunk || :
+  end_msg_var+=$msg_chunk
+}
+
+spa() { # simple package add
+  # Queue package names for one later batch install. "$@" is quoted so
+  # arguments that are already separate words are not re-split or
+  # glob-expanded here; callers wanting word splitting do it at the
+  # call site (e.g. spa $(apt-cache search ...)).
+  simple_packages+=("$@")
+}
+
+distro=$(distro-name)
+
+pending_reboot=false
+# GNU sed flag: -i on a symlinked /etc file edits the target instead of
+# replacing the symlink with a regular file
+sed="sed --follow-symlinks"
+
+# template
+case $distro in
+esac
+
+# pup presumably updates package lists + upgrades (bashrc helper) — confirm
+pup
+pi aptitude
+
+# baseline packages wanted on every host; installed in one batch later
+simple_packages=(
+  htop
+  iptables
+  mailutils
+  nmon
+  rdiff-backup
+  ruby
+  ruby-rest-client
+  tree
+  vim
+  wcd
+  wget
+)
+
+# workstation/desktop package set; skipped on the servers (lj, li)
+case $HOSTNAME in
+  lj|li) : ;;
+  *)
+    # universal packages
+    # swh-plugins is for karaoke pulsaudio filter.
+    # mutagen for pithos
+    # guvcview set webcam brightness to highest
+    # pidgin-otr, i went into pidgin pluggin settings and generated a key for some accounts
+    # xawtv has webcam cli control. v4lctl bright 80%; v4lctl list
+    # guvcview also adjusts webcam
+    simple_packages+=(
+      adb
+      apache2
+      apache2-doc
+      apt-doc
+      apt-listchanges
+      aptitude-doc-en
+      bash-doc
+      beets
+      beets-doc
+      binutils-doc
+      bind9-doc
+      bind9utils
+      bwm-ng
+      cloc
+      cpulimit
+      cron
+      debootstrap
+      debconf-doc
+      dirmngr
+      dnsutils
+      dnsmasq
+      dtrx
+      duplicity
+      eclipse
+      evince
+      fdupes
+      feh
+      filelight
+      flashrom
+      gawk-doc
+      gcc-doc
+      gdb
+      gdb-doc
+      geoip-bin
+      git-doc
+      git-email
+      gitk
+      glibc-doc
+      goaccess
+      gnome-screenshot
+      guvcview
+      i3lock
+      inetutils-traceroute
+      iperf3
+      iproute2-doc
+      jq
+      kid3-qt
+      kid3-cli
+      konsole
+      libreoffice
+      linphone
+      linux-doc
+      locate
+      lshw
+      make-doc
+      manpages
+      manpages-dev
+      mb2md
+      meld
+      mps-youtube
+      mpv
+      mumble
+      nagstamon
+      ncdu
+      nginx-doc
+      nmap
+      offlineimap
+      oathtool
+      opendkim-tools
+      p7zip
+      paprefs
+      parted-doc
+      pavucontrol
+      pdfgrep
+      perl-doc
+      pianobar
+      pidgin
+      pidgin-otr
+      pry
+      python-autopep8
+      python3-doc
+      qrencode
+      reportbug
+      # doc package matching whatever concrete ruby the metapackage
+      # depends on, e.g. ruby2.3 -> ruby2.3-doc
+      $(aptitude show ruby | sed -rn 's/Depends: (.*)/\1/p')-doc
+      schroot
+      sqlite3-doc
+      squashfs-tools
+      swh-plugins
+      tar-doc
+      tcpdump
+      telnet
+      transmission-remote-gtk
+      vlc
+      whois
+      wondershaper
+      xawtv
+      xbacklight
+      xprintidle
+      xscreensaver
+      xscreensaver-data-extra
+      xscreensaver-gl
+      xscreensaver-gl-extra
+    )
+    # versioned ruby doc packages, unquoted on purpose so each package
+    # name becomes its own argument to spa
+    spa $(apt-cache search ruby[.0-9]+-doc| awk '{print $1}')
+    ;;
+esac
+
+
+########### begin section including li ################
+
+# conflink presumably symlinks my config files into place; defined in
+# another of my repos — confirm there.
+conflink
+
+case $distro in
+  arch)
+    # ubuntu 14.04 uses b-cron,
+    # but its not maintained in arch.
+    # of the ones in the main repos, cronie is only one maintained.
+    # fcron appears abandoned software.
+    pi cronie
+    sgo cronie
+    ;;
+  *) : ;; # other distros come with cron.
+esac
+
+
+
+# ";;&" keeps testing later patterns, so arch both installs at (first
+# arm) and enables its service (second arm)
+case $distro in
+  *) pi at ;;&
+  arch) sgo atd ;;
+esac
+
+
+case $distro in
+  debian|trisquel|ubuntu) pi curl;;
+  arch) : ;;
+  # fedora: unknown
+esac
+
+case $distro in
+  # tk for gitk
+  arch) spa git tk ;;
+  *) spa git ;;
+esac
+
+case $distro in
+  arch) spa the_silver_searcher ;;
+  debian|trisquel|ubuntu) spa silversearcher-ag ;;
+  # fedora unknown
+esac
+
+case $distro in
+  debian|trisquel|ubuntu) spa ntp;;
+  arch)
+    pi ntp
+    sgo ntpd
+    ;;
+  # others unknown
+esac
+
+
+# no equivalent in other distros:
+case $distro in
+  debian|trisquel|ubuntu)
+    pi aptitude
+    # apt-file: search which package ships a given file
+    if ! dpkg -s apt-file &>/dev/null; then
+      # this condition is just a speed optimization
+      pi apt-file
+      s apt-file update
+    fi
+    # for debconf-get-selections
+    spa debconf-utils
+    ;;
+esac
+
+
+case $distro in
+  arch|debian|trisquel|ubuntu)
+    spa bash-completion
+    ;;
+  # others unknown
+esac
+
+
+
+
+
+# disable motd junk.
+case $distro in
+  debian)
+    # allows me to pipe with ssh -t, and gets rid of spam
+    # http://forums.debian.net/viewtopic.php?f=5&t=85822
+    # i'd rather disable the service than comment the init file
+    # this says disabling the service, it will still get restarted
+    # but this script doesn't do anything on restart, so it should be fine
+    s dd of=/var/run/motd.dynamic if=/dev/null
+    # stretch doesn't have initscripts pkg installed by default
+    if [[ $(debian-codename) == jessie ]]; then
+      s update-rc.d motd disable
+    fi
+    ;;
+  trisquel|ubuntu)
+    # this isn't a complete solution. It still shows me when updates are available,
+    # but it's no big deal.
+    # NOTE(review): "t" is a helper, not a standard tool — confirm it
+    # neuters these motd fragments (truncate?) in my bashrc.
+    s t /etc/update-motd.d/10-help-text /etc/update-motd.d/00-header
+    ;;
+esac
+
+# automatic updates
+# reference:
+# https://debian-handbook.info/browse/stable/sect.regular-upgrades.html
+# /etc/cron.daily/apt calls unattended-upgrades
+# /usr/share/doc/unattended-upgrades# cat README.md
+# /etc/apt/apt.conf.d/50unattended-upgrades
+if isdebian; then
+  setup-debian-auto-update
+fi
+
+# we've got a few dependencies later on, so install them now.
+# eatmydata disables fsync to speed up the big package install below.
+pi eatmydata; PI_PREFIX=eatmydata
+pi "${simple_packages[@]}"
+simple_packages=()
+
+
+### begin docker install ####
+
+if isdeb; then
+  # https://store.docker.com/editions/community/docker-ce-server-debian?tab=description
+  pi software-properties-common apt-transport-https
+  # NOTE(review): apt-key is deprecated on newer debian; fine for the
+  # releases this targets.
+  curl -fsSL https://download.docker.com/linux/$(distro-name-compat)/gpg | sudo apt-key add -
+  sudo add-apt-repository \
+    "deb [arch=amd64] https://download.docker.com/linux/$(distro-name-compat) \
+   $(debian-codename-compat) \
+   stable"
+  p update
+  pi docker-ce
+  sgo docker
+  # other distros unknown
+fi
+### end docker install ####
+
+
+### begin certbot install ###
+case $distro in
+  debian)
+    # note, need python-certbot-nginx for nginx, but it depends on nginx,
+    # and I'm not installing nginx by default right now.
+    # note python-certbot-apache is in suggests, but so is a doc package that brought in xorg
+    if [[ $(debian-codename) == jessie ]]; then
+      pi -t jessie-backports certbot python-certbot-apache
+    else
+      pi certbot python-certbot-apache
+    fi
+    ;;
+  trisquel|ubuntu)
+    # not packaged in xenial or flidas
+    pi software-properties-common
+    # this fails with:
+    #
+    # gpg: key 75BCA694: public key "Launchpad PPA for certbot" imported
+    # gpg: Total number processed: 1
+    # gpg:               imported: 1
+    # gpg: no valid OpenPGP data found.
+    # Failed to add key.
+    #
+    # but it seems to work fine, perhaps it's only failing on the second run.
+    s add-apt-repository -y ppa:certbot/certbot ||:
+    p update
+    pi python-certbot-apache
+    ;;
+  # todo: other distros unknown
+esac
+# make a version of the certbot timer that emails me.
+# the sed program arrives on stdin (-f -): copy the stock units into
+# /etc with tweaks so failures/output get mailed via sysd-mail-once.
+x=/systemd/system/certbot
+$sed -r -f - /lib$x.timer <<'EOF' |s dd of=/etc${x}mail.timer
+s,^Description.*,\0 mail version,
+EOF
+$sed -r -f - /lib$x.service <<'EOF' |s dd of=/etc${x}mail.service
+s,(ExecStart=)(/usr/bin/certbot),\1/a/bin/log-quiet/sysd-mail-once certbotmail \2 --renew-hook /a/bin/distro-setup/certbot-renew-hook,
+EOF
+ser daemon-reload
+sgo certbotmail.timer
+### end certbot install ###
+
+
+# dogcam setup
+case $HOSTNAME in
+ lj|li)
+ /a/bin/webcam/install-server
+ ;;
+ kw)
+ /a/bin/webcam/install-client
+ ;;
+esac
+
+# website setup
+case $HOSTNAME in
+ lj|li)
+
+ case $HOSTNAME in
+ lj) domain=iank.bid; exit 0 ;;
+ li) domain=iankelling.org ;;
+ esac
+ /a/h/setup.sh $domain
+ /a/h/build.rb
+
+ sudo -E /a/bin/mediawiki-setup/mw-setup-script
+
+ pi-nostart mumble-server
+ s $sed -ri "s/^ *(serverpassword=).*/\1$(< /a/bin/bash_unpublished/mumble_pass)/" /etc/mumble-server.ini
+
+ # do certificate to avoid warning about unsigned cert,
+ # which is overkill for my use, but hey, I'm cool, I know
+ # how to do this.
+ web-conf apache2 mumble.iankelling.org
+ s rm -f /etc/apache2/sites-enabled/mumble.iankelling.org
+ sudo -i <<'EOF'
+export RENEWED_LINEAGE=/etc/letsencrypt/live/mumble.iankelling.org
+/a/bin/distro-setup/certbot-renew-hook
+EOF
+
+ sgo mumble-server
+
+ vpn-server-setup -rd
+ s tee /etc/openvpn/client-config/mail <<'EOF'
+ifconfig-push 10.8.0.4 255.255.255.0
+EOF
+
+  # it's strange. docker seems to make the default for forward
+  # be drop, but then I set it to accept and it's stuck that way,
+  # I dun know why. But, let's make sure we can forward anyways.
+  s DEBIAN_FRONTEND=noninteractive pi iptables-persistent
+  # the rules file is root-owned, so remove it as root; -f so a rerun
+  # after it is already gone does not abort the script
+  s rm -f /etc/iptables/rules.v6
+  s tee /etc/iptables/rules.v4 <<'EOF'
+*filter
+-A FORWARD -i tun+ -o eth0 -j ACCEPT
+-A FORWARD -i eth0 -o tun+ -j ACCEPT
+COMMIT
+EOF
+
+
+ sudo dd of=/etc/systemd/system/vpnmail.service <<EOF
+[Unit]
+Description=Turns on iptables mail nat
+
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/a/bin/distro-setup/vpn-mail-forward start
+ExecStop=/a/bin/distro-setup/vpn-mail-forward stop
+
+[Install]
+WantedBy=openvpn.service
+EOF
+ ser daemon-reload
+ ser enable vpnmail.service
+ # needed for li's local mail delivery.
+ tu /etc/hosts <<<"10.8.0.4 mail.iankelling.org"
+ if [[ -e /lib/systemd/system/openvpn-server@.service ]]; then
+ vpn_service=openvpn-server@server
+ else
+ vpn_service=openvpn@server
+ fi
+ sgo $vpn_service
+ # setup let's encrypt cert
+ web-conf apache2 mail.iankelling.org
+ s rm /etc/apache2/sites-enabled/mail.iankelling.org{,-redir}.conf
+ ser reload apache2
+
+ domain=cal.iankelling.org
+ web-conf -f 10.8.0.4:5232 - apache2 $domain <<'EOF'
+#https://httpd.apache.org/docs/2.4/mod/mod_authn_core.html#authtype
+# https://stackoverflow.com/questions/5011102/apache-reverse-proxy-with-basic-authentication
+ <Location />
+ Options +FollowSymLinks +Multiviews +Indexes
+ AllowOverride None
+ AuthType basic
+ AuthName "Authentication Required"
+ # setup one time, with root:www-data, 640
+ AuthUserFile "/etc/caldav-htpasswd"
+ Require valid-user
+ </Location>
+EOF
+ # nginx version of above would be:
+ # auth_basic "Not currently available";
+ # auth_basic_user_file /etc/nginx/caldav/htpasswd;
+
+
+ ########## begin pump.io setup ##########
+
+ # once pump adds a logrotation script, turn off nologger,
+ # and add
+ # "logfile": "/var/log/pumpio/pumpio.log",
+ #
+ s dd of=/etc/pump.io.json <<'EOF'
+{
+ "secret": "SECRET_REPLACE_ME",
+ "driver": "mongodb",
+ "params": { "dbname": "pumpio" },
+ "noweb": false,
+ "site": "pump.iankelling.org",
+ "owner": "Ian Kelling",
+ "ownerURL": "https://iankelling.org/",
+ "port": 8001,
+ "urlPort": 443,
+ "hostname": "pump.iankelling.org",
+ "nologger": true,
+ "datadir": "/home/pumpio/pumpdata",
+ "enableUploads": true,
+ "debugClient": false,
+ "disableRegistration": true,
+ "noCDN": true,
+ "key": "/home/pumpio/privkey.pem",
+ "cert": "/home/pumpio/fullchain.pem",
+ "address": "localhost",
+ "sockjs": false
+}
+EOF
+ s sed -i "s#SECRET_REPLACE_ME#$(cat /p/c/machine_specific/li/pump-secret)#" /etc/pump.io.json
+
+ # stretch node is too old
+ # https://nodejs.org/en/download/package-manager/
+ curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
+ pi nodejs graphicsmagick mongodb
+ cd /home/iank
+ if [[ -e pump.io ]]; then
+ cd pump.io
+ git pull
+ else
+ git clone https://github.com/pump-io/pump.io.git
+ cd pump.io
+ fi
+ # note: doing this or the npm install pump.io as root had problems.
+ npm install
+ npm run build
+ # normally, next command would be
+ # s npm install -g odb
+ # but it\'s this until a bug in pump gets fixed
+ # https://github.com/pump-io/pump.io/issues/1287
+ s npm install -g databank-mongodb@0.19.2
+ if ! getent passwd pumpio &>/dev/null; then
+ s useradd -m -s /bin/false pumpio
+ fi
+ sudo -u pumpio mkdir -p /home/pumpio/pumpdata
+ # for testing browser when only listening to localhost,
+ # in the pump.io.json, set hostname localhost, urlPort 5233
+ #ssh -L 5233:localhost:5233 li
+
+ s mkdir -p /var/log/pumpio/
+ s chown pumpio:pumpio /var/log/pumpio/
+
+ web-conf - apache2 pump.iankelling.org <<'EOF'
+# currently a bug in pump that we cant terminate ssl
+ SSLProxyEngine On
+ ProxyPreserveHost On
+ ProxyPass / https://127.0.0.1:8001/
+ ProxyPassReverse / https://127.0.0.1:8001/
+ # i have sockjs disabled per people suggesting that
+ # it won\'t work with apache right now.
+ # not sure if it would work with this,
+ # but afaik, this is pointless atm.
+ <Location /main/realtime/sockjs/>
+ ProxyPass wss://127.0.0.1:8001/main/realtime/sockjs/
+ ProxyPassReverse wss://127.0.0.1:8001/main/realtime/sockjs/
+ </Location>
+EOF
+
+ sudo -i <<'EOF'
+export RENEWED_LINEAGE=/etc/letsencrypt/live/pump.iankelling.org
+/a/bin/distro-setup/certbot-renew-hook
+EOF
+
+ s dd of=/etc/systemd/system/pump.service <<'EOF'
+[Unit]
+Description=pump.io
+After=syslog.target network.target
+
+[Service]
+Type=simple
+User=pumpio
+Group=pumpio
+ExecStart=/home/iank/pump.io/bin/pump
+Environment=NODE_ENV=production
+# failed to find databank-mongodb without this.
+# I just looked at my environment variables took a guess.
+Environment=NODE_PATH=/usr/lib/nodejs:/usr/lib/node_modules:/usr/share/javascript
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ ser daemon-reload
+ sgo pump
+ ########## end pump.io setup ############
+
+
+ ############# begin setup mastodon ##############
+
+ # main doc is Docker-Guide.md in docs repo
+
+ # I'd like to try gnu social just cuz of gnu, but it's not being
+ # well maintained, for example, simple pull requests
+ # languishing:
+ # https://git.gnu.io/gnu/gnu-social/merge_requests/143
+ # and I submitted my own bugs, basic docs are broken
+ # https://git.gnu.io/gnu/gnu-social/issues/269
+
+ # note, docker required, but we installed it earlier
+
+  # i subscribed to https://github.com/docker/compose/releases.atom
+ # to see release notes.
+ # i had some problems upgrading. blew things away with
+ # docker-compose down
+ # docker rmi $(docker images -q)
+ # s reboot now
+ # when running docker-compose run, kernel stack traces are printed to the journal.
+ # things seem to succeed, google says nothing, so ignoring them.
+ curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` | s dd of=/usr/local/bin/docker-compose
+ s chmod +x /usr/local/bin/docker-compose
+
+
+ cd ~
+ s rm -rf mastodon
+ i clone https://github.com/tootsuite/mastodon
+ cd mastodon
+ # subbed to atom feed to deal with updates
+ git checkout $(git tag | grep -v rc | tail -n1)
+
+ # per instructions, uncomment redis/postgres persistence in docker-compose.yml
+ sed -i 's/^#//' docker-compose.yml
+
+ cat >.env.production <<'EOF'
+REDIS_HOST=redis
+REDIS_PORT=6379
+DB_HOST=db
+DB_USER=postgres
+DB_NAME=postgres
+DB_PASS=
+DB_PORT=5432
+
+LOCAL_DOMAIN=mast.iankelling.org
+LOCAL_HTTPS=true
+
+SINGLE_USER_MODE=true
+
+SMTP_SERVER=mail.iankelling.org
+SMTP_PORT=25
+SMTP_LOGIN=li
+SMTP_FROM_ADDRESS=notifications@mast.iankelling.org
+SMTP_DOMAIN=mast.iankelling.org
+SMTP_DELIVERY_METHOD=smtp
+EOF
+
+ for key in PAPERCLIP_SECRET SECRET_KEY_BASE OTP_SECRET; do
+ # 1 minute 7 seconds to run this docker command
+ # to generate a secret, and it has ^M chars at the end. wtf. really dumb
+ printf "%s=%s\n" $key "$(docker-compose run --rm web rake secret|dos2unix|tail -n1)" >>.env.production
+ done
+  # pull the smtp password for mail.iankelling.org out of /etc/mailpass
+  # (whitespace-separated: domain port user:pass)
+  found=false
+  while read -r domain port pass; do
+    if [[ $domain == mail.iankelling.org ]]; then
+      found=true
+      # remove the username part
+      pass="${pass#*:}"
+      printf "SMTP_PASSWORD=%s\n" "$pass" >>.env.production
+      break
+    fi
+  done < <(s cat /etc/mailpass)
+  if ! $found; then
+    # diagnostics go to stderr; also fixed the "mastadon" misspelling
+    echo "$0: error, failed to find mailpass domain for mastodon" >&2
+    exit 1
+  fi
+
+ # docker compose makes an interface named like br-8f3e208558f2. we need mail to
+ # get routed to us.
+ if ! s /sbin/iptables -t nat -C PREROUTING -i br-+ -p tcp -m tcp --dport 25 -j DNAT --to-destination 10.8.0.4:25; then
+ s /sbin/iptables -t nat -A PREROUTING -i br-+ -p tcp -m tcp --dport 25 -j DNAT --to-destination 10.8.0.4:25
+ fi
+
+ docker-compose run --rm web rake mastodon:webpush:generate_vapid_key | grep -E '^VAPID_PUBLIC_KEY=|^VAPID_PRIVATE_KEY=' >> .env.production
+ logq docker-compose run --rm web rake db:migrate
+ docker-compose run --rm web rails assets:precompile
+
+ # avatar failed to upload, did
+ # docker logs mastodon_web_1
+ # google lead me to this
+ s chown -R 991:991 public/system
+
+ # docker daemon takes care of starting on boot.
+ docker-compose up -d
+
+ s a2enmod proxy_wstunnel headers
+ web-conf -f 3000 - apache2 mast.iankelling.org <<'EOF'
+ ProxyPreserveHost On
+ RequestHeader set X-Forwarded-Proto "https"
+ ProxyPass /500.html !
+ ProxyPass /oops.png !
+ ProxyPass /api/v1/streaming/ ws://localhost:4000/
+ ProxyPassReverse /api/v1/streaming/ ws://localhost:4000/
+ ErrorDocument 500 /500.html
+ ErrorDocument 501 /500.html
+ ErrorDocument 502 /500.html
+ ErrorDocument 503 /500.html
+ ErrorDocument 504 /500.html
+EOF
+
+
+ ############### !!!!!!!!!!!!!!!!!
+ ############### manual steps:
+
+ # only following 2 people atm, so not bothering to figure out backups
+ # when mastodon has not documented it at all.
+ #
+ # fsf@status.fsf.org
+ # cwebber@toot.cat
+ # dbd@status.fsf.org
+ # johns@status.fsf.org
+
+ # sign in page is at https://mast.iankelling.org/auth/sign_in
+ # register as iank, then
+ # https://github.com/tootsuite/documentation/blob/master/Running-Mastodon/Administration-guide.md
+ # docker-compose run --rm web bundle exec rails mastodon:make_admin USERNAME=iank
+
+ ############# end setup mastodon ##############
+
+ # we use nsupdate to update the ip of home
+ pi bind9
+
+ pi znc
+ # znc config generated by doing
+ # znc --makeconf
+ # selected port is also used in erc config
+ # comma separated channel list worked.
+ # while figuring things out, running znc -D for debug in foreground.
+ # to exit and save config:
+ # /msg *status shutdown
+ # configed auth on freenode by following
+ # https://wiki.znc.in/Sasl
+ # created the system service after, and had to do
+ # mv /home/iank/.znc/* /var/lib/znc
+ # sed -i 's,/home/iank/.znc/,/var/lib/znc,' /var/lib/znc/config/znc.conf
+ # and made a copy of the config files into /p/c
+ # added LoadModule = log -sanitize to the top level
+ # to get into the web interface,
+ # cat /etc/letsencrypt/live/iankelling.org/{privkey,cert,chain}.pem > /var/lib/znc/znc.pem
+ # then use non-main browser or else it doesn't allow it based on ocsp stapling from my main site.
+ # i'm going to figure out how to automate this when it expires. i know i can hook a script into the renewal. https://wiki.znc.in/FAQ seems to imply that znc doesn\'t need restart.
+ # todo: in config file AllowWeb = true should be false. better security if that is off unless we need it.
+  # todo: figure out how to make playback in erc happen.
+  s useradd --create-home -d /var/lib/znc --system --shell /sbin/nologin --comment "Account to run ZNC daemon" --user-group znc || [[ $? == 9 ]] # 9 if it exists already
+  # the datadir is owned by the znc system user, so chmod needs root;
+  # the bare chmod here would fail for the invoking user
+  s chmod 700 /var/lib/znc
+ s dd of=/etc/systemd/system/znc.service 2>/dev/null <<'EOF'
+[Unit]
+Description=ZNC, an advanced IRC bouncer
+After=network-online.target
+
+[Service]
+ExecStart=/usr/bin/znc -f --datadir=/var/lib/znc
+User=znc
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ ser daemon-reload
+ sgo znc
+
+ echo "$0: $(date): ending now)"
+ exit 0
+ ;;
+esac
+
+
+# needed for checkrestart (debian-goodies ships it: finds processes
+# still using deleted/upgraded libraries)
+if isdeb; then
+  spa debian-goodies
+fi
+
+
+
+########### end section including li/lj ###############
+
+case $distro in
+ debian) spa gnome-session-flashback ;;
+ # flidas is missing dependency gnome-panel. others unknown
+esac
+
+
+
+case $distro in
+ trisquel|ubuntu|debian) spa ack-grep ;;
+ arch|fedora) spa ack ;;
+ # fedora unknown
+esac
+
+
+
+case $distro in
+ debian)
+ pi chromium ;;
+ xenial|ubuntu)
+ wget -qO - https://downloads.iridiumbrowser.de/ubuntu/iridium-release-sign-01.pub|sudo apt-key add -
+ cat <<EOF | sudo tee /etc/apt/sources.list.d/iridium-browser.list
+deb [arch=amd64] https://downloads.iridiumbrowser.de/deb/ stable main
+#deb-src https://downloads.iridiumbrowser.de/deb/ stable main
+EOF
+ p update
+ pi iridium-browser
+ ;;
+esac
+
+case $distro in
+ debian)
+ spa cpio-doc ;;
+ # not packaged in flidas. others unknown. gfdl nonfree issue
+esac
+
+
+
+
+case $distro in
+ fedora) spa unrar ;;
+ *) spa unrar-free ;;
+esac
+
+
+### begin home vpn server setup
+
+
+# # this section done initially to make persistent keys.
+# # Also note, I temporarily set /etc/hosts so my host was
+# # b8.nz when running this, since the vpn client config
+# # generator assumes we need to go to that server to get
+# # server keys.
+# vpn-server-setup -rds
+# s cp -r --parents /etc/openvpn/easy-rsa/keys /p/c/filesystem
+# s chown -R 1000:1000 /p/c/filesystem/etc/openvpn/easy-rsa/keys
+# # kw = kgpe work machine.
+# for host in x2 kw; do
+# vpn-mk-client-cert -b $host -n home b8.nz 1196
+# dir=/p/c/machine_specific/$host/filesystem/etc/openvpn/client
+# mkdir -p $dir
+# s bash -c "cp /etc/openvpn/client/home* $dir"
+# # note: /etc/update-resolv-conf-home also exists for all systems with /p
+# done
+
+# key already exists, so this won't generate one, just the configs.
+vpn-server-setup -rds
+s tee -a /etc/openvpn/server/server.conf <<'EOF'
+push "dhcp-option DNS 192.168.1.1"
+push "route 192.168.1.0 255.255.255.0"
+client-connect /a/bin/distro-setup/vpn-client-connect
+EOF
+s sed -i --follow-symlinks 's/10.8./10.9./g;s/^\s*port\s.*/port 1196/' /etc/openvpn/server/server.conf
+
+if [[ $HOSTNAME == tp ]]; then
+ if [[ -e /lib/systemd/system/openvpn-server@.service ]]; then
+ vpn_service=openvpn-server@server
+ else
+ vpn_service=openvpn@server
+ fi
+ sgo $vpn_service
+fi
+### end vpn server setup
+
+# rss2email: poll feeds every 5 minutes and mail new entries (tp only)
+if [[ $HOSTNAME == tp ]]; then
+
+  # note, see bashrc for more documentation.
+  pi rss2email
+  s dd of=/etc/systemd/system/rss2email.service <<'EOF'
+[Unit]
+Description=rss2email
+After=multi-user.target
+
+[Service]
+User=iank
+Type=oneshot
+# about 24 hours of failures
+# (-288 failures at the 5-minute timer interval below)
+# it copies over its files without respecting symlinks, so
+# we pass options to use different location.
+ExecStart=/a/bin/log-quiet/sysd-mail-once -288 rss2email r2e -d /p/c/rss2email.json -c /p/c/rss2email.cfg run
+EOF
+  s dd of=/etc/systemd/system/rss2email.timer <<'EOF'
+[Unit]
+Description=rss2email
+
+[Timer]
+# for initial run. required.
+OnActiveSec=30
+# for subsequent runs.
+OnUnitInactiveSec=300
+
+[Install]
+WantedBy=timers.target
+EOF
+  s systemctl daemon-reload
+  sgo rss2email.timer
+fi
+
+######### begin pump.io periodic backup #############
+if [[ $HOSTNAME == frodo ]]; then
+ s dd of=/etc/systemd/system/pumpbackup.service <<'EOF'
+[Unit]
+Description=pump li backup
+After=multi-user.target
+
+[Service]
+User=iank
+Type=oneshot
+ExecStart=/a/bin/log-quiet/sysd-mail-once pump-backup /a/bin/distro-setup/pump-backup
+EOF
+ s dd of=/etc/systemd/system/pumpbackup.timer <<'EOF'
+[Unit]
+Description=pump li backup hourly
+
+[Timer]
+OnCalendar=hourly
+
+[Install]
+WantedBy=timers.target
+EOF
+ s systemctl daemon-reload
+ sgo pumpbackup.timer
+fi
+######### end pump.io periodic backup #############
+
+case $distro in
+ debian|trisquel|ubuntu)
+ # suggests because we want the resolvconf package. however, i install it earlier
+ # as well, so this is redundant.
+ # todo: check other distros to make sure it\'s installed
+ pi-nostart --install-suggests openvpn
+ # pi-nostart does not disable
+ ser disable openvpn
+ ;;
+ *) pi openvpn;;
+esac
+
+/a/bin/distro-setup/radicale-setup
+
+## android studio setup
+# this contains the setting for android sdk to point to
+# /a/opt/androidsdk, which is asked upon first run
+lnf /a/opt/.AndroidStudio2.2 ~
+# android site says it needs a bunch of packages for ubuntu,
+# but I googled for debian, and someone says you just need lib32stdc++6 plus the
+# jdk
+# https://pid7007blog.blogspot.com/2015/07/installing-android-studio-in-debian-8.html
+# see w.org for more android studio details
+spa lib32stdc++6 default-jdk
+
+
+if [[ $HOSTNAME == frodo ]]; then
+ ############# begin syncthing setup ###########
+
+ # It\'s simpler to just worry about running it in one place for now.
+ # I assume it would work to clone it\'s config to another non-phone
+ # and just run it in one place instead of the normal having a
+ # separate config. I lean toward using the same config, since btrfs
+ # syncs between comps.
+ case $distro in
+ arch) pi syncthing ;;
+ trisquel|ubuntu|debian)
+ # testing has relatively up to date packages
+ if ! isdebian-testing; then
+ # based on error when doing apt-get update:
+ # E: The method driver /usr/lib/apt/methods/https could not be found.
+ pi apt-transport-https
+ # google led me here:
+ # https://apt.syncthing.net/
+ curl -s https://syncthing.net/release-key.txt | sudo apt-key add -
+ s="deb http://apt.syncthing.net/ syncthing release"
+ if [[ $(cat /etc/apt/sources.list.d/syncthing.list) != $s ]]; then
+ echo "$s" | s dd of=/etc/apt/sources.list.d/syncthing.list
+ p update
+ fi
+ fi
+ pi syncthing
+ ;;
+ esac
+ lnf -T /w/syncthing /home/iank/.config/syncthing
+ ser daemon-reload # syncthing likely not properly packaged
+ sgo syncthing@iank # runs as iank
+
+ # these things persist in ~/.config/syncthing, which I save in
+ # /w/syncthing (not in /p, because syncthing should continue to
+ # run on home server even when using laptop as primary device)
+ # open http://localhost:8384/
+ # change listen address from default to tcp://:22001,
+ # this is because we do port forward so it doesn\'t have to use
+ # some external server, but the syncthing is broken for port forward,
+ # you get a message, something "like connected to myself, this should not happen"
+ # when connecting to other local devices, so I bump the port up by 1,
+ # based on
+ # https://forum.syncthing.net/t/connected-to-myself-should-not-happen/1763/19.
+ # Without this, it was being stuck syncing at 0%.
+ # Set gui username and password.
+ #
+ # install syncthing via f-droid,
+ # folder setting, turn off send only.
+ # on phone, add device, click bar code icon
+ # on dekstop, top right, actions, device id
+ # after adding, notification will appear on desktop to confirm
+ #
+ # syncing folder. from phone to desktop: select desktop in the
+ # folder on phone\'s sync options, notification will appear in
+ # desktop\'s web ui within a minute. For the reverse, the
+ # notification will appear in android\'s notifications, you have to
+ # swipe down and tap it to add the folder. It won\'t appear in the
+ # syncthing ui, which would be intuitive, but don\'t wait for it
+ # there. The notification may not work, instead open the web gui
+ # from in the app, there should be a notification within there.
+ #
+ # On phone, set settings to run syncthing all the time, and
+ # show no notification.
+ #
+ # Folder versioning would make sense if I didn\'t already use btrfs
+ # for backups. I would choose staggered, or trash can for more space.
+ #
+ # if needed to install on a remote comp:
+ # ssh -L 8384:localhost:8384 -N frodo
+ # open http://localhost:8384/
+ #
+ # Note, the other thing i did was port forward port 22000,
+ # per https://docs.syncthing.net/users/firewall.html
+
+ ############# end syncthing setup ###########
+fi
+
+
+
+# no equivalent in other distros:
+# (spa presumably queues the package into simple_packages for the batch
+# "pi" near the end of this script — confirm against spa's definition)
+case $distro in
+  debian|trisquel|ubuntu)
+    # for gui bug reporting
+    spa python-vte
+    ;;
+esac
+
+
+####### begin misc packages ###########
+
+# reset terminal-emulator and xscreensaver configs to my stored
+# versions, for me and for traci (reset-* helpers live in my bin repos)
+reset-sakura
+sudo -u traci -i reset-sakura
+reset-konsole
+sudo -u traci -i reset-konsole
+reset-xscreensaver
+# this is packaged, but i see it's gotten a fair amount of development lately,
+# so install from cabal. the options are needed to get over incompatible xmonad library versions
+# but that stuff is in the global namespace, and it seems they don't conflict in practice.
+pi libxss-dev # dependency based on build failure
+cabal update
+cabal install --upgrade-dependencies --force-reinstalls arbtt
+lnf -T /m/arbtt-capture.log ~/.arbtt/capture.log
+
+# seed linphone config from the private store on first run only
+if [[ ! -e ~/.linphonerc && -e /p/.linphonerc-initial ]]; then
+  cp /p/.linphonerc-initial ~/.linphonerc
+fi
+
+
+### begin spd install
+pi libswitch-perl libdigest-md5-file-perl libgnupg-interface-perl
+t=$(mktemp)
+wget -O $t http://mirror.fsf.org/fsfsys-trisquel/fsfsys-trisquel/pool/main/s/spd-perl/spd-perl_0.2-1_amd64.deb
+s dpkg -i $t
+rm $t
+# this guesses at the appropriate directory, adjust if needed
+x=(/usr/lib/x86_64-linux-gnu/perl/5.*)
+# NOTE(review): link target hardcodes perl 5.18.2 while the glob above
+# matches any 5.* — confirm the target still exists on newer perls
+sudo ln -sf ../../../perl/5.18.2/SPD/ $x
+# newer distro had gpg2 as default, older one, flidas, need to make it that way
+x=$(which gpg2)
+if [[ $x ]]; then
+  s lnf -T $x /usr/local/bin/gpg
+fi
+### end spd install
+
+
+# manual follow-up steps that were needed on host kw
+if [[ $HOSTNAME == kw ]]; then
+  cat <<'EOF'
+NOTE: after this finishes, i did
+s nmtui-connect
+# remove br from auto:
+s vim /etc/network/interfaces
+EOF
+fi
+
+# nagstamon setting which were set through the ui
+# in filters tab:
+# all unknown sources
+# all warning services
+# acknowledged hosts & services
+# hosts & services down for maintenance
+# services on down hosts
+# services on hosts in maintenance
+# services on unreachable hosts
+# hosts in soft state
+# services in soft state
+# in display tab: fullscreen
+
+# these translate to these settings I think
+# filter_acknowledged_hosts_services = True
+# filter_all_unknown_services = True
+# filter_all_warning_services = True
+# filter_hosts_in_soft_state = True
+# filter_hosts_services_maintenance = True
+# filter_services_in_soft_state = True
+# filter_services_on_down_hosts = True
+# filter_services_on_hosts_in_maintenance = True
+# filter_services_on_unreachable_hosts = True
+# notify_if_up = False
+# statusbar_floating = False
+# fullscreen = True
+# but i'm just going to rely on the webpage plus sms for now.
+
+
+case $distro in
+  debian|trisquel|ubuntu)
+    # it asks if it should make users in its group capture packets without root,
+    # which is arguably more secure than running wireshark as root. default is no,
+    # which is what i prefer, since I plan to use tcpdump to input to wireshark.
+    s DEBIAN_FRONTEND=noninteractive pi wireshark-gtk
+    ;;
+  # others unknown
+esac
+
+# /run and /dev/shm are listed as required for pulseaudio. All 4 in the group
+# listed in the default config as suggested.
+# /run/user/1000 i noticed was missing for pulseaudio
+# /run/user/0 just seemed like a not bad idea, given the above
+# (tu presumably appends these lines to the file if absent — confirm
+# against tu's definition in my bashrc)
+tu /etc/schroot/desktop/fstab <<'EOF'
+/run /run none rw,bind 0 0
+/run/lock /run/lock none rw,bind 0 0
+/dev/shm /dev/shm none rw,bind 0 0
+/run/shm /run/shm none rw,bind 0 0
+/run/user/1000 /run/user/1000 none rw,bind 0 0
+/run/user/1001 /run/user/1001 none rw,bind 0 0
+/run/user/0 /run/user/0 none rw,bind 0 0
+EOF
+
+# mkschroot DISTRO PKG...
+# Create (or update, if it already exists) a debootstrap-based schroot
+# named DISTRO under /nocow/schroot, register it in schroot's config,
+# and install PKG... inside it.
+mkschroot() {
+  local n=$1
+  shift
+  # remaining args are package names; quote so a stray glob char in an
+  # argument can't expand against the caller's cwd
+  local apps=("$@")
+  local d=/nocow/schroot/$n
+  s dd of=/etc/schroot/chroot.d/$n.conf <<EOF
+[$n]
+description=$n
+type=directory
+directory=$d
+profile=desktop
+preserve-environment=true
+users=$USER,traci
+EOF
+  if [[ -e $d/bin ]]; then
+    # chroot already populated: just bring it up to date
+    s chroot $d apt-get update
+    s chroot $d apt-get -y dist-upgrade --purge --auto-remove
+  else
+    s mkdir -p $d
+    s debootstrap $n $d http://deb.debian.org/debian/
+  fi
+  # cd to $HOME first so schroot's cwd exists inside the chroot;
+  # install step is identical for both branches, so it is hoisted here
+  cd; s schroot -c $n -- apt-get install --allow-unauthenticated -y "${apps[@]}"
+  s cp -P {,$d}/etc/localtime
+}
+s dd of=/etc/systemd/system/schrootupdate.service <<'EOF'
+[Unit]
+Description=schrootupdate
+After=multi-user.target
+
+[Service]
+Type=oneshot
+ExecStart=/a/bin/log-quiet/sysd-mail-once schrootupdate /a/bin/distro-setup/schrootupdate
+EOF
+s dd of=/etc/systemd/system/schrootupdate.timer <<'EOF'
+[Unit]
+Description=schrootupdate
+
+[Timer]
+OnCalendar=*-*-* 04:20:00
+
+[Install]
+WantedBy=timers.target
+EOF
+s systemctl daemon-reload
+sgo schrootupdate.timer
+
+
+
+
+# for my roommate
+case $distro in
+ trisquel)
+ mkschroot stretch firefox-esr pulseaudio chromium
+ ;;
+esac
+
+# user scratch space on the non-copy-on-write volume
+s mkdir -p /nocow/user
+s chown $USER:$USER /nocow/user
+# anki: native package where available, otherwise run it out of a
+# jessie chroot
+case $distro in
+  debian)
+    case $(debian-codename) in
+      jessie)
+        pi anki
+        ;;
+      *)
+        # fix: package name was misspelled "pulsaudio"
+        mkschroot jessie anki pulseaudio mplayer
+        ;;
+    esac
+    ;;
+  trisquel|ubuntu)
+    pi anki
+    ;;
+  # others unknown
+esac
+
+case $distro in
+  debian|trisquel|ubuntu)
+    # note i had to do this, which is persistent:
+    # cd /i/k
+    # s chgrp debian-transmission torrents partial-torrents
+
+    # syslog says things like
+    # 'Failed to set receive buffer: requested 4194304, got 425984'
+    # google suggests giving it even more than that
+    tu /etc/sysctl.conf<<'EOF'
+net.core.rmem_max = 67108864
+net.core.wmem_max = 16777216
+EOF
+    s sysctl -p
+
+    # some reason it doesn't seem to start automatically anyways
+    pi-nostart transmission-daemon
+
+    # the folder was moved here after an install around 02/2017.
+    # it contains runtime data,
+    # plus a simple symlink to the config file which it's
+    # not worth separating out.
+    # between comps, the uid can change
+    f=/i/transmission-daemon
+    s lnf -T $f /var/lib/transmission-daemon/.config/transmission-daemon
+    if [[ -e $f ]]; then
+      s chown -R debian-transmission:debian-transmission $f
+    fi
+    for f in /i/k/partial-torrents /i/k/torrents; do
+      if [[ -e $f ]]; then
+        s chown -R debian-transmission:traci $f
+      fi
+    done
+    s chown -R debian-transmission:debian-transmission /var/lib/transmission-daemon
+    #
+    # config file documented here, and it's the same config
+    # for daemon vs client, so it's documented in the gui.
+    # https://trac.transmissionbt.com/wiki/EditConfigFiles#Options
+    #
+    # I originally setup rpc-whitelist, but after using
+    # routing to a network namespace, it doesn't see the
+    # real source address, so it's disabled.
+    #
+    # Changed the cache-size to 256 mb, reduces disk use.
+    # It is a read & write cache.
+    #
+    # merge my settings into the existing generated json config in place
+    s ruby <<'EOF'
+require 'json'
+p = '/etc/transmission-daemon/settings.json'
+File.write(p, JSON.pretty_generate(JSON.parse(File.read(p)).merge({
+'rpc-whitelist-enabled' => false,
+'rpc-authentication-required' => false,
+'incomplete-dir' => '/i/k/partial-torrents',
+'incomplete-dir-enabled' => true,
+'download-dir' => '/i/k/torrents',
+"speed-limit-up" => 800,
+"speed-limit-up-enabled" => true,
+"peer-port" => 61486,
+"cache-size-mb" => 256,
+"ratio-limit" => 5.0,
+"ratio-limit-enabled" => true,
+})) + "\n")
+EOF
+
+    # make sure it's not enabled, not sure if this is needed
+    ser disable transmission-daemon
+    ;;
+  # todo: others unknown
+esac
+# adapted from /var/lib/dpkg/info/transmission-daemon.postinst
+if ! getent passwd debian-transmission > /dev/null; then
+  case $distro in
+    arch)
+      s useradd \
+        --system \
+        --create-home \
+        --home-dir /var/lib/transmission-daemon \
+        --shell /bin/false \
+        debian-transmission
+      ;;
+    *)
+      s adduser --quiet \
+        --system \
+        --group \
+        --no-create-home \
+        --disabled-password \
+        --home /var/lib/transmission-daemon \
+        debian-transmission
+      ;;
+  esac
+fi
+
+
+# trisquel 8 = openvpn, debian stretch = openvpn-client
+vpn_ser=openvpn-client
+if [[ ! -e /lib/systemd/system/openvpn-client@.service ]]; then
+  vpn_ser=openvpn
+fi
+
+# run transmission inside the vpn's network namespace so all torrent
+# traffic goes out over the vpn. \$MAINPID is backslash-escaped so the
+# heredoc writes a literal $MAINPID for systemd to expand.
+s dd of=/etc/systemd/system/transmission-daemon-nn.service <<EOF
+[Unit]
+Description=Transmission BitTorrent Daemon netns
+After=network.target
+Requires=${vpn_ser}-nn@client.service
+After=${vpn_ser}-nn@client.service
+JoinsNamespaceOf=${vpn_ser}-nn@client.service
+
+[Service]
+#User=debian-transmission
+# notify type doesn't work with sudo
+#Type=notify
+ExecStart=/usr/bin/nsenter --mount=/root/mount_namespaces/client sudo -u debian-transmission /usr/bin/transmission-daemon -f --log-error
+ExecReload=/bin/kill -s HUP \$MAINPID
+PrivateNetwork=true
+Nice=19
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ser daemon-reload
+
+if [[ $HOSTNAME == frodo ]]; then
+  sgo transmission-daemon-nn
+fi
+
+
+######### begin transmission client setup ######
+
+if [[ -e /p/transmission-rpc-pass ]]; then
+  # arch had a default config,
+  # debian had nothing until you start it.
+  # With a little trial and error, here is a minimal config
+  # taken from the generated one, plus changes that the
+  # settings ui does, without a bunch of ui crap settings.
+  #
+  # only settings I set were
+  # hostname
+  # auto-connect
+  # password
+
+  # the password is randomly generated on first run, i copied it out
+  # so it could be used by other hosts.
+  s ruby <<'EOF'
+require 'json'
+p = '/etc/transmission-daemon/settings.json'
+s = JSON.parse(File.read(p))
+s["rpc-password"] = File.read("/p/transmission-rpc-pass").chomp
+File.write p, JSON.pretty_generate(s)
+EOF
+
+  # write a transmission-remote-gtk profile for every local user,
+  # pointing at the daemon behind transmission.b8.nz
+  rpc_pass=$(</p/transmission-rpc-pass)
+  for f in /home/*; do
+    d=$f/.config/transmission-remote-gtk
+    u=${f##*/}
+    s -u $u mkdir -p $d
+    s -u $u dd of=$d/config.json <<EOF
+{
+  "profiles" : [
+    {
+      "profile-name" : "Default",
+      "hostname" : "transmission.b8.nz",
+      "rpc-url-path" : "/transmission/rpc",
+      "username" : "",
+      "password" : "$rpc_pass",
+      "auto-connect" : true,
+      "ssl" : false,
+      "timeout" : 40,
+      "retries" : 3,
+      "update-active-only" : false,
+      "activeonly-fullsync-enabled" : false,
+      "activeonly-fullsync-every" : 2,
+      "update-interval" : 3,
+      "min-update-interval" : 3,
+      "session-update-interval" : 60,
+      "exec-commands" : [
+      ],
+      "destinations" : [
+      ]
+    }
+  ],
+  "profile-id" : 0,
+  "add-options-dialog" : false
+}
+EOF
+  done
+fi
+
+# dunno why it's there, but get rid of it
+case $HOSTNAME in
+  li|lj) s rm -rf /home/linode ;;
+esac
+
+
+# printer
+case $distro in
+  arch)
+    pi cups ghostscript gsfonts # from arch wiki cups page
+    pi hplip # from google
+    s gpasswd -a $USER sys # from arch wiki
+    sgo org.cups.cupsd.service
+    # goto http://127.0.0.1:631
+    # administration tab, add new printer button.
+    # In debian, I could use the recommended driver,
+    # in arch, I had to pick out the 6L driver.
+    ;;
+  debian|trisquel|ubuntu)
+    spa hplip
+    ;;
+  # other distros unknown
+esac
+
+
+case $distro in
+  trisquel|ubuntu|debian) pi --no-install-recommends mairix notmuch ;;
+  fedora|arch) spa mairix notmuch ;;
+esac
+case $distro in
+  arch) spa nfs-utils ;;
+  trisquel|ubuntu|debian) spa nfs-client ;;
+esac
+case $distro in
+  trisquel|ubuntu|debian) spa par2 ;;
+  arch|fedora) spa par2cmdline ;;
+esac
+
+# needed for my tex resume
+case $distro in
+  trisquel|ubuntu|debian) spa texlive-full ;;
+  arch) spa texlive-most ;;
+  # fedora unknown
+esac
+
+
+case $distro in
+  # optional dep for firefox for h.264 video
+  arch) spa gst-libav ;;
+  # other distros, probably come by default
+esac
+
+case $distro in
+  fedora|trisquel|ubuntu|debian) spa gnupg-agent ;;
+  arch) : ;;
+esac
+
+
+case $distro in
+  fedora) spa pinentry-gtk ;;
+  *) : ;; # comes default or with other packages
+esac
+
+case $distro in
+  arch) spa firefox pulseaudio;;
+  trisquel) spa abrowser ;;
+  *) : ;; # comes default or with other packages, or unknown
+esac
+
+
+case $distro in
+  arch) spa ttf-dejavu;;
+  debian|trisquel|ubuntu) spa fonts-dejavu ;;
+  # others unknown
+esac
+
+
+case $distro in
+  arch) spa xorg-xev;;
+  debian|trisquel|ubuntu) spa x11-utils ;;
+  # others unknown
+esac
+
+# ;;& means keep testing later patterns, so the matching distro arm
+# runs and then the *) arm installs virt-manager on every distro
+case $distro in
+  arch) pi virt-install;;&
+  debian|trisquel|ubuntu) pi virtinst ;;&
+  *) pi virt-manager ;; # creates the libvirt group in debian at least
+  # others unknown
+esac
+# allow user to run vms, from debian handbook
+for x in iank traci; do s usermod -a -G libvirt,kvm $x; done
+# bridge networking as user fails. google lead here, but it doesn't work:
+# oh well, I give up.
+# http://wiki.qemu.org/Features-Done/HelperNetworking
+# s mkdir /etc/qemu
+# f=/etc/qemu/bridge.conf
+# s dd of=$f <<'EOF'
+# allow br0
+# EOF
+# #s chown root:qemu $f # debian has something like qemu-libvirt. equivalent?
+# s chmod 640 $f
+
+
+case $distro in
+  arch) spa cdrkit;;
+  debian|trisquel|ubuntu) spa genisoimage;;
+  # others unknown
+esac
+
+case $distro in
+  arch) spa spice-gtk3 ;;
+  debian|trisquel|ubuntu) spa spice-client-gtk;;
+  # others unknown
+esac
+
+# general known for debian/ubuntu, not for fedora
+
+case $distro in
+  debian|trisquel|ubuntu)
+    pi golang-go
+    # a bit of googling, and added settings to bashrc
+    go get -u github.com/mvdan/fdroidcl/cmd/fdroidcl
+    ;;
+  # others unknown
+esac
+
+
+case $distro in
+  arch)
+    # cdrkit for cloud-init isos
+    # dnsmasq & ebtables for nat networking in libvirt
+    # qemu for qemu-img, bind-tools for dig
+    # dmidecode just because syslog complains
+    pi unzip xorg-xmodmap dmidecode ebtables\
+       bridge-utils dnsmasq qemu bind-tools
+    # otherwise we get error about accessing kvm module.
+    # seems like there might be a better way, but google was a bit vague.
+    s $sed -ri '/^ *user *=/d' /etc/libvirt/qemu.conf
+    echo 'user = "root"' | s tee -a /etc/libvirt/qemu.conf
+    # https://bbs.archlinux.org/viewtopic.php?id=206206
+    # # this should prolly go in the wiki
+    sgo virtlogd.socket
+    # guessing this is not needed
+    #sgo virtlogd.service
+    sgo libvirtd
+
+    ;;
+esac
+
+case $distro in
+  arch) pi virtviewer ;;
+  *) : ;; # other distros have it as a dependency afaik.
+esac
+
+
+
+case $distro in
+  fedora) cabal install shellcheck ;;
+  *) spa shellcheck ;;
+  # unknown for older ubuntu
+esac
+
+
+case $distro in
+  arch|debian|trisquel|ubuntu) spa pumpa ;;
+  # others unknown. do have a buildscript:
+  # /a/bin/buildscripts/pumpa ;;
+esac
+
+
+# NOTE(review): debian matches the first arm and stops at ;;, so the
+# second arm only ever runs on trisquel/ubuntu — confirm intended
+case $distro in
+  debian) pi adb ;;
+  debian|trisquel|ubuntu) spa android-tools-adbd ;;
+  # todo: not sure this is needed anymore, or if trisqel etc works even
+  # debian) spa android-tools-adbd/unstable ;;
+  arch) spa android-tools ;;
+  # other distros unknown
+esac
+
+
+
+case $distro in
+  fedora)
+    cd $(mktemp -d)
+    wget ftp://ftp.gnu.org/pub/gnu/global/global-6.5.7.tar.gz
+    ex global*
+    cd global-6.5.7
+    # based on https://github.com/leoliu/ggtags
+    ./configure --with-exuberant-ctags=/usr/bin/ctags
+    make
+    s make install
+    s pip install pygments
+    ;;
+  *)
+    pi global
+    ;;&
+  arch)
+    pi python2-pygments
+    ;;
+  debian|trisquel|ubuntu)
+    pi python-pygments
+    ;;
+esac
+
+
+case $distro in
+  debian)
+    pi task-mate-desktop
+    # in settings, change scrolling to two-finger,
+    # because the default edge scroll doesn't work.
+    pu transmission-gtk
+    ;;
+  trisquel)
+    # mate-indicator-applet and beyond are misc things I noticed diffing a
+    # standard install with mine.
+    pi xorg lightdm mate-desktop-environment mate-desktop-environment-extras mate-indicator-applet anacron
+    ;;
+  # others unknown
+esac
+
+case $distro in
+  arch) spa apg ;;
+  # already in debian
+esac
+
+
+
+
+
+# note this failed running at the beginning of this file,
+# because no systemd user instance was running.
+# Doing systemd --user resulted in
+# Trying to run as user instance, but $XDG_RUNTIME_DIR is not set
+
+if isdebian-testing; then
+  # as of 7/2016, has no unstable deps, and is not in testing anymore.
+  pi synergy/unstable
+else
+  pi synergy
+fi
+
+# case $distro in
+# # ubuntu unknown. probably the same as debian, just check if the
+# # init scripts come with the package.
+# debian)
+# # copied from arch, but moved to etc
+# s dd of=/etc/systemd/user/synergys.service <<'EOF'
+# [Unit]
+# Description=Synergy Server Daemon
+# After=network.target
+
+# [Service]
+# User=%i
+# ExecStart=/usr/bin/synergys --no-daemon --config /etc/synergy.conf
+# Restart=on-failure
+
+# [Install]
+# WantedBy=multi-user.target
+# EOF
+# s dd of=/etc/systemd/user/synergys.socket <<'EOF'
+# [Unit]
+# Conflicts=synergys@.service
+
+# [Socket]
+# ListenStream=24800
+# Accept=false
+
+# [Install]
+# WantedBy=sockets.target
+# EOF
+# # had this fail with 'Failed to connect to bus: No such file or directory'
+# # then when I tried it manually, it worked fine...
+# if ! systemctl --user daemon-reload; then
+# sleep 2
+# echo retrying systemd user daemon reload
+# systemctl --user daemon-reload
+# fi
+# ;;&
+# *)
+# # taken from arch wiki.
+# s dd of=/etc/systemd/system/synergyc@.service <<'EOF'
+# [Unit]
+# Description=Synergy Client
+# After=network.target
+
+# [Service]
+# User=%i
+# ExecStart=/usr/bin/synergyc --no-daemon frodo
+# Restart=on-failure
+# # per man systemd.unit, StartLimitInterval, by default we
+# # restart more than 5 times in 10 seconds.
+# # And this param defaults to 200 milliseconds.
+# RestartSec=3s
+
+# [Install]
+# WantedBy=multi-user.target
+# EOF
+# s systemctl daemon-reload
+# case $HOSTNAME in
+# x2|tp)
+# ser enable synergyc@iank
+# ser start synergyc@iank ||: # X might not be running yet
+# ;;
+# frodo)
+# systemctl --user start synergys ||:
+# systemctl --user enable synergys
+# ;;
+# esac
+# ;;
+# esac
+
+
+pi --no-install-recommends kdeconnect-plasma
+### kdeconnect for gnome. started in /a/bin/distro-setup/desktop-20-autostart.sh
+### but gnome + xmonad not working in flidas, so i disabled it
+pi libgtk-3-dev python3-requests-oauthlib valac cmake python-nautilus libappindicator3-dev
+# build and install the gnome indicator from my checked-out copy
+cd /a/opt/indicator-kdeconnect
+mkdir -p build
+cd build
+cmake .. -DCMAKE_INSTALL_PREFIX=/usr
+make
+sudo make install
+# we can start it manually with /usr/lib/x86_64-linux-gnu/libexec/kdeconnectd
+# it seems, according to
+# /etc/xdg/autostart/kdeconnectd.desktop
+# I'm not seeing the icon, but the clipboard replication is working
+
+
+### model 01 arduino support ###
+# https://github.com/keyboardio/Kaleidoscope/wiki/Install-Arduino-support-on-Linux
+# also built latest arduino in /a/opt/Arduino, (just cd build; ant build; ant run )
+# set arduino var in bashrc,
+# have system config file setup too.
+# dialout group gives access to the keyboard's serial device
+sudo adduser $USER dialout
+case $distro in
+  arch)
+    sudo usermod -a -G uucp $USER
+    ;;
+esac
+lnf -T /a/opt/sketches ~/Arduino
+
+# this is for the mail command too. update-alternatives is kind of misleading
+# since at least its main commands pretend mail does not exist.
+# bsd's mail got pulled in on some dumb dependency, i dunno how.
+s update-alternatives --set mailx /usr/bin/mail.mailutils
+
+######### end misc packages #########
+
+
+# packages I once used before and liked, but don\'t want installed now for
+# various reasons:
+# python-sqlite is used for offlineimap
+# lxappearance python-sqlite dolphin paman dconf-editor
+
+
+
+######## unfinished
+
+# todo, finish configuring smart.
+
+pi smartmontools
+# mostly from https://wiki.archlinux.org/index.php/S.M.A.R.T.
+# turn on smart. background on options:
+# first line, -a = test everyting on all devices.
+# -S on, turn on disk internal saving of vendor specific info,
+# from google, seems like this is usually already on and fairly standard.
+# -o on, turn on 4 hour period non-performance degrading testing.
+# short test daily 2-3am, extended tests Saturdays between 3-4am:
+sched="-s (S/../.././02|L/../../6/03)"
+s sed -i --follow-symlinks "s#^[[:space:]]*DEVICESCAN.*#\
+DEVICESCAN -a -o on -S on -n standby,q $sched \
+-m ian@iankelling.org -M exec /usr/local/bin/smart-notify#" /etc/smartd.conf
+
+# in the default configuration of at least ubuntu 14.04, resolvconf is
+# configured to order any nameservers associated with tun* or tap*
+# before the normal internet interfaces, which means they are always
+# consulted first. This is often slower and undesirable, ie. local dns
+# queries go from 0ms to 10+ or 100+ ms. To reverse the ordering, you
+# can do:
+#sudo sed -i --follow-symlinks '/tun\*\|tap\*/d' /etc/resolvconf/interface-order
+# however, this breaks dns lookup for hosts on the openvpn lan.
+# I can\'t figure out why hosts on the normal lan would not be
+# broken under the default ordering, except the host I was
+# testing with previously had an entry in /etc/hosts.
+
+############# end unfinished
+
+########### misc stuff
+
+
+# register my custom mime handlers
+/a/bin/distro-setup/mymimes
+
+
+# stop autopopping windows when i plug in an android phone.
+# dbus-launch makes this work within an ssh connection, otherwise you get this message,
+# with still 0 exit code.
+# dconf-WARNING **: failed to commit changes to dconf: Cannot autolaunch D-Bus without X11 $DISPLAY
+dbus-launch gsettings set org.gnome.desktop.media-handling automount-open false
+
+
+# on grub upgrade, we get prompts unless we do this
+devs=()
+for dev in $(s btrfs fi show /boot | sed -nr 's#.*path\s+(\S+)$#\1#p'); do
+  devs+=($(devbyid $dev),)
+done
+devs[-1]=${devs[-1]%,} # joined by commas
+s debconf-set-selections <<EOF
+grub-pc grub-pc/install_devices multiselect ${devs[*]}
+EOF
+
+# btrfs maintenance
+sgo btrfsmaint.timer
+sgo btrfsmaintstop.timer
+
+
+# the wiki backup script from ofswiki.org uses generic paths
+s lnf /p/c/machine_specific/li/mw_vars /root
+s lnf /k/backup/wiki_backup /root
+
+# cedit presumably edits this section of the file, returning 1 when it
+# changed something (hence the || guard) — confirm against its definition
+s cedit /etc/goaccess.conf <<'EOF' || [[ $? == 1 ]]
+# all things found from looking around the default config
+# copied existing NCSA Combined Log Format with Virtual Host, plus %L
+log-format %^:%^ %h %^[%d:%t %^] "%r" %s %b "%R" "%u" %D
+time-format %H:%M:%S
+date-format %d/%b/%Y
+log-file /var/log/apache2/access.log
+color-scheme 2
+
+# tip: copy access.log files to a stretch host directory, then run
+# jessie's goaccess is too old for some options, and it\'s
+# not easily installed from a testing.
+# goaccess --ignore-crawlers -f <(cat *) -a -o html > x.html
+EOF
+
+
+case $distro in
+  trisquel|ubuntu|debian)
+    # unison-gtk second, i want it to be default, not sure if that works
+    # with spa. note, I used to install from testing repo when using stable,
+    # but it shouldn't be needed since I wrote a script to handle mismatching
+    # compilers.
+    spa unison unison-gtk
+    ;;
+arch)
+  spa unison gtk2
+  ;;
+esac
+
+case $distro in
+  arch)
+    # default is alsa, doesn't work with pianobar
+    s dd of=/etc/libao.conf <<'EOF'
+default_driver=pulse
+EOF
+    ;;
+esac
+
+# note, for jessie, it depends on a higher version of btrfs-tools.
+#
+# # disabled due to my patch being in btrbk
+# case $distro in
+#   arch|debian|trisquel|ubuntu) pi btrbk ;;
+#   # others unknown
+# esac
+# install btrbk from my checked-out source instead of a package
+cd /a/opt/btrbk
+s make install
+spa pv # for progress bar when running interactively.
+
+# ian: temporarily disabled while hosts are in flux.
+# if [[ $HOSTNAME == tp ]]; then
+#   # backup/sync manually on others hosts for now.
+#   sgo btrbk.timer
+#   # note: to see when it was last run,
+#   # ser list-timers
+# fi
+
+
+
+
+case $distro in
+  debian|trisquel|ubuntu) s gpasswd -a iank adm ;; #needed for reading logs
+esac
+
+# tor
+case $distro in
+  # based on
+  # https://www.torproject.org/docs/rpms.html.en
+  # https://www.torproject.org/docs/debian.html.en
+  # todo: figure out if the running service needs to be restarted upon updates
+
+
+  # todo on fedora: setup non-dev packages
+  fedora)
+    s dd of=/etc/yum.repos.d/torproject.repo <<'EOF'
+[tor]
+name=Tor experimental repo
+enabled=1
+baseurl=http://deb.torproject.org/torproject.org/rpm/tor-testing/fc/20/$basearch/
+gpgcheck=1
+gpgkey=http://deb.torproject.org/torproject.org/rpm/RPM-GPG-KEY-torproject.org.asc
+
+[tor-source]
+name=Tor experimental source repo
+enabled=1
+autorefresh=0
+baseurl=http://deb.torproject.org/torproject.org/rpm/tor-testing/fc/20/SRPMS
+gpgcheck=1
+gpgkey=http://deb.torproject.org/torproject.org/rpm/RPM-GPG-KEY-torproject.org.asc
+EOF
+
+    # to be secure, take a look at the fingerprint reported from the following install, and see if it matches from the link above:
+    # 3B9E EEB9 7B1E 827B CF0A 0D96 8AF5 653C 5AC0 01F1
+    sgo tor
+    /a/bin/buildscripts/tor-browser
+    ;;
+  ubuntu)
+    # add upstream tor apt repo and its signing key, then install
+    tu /etc/apt/sources.list "deb http://deb.torproject.org/torproject.org $(debian-codename) main"
+    gpg --keyserver keys.gnupg.net --recv 886DDD89
+    gpg --export A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89 | sudo apt-key add -
+    p update
+    pi deb.torproject.org-keyring
+    pi tor
+    /a/bin/buildscripts/tor-browser
+    ;;
+  debian)
+    pi tor
+    /a/bin/buildscripts/tor-browser
+    ;;
+  arch)
+    pi tor tor-browser-en
+    sgo tor
+    ;;
+  # ubuntu unknown
+esac
+
+# nfs server
+case $distro in
+  fedora)
+    # end_msg accumulates text printed at the very end of the run
+    end_msg <<'EOF'
+fedora todo: disable the firewall or find a way to automate it.
+there's an unused section in t.org for tramikssion firewall setup
+
+fedora manual config for nfs:
+s firewall-config
+change to permanent configuration
+check the box for nfs
+was hard to figure this out, not sure if this is all needed, but
+unblock these too
+mountd: udp/tcp 20048
+portmapper, in firewall-config its called rpc-bind: udp/tcp 111
+troubleshooting, unblock things in rpcinfo -p
+make sure to reload the firewall to load the persistent configuration
+
+
+EOF
+    pi nfs-utils
+    sgo nfs-server
+    ;;
+  debian|trisquel|ubuntu)
+    pi nfs-server
+    ;;
+  arch)
+    pi nfs-utils || pending_reboot=true
+    sgo rpcbind
+    # this failed until I rebooted
+    sgo nfs-server
+    ;;
+esac
+
+
+
+
+if [[ $HOSTNAME == frodo ]]; then
+  # nohide = export filesystems mounted deeper than the export point
+  # fsid=0 makes this export the "root" export
+  # not documented in the man page, but this means
+  # 1. it can be mounted with a shorthand of server:/
+  # 2. exports that are subdirectories of this one will automatically be mounted
+  tu /etc/exports <<'EOF'
+/k 192.168.1.0/24(rw,fsid=0,nohide,no_root_squash,async,no_subtree_check,insecure)
+EOF
+  s exportfs -rav
+fi
+
+
+# print any notes accumulated by end_msg over the course of the run
+e "$end_msg_var"
+
+
+# persistent virtual machines
+case $distro in
+  debian|trisquel|ubuntu)
+    pi libosinfo-bin;
+    ;;
+esac
+# if I was going to create a persistent vm, i might do it like this:
+# variant=something # from: virt-install --os-variant list
+# s virt-install --noautoconsole --graphics spice,listen=0.0.0.0 \
+  # --disk=/a/images/some_name.qcow2,bus=virtio --vcpus 2 -r 4096 -w bridge=br0 \
+  # -n some_name --import --os-variant $variant --cpu host-model-only
+
+
+
+######### begin stuff belonging at the end ##########
+
+
+case $distro in
+  ubuntu|debian)
+    spa spacefm-gtk3 ;;
+  arch)
+    spa spacefm ;;
+esac
+
+
+# install everything queued up via spa over the course of this script
+pi "${simple_packages[@]}"
+
+
+if $pending_reboot; then
+  echo "$0: pending reboot and then finished. doing it now."
+  s reboot now
+else
+  # fix: removed stray ")" that was inside the final status message
+  echo "$0: $(date): ending now"
+fi
+exit 0
--- /dev/null
+#!/bin/bash -l
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+host=$1
+
+# require exactly one argument, the target hostname; -h also errors out
+# with the usage message
+if [[ ! $host || $host == -h ]]; then
+  echo "$0: error: expected 1 arg of hostname"
+  exit 1
+fi
+
+# sync the setup scripts to the host, then run both setup stages there.
+# fix: $host is now quoted so an accidental space cannot split into
+# extra rlu/ssh arguments.
+rlu "$host" /a/bin/distro-setup/
+ssh "$host" /a/bin/distro-setup/distro-begin
+ssh "$host" /a/bin/distro-setup/distro-end
--- /dev/null
+#!/bin/bash
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+
+# dynamic dns: point the b8.nz apex + wildcard A records at this
+# machine's current public ip, via nsupdate against my own bind server.
+# current A record per the authoritative server; sed prints only the
+# first "has address" line (T skips non-matches, q quits after one)
+cur="$(host -4 b8.nz iankelling.org | sed -rn 's/.*has address (.*)/\1/p;T;q')"
+# the source address the internet sees for us
+ip=$(curl -s4 https://iankelling.org/cgi/pubip)
+
+# note, a simpler way to do this would be to ssh and use
+# "${SSH_CLIENT%% *}
+# to update bind if needed.
+
+if [[ $cur != $ip ]]; then
+  nsupdate -k /p/c/machine_specific/li/filesystem/etc/bind/Kb8.nz.*.private <<EOF
+server iankelling.org
+zone b8.nz
+update delete b8.nz. A
+update add b8.nz. 300 A $ip
+update delete *.b8.nz. A
+update add *.b8.nz. 300 A $ip
+show
+send
+EOF
+fi
+
+# # persistent initial setup for this:
+# # create files in /a/c/machine_specific/li/filesystem/etc/bind
+# # note, conflink also does some group ownership stuff.
+
+# mkc /p/c/machine_specific/li/filesystem/etc/bind
+# s dnssec-keygen -a HMAC-MD5 -b 512 -n HOST b8.nz
+# s chown $USER:$USER *
+
+# f=key.b8.nz
+# cat >$f <<EOF
+# key b8.nz. {
+# 	algorithm HMAC-MD5;
+# 	secret "$(awk '$1 == "Key:" {print $2}' Kb8.nz.*.private)";
+# };
+# EOF
+
+# chmod 640 [kK]*
+
+# myunison -ob li
+# ssh li conflink
+# ssh li ser restart bind9
--- /dev/null
+#!/bin/bash
+offlineimap -a fsf
+sieve-filter -eW -o mail_location=maildir:/nocow/user/fsfmd:LAYOUT=fs:INBOX=/nocow/user/fsfmd/INBOX ~/sieve/fsf.sieve INBOX &>/tmp/fsfsieve.log
+
+
+# to test new rules, update fsf-test.sieve, run these commands, then copy new fsf-test.sieve to fsf.sieve
+# sieve-filter -o mail_location=maildir:/nocow/user/fsfmd:LAYOUT=fs:INBOX=/nocow/user/fsfmd/INBOX ~/sieve/fsf-test.sieve INBOX &>/tmp/testfsfsieve.log
+# sed -rn '/^Performed actions:/{n;n;p}' /tmp/testfsfsieve.log | sort -u
--- /dev/null
+#!/bin/bash
+
+source /a/bin/errhandle/errcatch-function
+source /a/bin/errhandle/bash-trace-function
+errcatch
+
+e() { echo "$*"; "$@"; }
+lnf() { /a/bin/lnf/lnf "$@"; }
+
+shopt -s nullglob
+shopt -s extglob
+
+cd /a/bin
+# if we didn't have just automated files, we could cleanup
+# links to not executable files.
+e rm -rf /a/exe
+
+# sourcing instead of calling script changes runtime from .47s to .36s
+source /a/bin/lnf/lnf >/dev/null ||:
+
+for x in !(unused|unfinished|queue|bash-template|buildscripts|crons|data|examples|log-quiet); do
+ [[ -e $x/.git ]] || continue
+ for y in $x/*; do
+ if [[ -x $y && ! -d $y ]]; then
+ lnf -v /a/bin/$y /a/exe
+ fi
+ done
+done
+
+for x in *; do
+ if [[ ! -d $x && -x $x ]]; then
+ lnf -v /a/bin/$x /a/exe
+ fi
+done
+
+# things we don't want to run in /a, because
+# they are long running and could get in the way of
+# btrfs remounting
+/a/bin/distro-setup/install-my-scripts
+/a/bin/log-quiet/setup
--- /dev/null
+#!/bin/bash
+
+# script for guest accounts to install packages
+regex="^[A-Za-z]*$"
+for arg; do
+ if [[ ! $arg =~ $regex ]]; then
+ echo "expected only alphabetical arguments which are passed to apt get"
+ exit 1
+ fi
+done
+apt-get install "$@"
--- /dev/null
+#!/bin/bash -l
+set -x
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# set to opposite if the order is flipped.
+k2flip=true
+if $k2flip; then
+  k2inorder=false
+else
+  k2inorder=true
+fi
+
+# placeholder hostnames — edit per machine to pick a default type
+case $HOSTNAME in
+  some_x200_laptop) type=laptop ;;
+  hosts_with_keyboards_attached) type=kinesis ;;
+esac
+
+
+
+# an explicit argument overrides the hostname-based default
+case $1 in
+  l) type=laptop ;;
+  k) type=kinesis ;;
+  m) type=model01 ;;
+esac
+
+#set -x
+# mi DEVICE: print the current pointer feedback and acceleration
+# properties for the given xinput device, filtered to the ones we tune.
+mi() {
+  local dev=$1
+  local feedbacks
+  feedbacks=$(xinput --get-feedbacks "$dev")
+  grep "threshold" <<<"$feedbacks"
+  grep "accelNum\|accelDenom" <<<"$feedbacks"
+  xinput --list-props "$dev" | grep "Device Accel Profile\|Device Accel Constant Deceleration\|Device Accel Velocity Scaling"
+}
+# ms DEVICE THRESHOLD NUM/DENOM PROFILE DECEL VSCALING
+# Set pointer acceleration on an xinput device: feedback threshold plus
+# the accel fraction (arg 3 is "numerator/denominator", split by the
+# %/* and #*/ parameter expansions), then the three accel properties;
+# finally print the resulting settings via mi.
+ms() {
+  xinput --set-ptr-feedback "$1" $2 ${3%/*} ${3#*/}
+  xinput --set-prop "$1" 'Device Accel Profile' $4
+  xinput --set-prop "$1" 'Device Accel Constant Deceleration' $5
+  xinput --set-prop "$1" 'Device Accel Velocity Scaling' $6
+  mi "$1"
+}
+# set_device_id PATTERN [PICK_LAST]
+# Find an xinput device whose `xinput --list` line matches PATTERN and
+# store its numeric id in the global variable device_id. $2, when
+# non-empty, is *executed as a command* (in practice the literal
+# strings true/false); when it succeeds the last matching device is
+# taken instead of the first. Returns 1 when nothing matches.
+set_device_id() {
+  if [[ $2 ]] && $2; then
+    cmd="tail -n1"
+  else
+    cmd="head -n1"
+  fi
+  # strip everything up to "id=" and everything after the number
+  if device_id=$(xinput --list | grep "$1" | $cmd); then
+    device_id=${device_id##*id=}
+    device_id=${device_id%%[[:space:]]*}
+    echo "2:$2 device_id=$device_id"
+  else
+    return 1
+  fi
+}
+
+# default via xset q is 660 25. (delay for autorepeat, repeat rate)
+# was at 200 13, but while learning to use keyboardio, i hold keys a bit longer sometimes.
+xset r rate 400 13 # decrease rate delay
+
+# per-mouse tuning; set_device_id leaves the id in the global $device_id
+if set_device_id "SteelSeries World of Warcraft MMO Gaming Mouse"; then
+  ms "$device_id" 100 1000/1 7 4 1
+  #ms "$device_id" 1 7/2 2 2.5 2
+
+  # makes it compatible with what windows sends using synergy
+  # commented out because i never expect to use windows again
+  #xinput --set-button-map "$device_id" 1 2 3 4 5 6 7 6 7 10 11 12 13 14 15
+
+  # under the new "improved" libinput, the mouse speed/accel has
+  # changed all around and is much more limited.
+  # Other xinput commands will fail and this will succeed.
+  xinput --set-prop "$device_id" "libinput Accel Speed" '.8'
+fi
+
# Per-keyboard-type setup: load the matching xkb keymap and configure
# pointing devices. $type was chosen above from hostname / argument.
case $type in
  model01)
    xkbcomp /a/c/model01.xkb $DISPLAY
    ;;
  laptop)
    # original saved with: xkbcomp $DISPLAY /a/c/stretch-11-2016.xkb
    xkbcomp /a/c/x2.xkb $DISPLAY
    xkbset -m # remove mouse keys

    ;;
  kinesis)
    # todo, differentiate for work pc
    #/a/bin/radl
    # this script can be run from gdm hooks where DISPLAY may be unset;
    # log that case rather than letting xkbcomp fail silently
    if [[ -z $DISPLAY ]]; then
      echo "error. empty DISPLAY var" >> /a/gdm-keyboard-error.log
    else
      xkbcomp /a/c/.Xkeymap $DISPLAY

      xkbset m # setup mouse keys, which I have altered

      xkbset exp =m # stop mousekeys expiring after a timeout

      cd / # so xbindkeys does not hold open mountpoints
      killall xbindkeys # having some lag, thinking this might help.
      xbindkeys # configured to grab left and right scroll button presses
    fi

    kinesis2=false
    #right scroll wheel, change from button 4 & 5 to 13 and 14.
    # also changes the middle click to 12, even though I'm not using it anymore
    if set_device_id "04d9:048e"; then
      xinput --set-button-map "$device_id" 1 12 3 13 14 6 7
    else
      kinesis2=true # if we are using the 2nd kinesis which has different device ids
    fi
    if $kinesis2 && set_device_id "USB OPTICAL MOUSE" $k2flip; then
      xinput --set-button-map "$device_id" 1 12 3 14 13 6 7
    fi



    ms 'Kensington Kensington Slimblade Trackball' 100 4000/1 7 6.5 1.5
    xinput --set-button-map 'Kensington Kensington Slimblade Trackball' 0 0 0 4 5 6 7 0 9 10 11 12

    # razer naga middle mouse stopped working. it's settings were:
    #ms "$device_id" 100 1000/1 7 4 1



    # disable the mouse movements mouse wheel
    if $kinesis2; then
      if set_device_id "USB OPTICAL MOUSE" $k2inorder; then
        xinput --set-prop "$device_id" 'Device Accel Constant Deceleration' 10000
        # 12 is to effectively disable the middle click button
        xinput --set-button-map "$device_id" 1 12 3 11 10 6 7
      fi
    else
      if set_device_id "USB Optical Mouse"; then
        xinput --set-prop "$device_id" 'Device Accel Constant Deceleration' 10000
        # 12 is to effectively disable the middle click button
        xinput --set-button-map "$device_id" 1 12 3 10 11 6 7
      fi
    fi

    # gpg-agent setup for backups; only needed on the kinesis hosts
    . /a/bin/bash_unpublished/duplicity-gpg-agent-setup
    ;;
esac
+
# for desktop and htpc: enable evdev middle-button emulation on the
# Logitech unifying receiver, if one is attached.
if set_device_id "Logitech Unifying Device"; then
  xinput --set-prop "$device_id" 'Evdev Middle Button Emulation' 1
fi
+
+#ms 'Kensington Kensington Slimblade Trackball' 1 7/2 2 4 5 = 2.01
+# ms 9 10 20/1 6 4 .2
+#ms 9 10 35/1 6 5 .2 = 1.82
+#ms 9 10 1200/1 7 5 .2 = 1.82 after a practice round
+#
+#ms 9 10 140/1 6 7 .1 = 2.0 after a practice round.
+#feels like the slow is finally too slow, and fast too fast
+# ms 9 10 50/1 6 6 .2 = 1.83 after
+# ms 9 10 90/1 6 6 .1 = 1.86
+#ms 9 10 3000/1 7 6 .1 = 1.81
+#ms 9 1 15/1 3 6 3 = 2.0
+#ms 9 1 10/1 3 4 2 = 1.91
+#ms 9 1 8/1 3 5 4 = 1.98
+#ms 9 1 10/2 2 5 2.5 = 1.99
+#ms 9 100 3000/1 7 5 1 = 1.86
+#ms 9 100 1500/1 7 5 1.5 = 1.87
+#ms 9 100 2200/1 7 6 1.5 = 1.81
+#ms 9 100 2200/1 7 5 1.5 = 1.83
+#ms 9 100 3000/1 7 6 1.5 = 1.88
+#ms 9 1 100/1 4 6 15 = 1.85
+#ms 9 100 3000/1 7 7 1.5 = 1.85, but had a 1.76 on first try...
+# ms 9 100 3500/1 7 7 1.5 = 1.73, 1.68 a1 1.74, 1.83, 1.75, 1.78, 1.76
+# ms 9 100 4000/1 7 7 1.5 = 1.80
+# ms 9 100 4000/1 7 8 1.5 = 1.78
+# ms 9 100 4000/1 7 8 1.8 = 1.88
+# ms 9 100 4800/1 7 8 1.5 = 1.85
+# ms 9 100 4000/1 7 6.5 1.5 = 1.83. stickig with this, upped speeds a bit to make it more practical for normal windows
+# new day
+# ms 9 100 3500/1 7 7 1.5 = 1.92
+# ms 9 1 7/2 2 5 5 = 1.9
+# ms 9 100 3500/1 7 7 1.5 = 1.62
+# felt like having it a bit faster for a while, but switched back
+#ms 'Kensington Kensington Slimblade Trackball' 100 3500/1 7 5 1.5
+
+
+#set +x
+exit 0
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Running these files directly won't be good since we are
+# unmounting the volume they live on.
+# This never really get's run, since we normally only
+# seed these files to other hosts using btrbk-run.
+
+
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"

# cd to this script's directory; quoted so a path containing spaces
# (or glob characters) cannot break the cd
x="$(readlink -f "$BASH_SOURCE")"; cd "${x%/*}"

# e: print the command about to run, then run it
e() { echo "$*"; "$@"; }

# scripts that would interfere with unmounting /a, put them elsewhere
e install guest-apt mount-latest-subvol check-subvol-stale /usr/local/bin
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -eE -o pipefail
# send error diagnostics to stderr, consistent with the sibling scripts
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo "${BASH_SOURCE}" "$@"
# when run non-interactively (cron/systemd), capture all output in a log.
# NOTE(review): this truncates the log while keyscript-on appends (&>>);
# confirm the asymmetry is intentional.
if [[ $- != *i* ]]; then
  exec &>/var/log/keyscript-off.log
  echo "$0: starting. $(date)"
fi
sed="sed --follow-symlinks"

if [[ $HOSTNAME == kw ]]; then
  exit 0
fi

# disable the automatic luks keyscript in the initramfs so the next boot
# prompts for a passphrase
if [[ $($sed -rn 's/^ID=(.*)/\1/p' /etc/os-release) == arch ]]; then
  if grep -q '^\s*FILES=' /etc/mkinitcpio.conf; then
    $sed -ri 's/^\s*FILES=/#\0/' /etc/mkinitcpio.conf # comment out
    mkinitcpio -p linux
  fi
else
  # debian-family: rename the keyscript in crypttab so it is not used
  x=/root/keyscript
  if grep -q "${x}," /etc/crypttab; then
    $sed -i "s#${x},#${x}-manual,#" /etc/crypttab
    update-initramfs -u
  fi
fi

# switch to easy or hard pass which is the same as luks
f=/q/root/shadow/traci
[[ $HOSTNAME != tpnew ]] || usermod -p "$(cat $f)" iank
echo "$0: finished. $(date)"
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -eE -o pipefail
# send error diagnostics to stderr, consistent with the sibling scripts
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo "${BASH_SOURCE}" "$@"
# when run non-interactively (cron/systemd), append all output to a log
if [[ $- != *i* ]]; then
  exec &>>/var/log/keyscript-on.log
  echo "$0: starting. $(date)"
fi
# (removed unused variable rootn=1)

sed="sed --follow-symlinks"

# re-enable the automatic luks keyscript in the initramfs, unless
# keyscript-off left a marker asking us not to
if [[ ! -e /tmp/keyscript-off ]]; then
  if [[ $($sed -rn 's/^ID=(.*)/\1/p' /etc/os-release) == arch ]]; then
    if ! grep -q '^\s*FILES=' /etc/mkinitcpio.conf; then
      $sed -ri 's/^#(\s*FILES=.*)/\1/' /etc/mkinitcpio.conf # uncomment
      mkinitcpio -p linux
    fi
  else
    # debian-family: restore the keyscript name in crypttab
    x=/root/keyscript
    if grep -q "${x}-manual," /etc/crypttab; then
      $sed -i "s#${x}-manual,#${x},#" /etc/crypttab
      update-initramfs -u
    fi
  fi
fi
# switch to easy or hard login pass which is the same as luks
f=/q/root/shadow/traci-simple
[[ $HOSTNAME != tpnew ]] || usermod -p "$(cat $f)" iank

echo "$0: finished. $(date)"
--- /dev/null
+192.168.1.5 x2
+192.168.1.8 tp
--- /dev/null
+#!/bin/bash
+
+# usage: I run this script on a timer to correct the dns on wrt for
+# clients where dns is updated dynamically by openvpn, in the case that
+# openvpn crashes or the computer running openvpn crashes.
+
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

# cd to this script's directory (quoted) so ./lan-dns resolves
x="$(readlink -f "$BASH_SOURCE")"; cd "${x%/*}"

# is_connected: true if openvpn's status log shows $host currently
# connected. (a function instead of a command-in-a-string, which had
# broken nested quoting.)
is_connected() {
  grep -q "^CLIENT_LIST,$host," /run/openvpn-server/status-server.log
}

while read -r ip host; do
  # openvpn's dynamic dns is authoritative while the client is connected
  if is_connected; then continue; fi
  # fetch the router's /etc/hosts into a temp dir for inspection
  dir=$(mktemp -d)
  cd "$dir"
  ssh wrt tar -C /etc -c hosts | tar -x
  # the static entry is already present on the router; nothing to do
  if grep -qFx "$ip $host" hosts; then continue; fi
  # openvpn updates its status file every 60 seconds by default.
  # NOTE(review): this compares the fetched hosts file's mtime against
  # now+60, which is almost never true — presumably it was meant to
  # guard against a stale openvpn status file. TODO confirm intent.
  if (( $(stat -c%Y hosts) > $(( $(date +%s) + 60 )) )); then
    sleep 60
    if is_connected; then continue; fi
  fi
  # cedit exit status 1 is tolerated — presumably "no change needed"
  ssh root@wrt.b8.nz cedit ovpn-$host <<<"$ip $host" || [[ $? == 1 ]]
done <lan-dns
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[[ $EUID == 0 ]] || exec sudo "$BASH_SOURCE" "$@"
+
+source /a/bin/errhandle/errcatch-function
+source /a/bin/errhandle/errallow-function
+source /a/bin/errhandle/bash-trace-function
+errcatch
+
# usage [exit-status]: print help text and exit with the given status.
usage() {
  cat <<'EOF'
Usage: mail-route start|stop|show

Marks tcp packets on port 25 and 143 to be routed through
a vpn ip.
EOF
  exit $1
}

# exactly one subcommand argument is required
if (( $# != 1 )); then
  usage 1
fi
+
# start: wait for the mail vpn tun device to appear, then install the
# mark/SNAT/policy-routing rules via modify. Sets the globals
# iptables_op/ip_op/e that modify consumes.
start() {
  iptables_op=-A
  ip_op=add
  # systemd around stretch release time would wait until openvpn
  # actually connected, making this loop unnecessary, but now it
  # returns immediately, so poll for the tun device.
  local found=false i
  for ((i=1; i<=30; i++)); do
    tun_dev=$(ip a show to 10.8.0.4/24 | sed -rn '1s/^\S+\s+([^:]+).*/\1/p')
    if [[ $tun_dev == tun* ]]; then
      found=true
      break
    fi
    sleep 1
  done
  if ! $found; then
    # diagnostics belong on stderr
    echo "$0: error: timeout waiting for valid tun_dev, currently:$tun_dev" >&2
    exit 1
  fi
  e() { "$@"; }
  # on error partway through, undo what we added
  _errcatch_cleanup=stop
  modify
}
# stop: remove the rules installed by start, best-effort.
stop() {
  iptables_op=-D
  ip_op=del
  # recover the tun device name from the SNAT rule we added in start,
  # since the openvpn interface may already be gone
  tun_dev=$(iptables -t nat -S | sed -rn "s/^-A POSTROUTING -o (tun[[:digit:]]+) -m mark --mark 0x1 -j SNAT --to-source 10.8.0.4$/\1/p"|head -n1) || printf "failed to find tun device.\n"
  # tolerate failures: rules may already be partially removed
  e() { "$@" || printf "maybe ok failure: %s\n" "$*"; }
  modify
}
+
# show: print the current state of the rules/routes this script manages
# plus the tun device's rp_filter setting. Always exits 0.
show() {
  # prefix each command with the script name for readable output
  e() { printf "${0##*/}: %s\n" "$*"; "$@"; }
  e iptables -t mangle -S
  e iptables -t nat -S
  e ip rule
  e ip route show table 1

  tun_dev=$(ip a show to 10.8.0.4/24 | sed -rn '1s/^\S+\s+([^:]+).*/\1/p')
  if [[ $tun_dev == tun* ]]; then
    e sysctl net.ipv4.conf.$tun_dev.rp_filter
  else
    echo "$0: note, no tun device found"
  fi
  exit 0
}
+
+
# code common to start and stop.
# Consumes globals set by the caller: $iptables_op (-A/-D), $ip_op
# (add/del), $tun_dev, and the e() wrapper. Exits the script when done.
modify() {
  # match source or dest port. note, when we send to a port, it picks a random high port as
  # the source.
  for port in 25 143; do # smtp and imap.
    e iptables -t mangle $iptables_op \
      OUTPUT -m tcp -p tcp -m multiport --ports $port -j MARK --set-mark 0x1
    # un-mark traffic to rfc1918 (lan) destinations so it stays local
    e iptables -t mangle $iptables_op \
      OUTPUT -m tcp -p tcp -m multiport --ports $port -j MARK --set-mark 0x0 \
      -d 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
    # note, we could have used a custom chain and returned instead of setting the mark again.
    # in case anyone was ever curious, the inverse of private ips is: #0.0.0.0/5,8.0.0.0/7,11.0.0.0/8,12.0.0.0/6,16.0.0.0/4,32.0.0.0/3,64.0.0.0/2,128.0.0.0/3,160.0.0.0/5,168.0.0.0/6,172.0.0.0/12,172.32.0.0/11,172.64.0.0/10,172.128.0.0/9,173.0.0.0/8,174.0.0.0/7,176.0.0.0/4,192.0.0.0/9,192.128.0.0/11,192.160.0.0/13,192.169.0.0/16,192.170.0.0/15,192.172.0.0/14,192.176.0.0/12,192.192.0.0/10,193.0.0.0/8,194.0.0.0/7,196.0.0.0/6,200.0.0.0/5,208.0.0.0/4,224.0.0.0/3

  done
  # marked packets leave via the vpn with the vpn source address...
  e iptables -t nat $iptables_op POSTROUTING -o $tun_dev -m mark --mark 0x1 -j SNAT --to-source 10.8.0.4
  # ...and are routed via table 1's default route through the vpn
  e ip rule $ip_op fwmark 1 table 1
  # note, this rule does not persist when the tun interface is deleted
  e ip route $ip_op default via 10.8.0.1 table 1

  # on debian this is 0 (no filter), on ubuntu it's 1, which is no good. 0 or 2 both work fine.
  # 2 drops it if the packet is not routable, martian address, or my default route is screwed up,
  # so, eh, might as well. some rhel docs recommend using it.
  e sysctl net.ipv4.conf.$tun_dev.rp_filter=2

  exit 0
}
+
# dispatch the single validated subcommand
case "$1" in
  start) start ;;
  stop) stop ;;
  show) show ;;
  *) usage 1 ;;
esac
+
+
+# background: something like this does not work for packets which
+# exim is replying to. I don't know why.
+#iptables -t mangle -A OUTPUT -m owner --uid-owner Debian-exim -j MARK --set-mark 0x1
--- /dev/null
+#!/bin/bash
+set -x
+
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

# -E preserves the caller's environment (MAIL_HOST etc. are read below)
[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"

# usage [exit-status]: print help text and exit.
usage() {
  cat <<EOF
Usage: ${0##*/} exim4|postfix
Setup exim4 / postfix / dovecot

The minimal assumption we have is that /etc/mailpass exists

-h|--help Print help and exit.
EOF
  exit $1
}

# which MTA to configure; these predicates are used throughout the script
type=$1
postfix() { [[ $type == postfix ]]; }
exim() { [[ $type == exim4 ]]; }

if ! exim && ! postfix; then
  usage 1
fi
+
# the rest of the script configures mail for the invoking user ($u);
# without SUDO_USER that would silently be empty, so make it fatal
# (previously this printed the error but kept going).
if [[ ! $SUDO_USER ]]; then
  echo "$0: error: requires running as nonroot or sudo" >&2
  exit 1
fi
u=$SUDO_USER
+
+
####### begin persistent password instructions ######
+# # exim passwords:
+# # for hosts which have all private files I just use the same user
+# # for other hosts, each one get\'s their own password.
+# # for generating secure pass, and storing for server too:
+# # user=USUALLY_SAME_AS_HOSTNAME
+# user=li
+# f=$(mktemp)
+# apg -m 50 -x 70 -n 1 -a 1 -M CLN >$f
+# s sed -i "/^$user:/d" /p/c/filesystem/etc/exim4/passwd
+# echo "$user:$(mkpasswd -m sha-512 -s <$f)" >>/p/c/filesystem/etc/exim4/passwd
+# echo "mail.iankelling.org $user $(<$f)" >> /p/c/machine_specific/$user/filesystem/etc/mailpass
+# # then run this script, or part of it which uses /etc/mailpass
+
+# # dovecot password, i just need 1 as I\'m the only user
+# mkdir /p/c/filesystem/etc/dovecot
+# echo "ian:$(doveadm pw -s ssha256)::::::" >/p/c/filesystem/etc/dovecot/users
+# conflink
+
+
+
+# # for ad-hoc testing of some random new host sending mail:
+# user=li # client host username & hostname
+# f=$(mktemp)
+# apg -m 50 -x 70 -n 1 -a 1 -M CLN >$f
+# s sed -i "/^$user:/d" /etc/exim4/passwd
+# echo "$user:$(mkpasswd -m sha-512 -s <$f)" | s tee -a /etc/exim4/passwd
+# echo "mail.iankelling.org:$user:$(<$f)" | ssh root@$user dd of=/etc/exim4/passwd.client
####### end persistent password instructions ######
+
+
+####### begin persistent dkim/dns instructions #########
+# # Remove 1 level of comments in this section, set the domain var
+# # for the domain you are setting up, then run this and copy dns settings
+# # into dns.
+# domain=iankelling.org
+# c /p/c/filesystem/etc/exim4
+# # this has several bugs addressed in comments, but it was helpful
+# # https://debian-administration.org/article/718/DKIM-signing_outgoing_mail_with_exim4
+
+# openssl genrsa -out $domain-private.pem 2048 -outform PEM
+# openssl rsa -in $domain-private.pem -out $domain.pem -pubout -outform PEM
+# # selector is needed for having multiple keys for one domain.
+# # I dun do that, so just use a static one: li
+# echo "txt record name: li._domainkey.$domain"
+# # Debadmin page does not have v=, fastmail does, and this
+# # says it\'s recommended in 3.6.1, default is DKIM1 anyways.
+# # https://www.ietf.org/rfc/rfc6376.txt
+# # Join and print all but first and last line.
+# # last line: swap hold & pattern, remove newlines, print.
+# # lines 2+: append to hold space
+# echo "txt record contents:"
+# echo "v=DKIM1; k=rsa; p=$(sed -n '${x;s/\n//gp};2,$H' $domain.pem)"
+# chmod 644 $domain.pem
+# chmod 640 $domain-private.pem
+# # in conflink, we chown these to group debian
+# conflink
+# # selector was also put into /etc/exim4/conf.d/main/000_localmacros,
+# # via the mail-setup scripts
+
+# # 2017-02 dmarc policies:
+# # host -t txt _dmarc.gmail.com
+# # yahoo: p=reject, hotmail: p=none, gmail: p=none, fastmail none for legacy reasons
+# # there were articles claiming gmail would be changing
+# # to p=reject, in early 2017, which didn\'t happen. I see no sources on them. It\'s
+# # expected to cause problems
+# # with a few old mailing lists, copying theirs for now.
+#
+# echo "dmarc dns, name: _dmarc value: v=DMARC1; p=none; rua=mailto:mailauth-reports@$domain"
+
+# # 2017-02 spf policies:
+# # google ~all, hotmail -all, yahoo: ?all, fastmail ?all
+# # i include fastmail\'s settings, per their instructions,
+# # and follow their policy. In mail in a box, or similar instructions,
+# # I\'ve seen recommended to not use a restrictive policy.
+# echo "spf dns: name is empty, value: v=spf1 a include:spf.messagingengine.com ?all"
+
+# # to check if dns has updated, you do
+# host -a mesmtp._domainkey.$domain
+
+# # mx records,
+# # setting it to iankelling.org would work the same, but this
+# # is more flexible, I could change where mail.iankelling.org pointed.
+# cat <<'EOF'
+# mx records, 2 records each, for * and empty domain
+# pri 10 mail.iankelling.org
+# pri 20 in1-smtp.messagingengine.com
+# pri 30 in2-smtp.messagingengine.com
+# EOF
+####### end persistent dkim instructions #########
+
+
+# misc exim notes:
+# useful exim docs:
+# /usr/share/doc/exim4-base/README.Debian.gz
+# /usr/share/doc/exim4-base/spec.txt.gz
+
+# routers, transports, and authenticators are sections, and you define
+# driver instances in those sections, and the manual calls them driver
+# types but there is also a more specific "type" of driver, which is specified
+# with the driver = some_module setting in the driver.
+
+# the driver option must precede and private options (options that are
+# specific to that driver), so follow example of putting it at beginning.
+
+# The full list of option settings for any particular driver instance,
+# including all the defaulted values, can be extracted by making use of
+# the -bP command line option.
+# exim -bP config_file to see what config file it used
+# exim -bP config to see
+
+# exim clear out message queue. as root:
+# adapted from somewhere on stackoverflow.
+# ser stop exim4; sleep 1; exim -bp | exiqgrep -i | xargs exim -Mrm; ser start exim4
+
+# fastmail has changed their smtp server, but the old one still works,
+# I see no reason to bother changing.
+# New one is smtp.fastmail.com
+
+# test delivery & rewrite settings:
+#exim4 -bt iank@localhost
+
+
# postconfin: read "key = value" lines from stdin and apply them all in
# one postconf -e invocation (edits postfix's main.cf).
postconfin() {
  # localize MAPFILE so mapfile's default target doesn't leak globally
  local MAPFILE
  mapfile -t
  postconf -ev "${MAPFILE[@]}"
}
# e: print all arguments as a single line (removed an unused `local s`)
e() { printf "%s\n" "$*"; }
+
# mail relay parameters; $u is the invoking user
postmaster=$u
mxhost=mail.iankelling.org
mxport=25
forward=$u@$mxhost

# old setup. left as comment for example
# mxhost=mail.messagingengine.com
# mxport=587
# forward=ian@iankelling.org

relayhost="[$mxhost]:$mxport" # postfix syntax
smarthost="$mxhost::$mxport" # exim syntax

# trisquel 8 = openvpn, debian stretch = openvpn-client:
# default to the old unit name, use the new one when it exists
vpn_ser=openvpn
if [[ -e /lib/systemd/system/openvpn-client@.service ]]; then
  vpn_ser=openvpn-client
fi
+
# NOTE(review): MAIL_HOST is expected from the environment (the script
# re-execs with sudo -E); confirm it is set by the calling shell.
if [[ $HOSTNAME == $MAIL_HOST ]]; then
  # afaik, these will get ignored because they are routing to my own
  # machine, but rm them is safer
  rm -f $(eval echo ~$postmaster)/.forward /root/.forward
else
  # this can't be a symlink and has permission restrictions
  # it might work in /etc/aliases, but this seems more proper.
  # {-o,-g} brace-expands to -oUSER -gUSER; eval expands ~USER to the
  # user's home directory
  install -m 644 {-o,-g}$postmaster <(e $forward) $(eval echo ~$postmaster)/.forward
fi

# offlineimap uses this too, it is much easier to use one location than to
# condition it's config and postfix's config
if [[ -f /etc/fedora-release ]]; then
  /a/exe/lnf -T ca-certificates.crt /etc/ssl/ca-bundle.trust.crt
fi
+
if postfix; then
  # dunno why, but debian installed postfix with builddep emacs
  # but I will just explicitly install it here since
  # I use it for sending mail in emacs.
  # debian-family: configure via debconf; other distros via postconf below
  if command -v apt-get &> /dev/null; then
    debconf-set-selections <<EOF
postfix postfix/main_mailer_type select Satellite system
postfix postfix/mailname string $HOSTNAME
postfix postfix/relayhost string $relayhost
postfix postfix/root_address string $postmaster
EOF
    if dpkg -s postfix &>/dev/null; then
      dpkg-reconfigure -u -fnoninteractive postfix
    else
      apt-get -y install --purge --auto-remove postfix
    fi
  else
    source /a/bin/distro-functions/src/package-manager-abstractions
    pi postfix
    # Settings from reading the output when installing on debian,
    # then seeing which were different in a default install on arch.
    # I assume the same works for fedora.
    postconfin <<EOF
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
relayhost = $relayhost
inet_interfaces = loopback-only
EOF

    systemctl enable postfix
    systemctl start postfix
  fi
  # i'm assuming mail just won't work on systems without the sasl_passwd.
  postconfin <<'EOF'
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd
smtp_sasl_security_options = noanonymous
smtp_tls_security_level = secure
message_size_limit = 20480000
smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt
inet_protocols = ipv4
EOF
  # msg_size_limit: I ran into a log file not sending cuz of size. double from 10 to 20 meg limit
  # inet_protocols: without this, I've had postfix try an ipv6 lookup then gives
  # up and fail forever. snippet from syslog: type=AAAA: Host not found, try again
+
+
+ f=/etc/postfix/sasl_passwd
+ install -m 600 /dev/null $f
+ cat /etc/mailpass| while read -r domain port pass; do
+ # format: domain port user:pass
+ # mailpass is just a name i made up, since postfix and
+ # exim both use a slightly crazy format to translate to
+ # each other, it\'s easier to use my own format.
+ printf "[%s]:%s %s" "$domain" "$port" "${pass/@/#}" >>$f
+ done
+ postmap hash:/etc/postfix/sasl_passwd
+ # need restart instead of reload when changing
+ # inet_protocols
+ service postfix restart
+
else # begin exim. has debian specific stuff for now

  # outgoing mail is tunneled over a vpn to the real mx (see mail-route)
  if ! dpkg -s openvpn &>/dev/null; then
    apt-get -y install --purge --auto-remove openvpn
  fi

  # only hosts with the private filesystem repo can mint a vpn client cert
  if [[ -e /p/c/filesystem ]]; then
    # to put the hostname in the known hosts
    ssh -o StrictHostKeyChecking=no root@li.iankelling.org :
    /a/exe/vpn-mk-client-cert -b mail -n mail li.iankelling.org
  fi

  # unit tying the mail routing rules to the vpn unit's lifetime;
  # unquoted heredoc so $vpn_ser expands now
  cat >/etc/systemd/system/mailroute.service <<EOF
[Unit]
# this unit is configured to start and stop whenever $vpn_ser@mail.service
# does
Description=Routing for email vpn
After=network.target
BindsTo=$vpn_ser@mail.service
After=$vpn_ser@mail.service

[Service]
Type=oneshot
ExecStart=/a/bin/distro-setup/mail-route start
ExecStop=/a/bin/distro-setup/mail-route stop
RemainAfterExit=yes

[Install]
RequiredBy=$vpn_ser@mail.service
EOF

  cat >/etc/systemd/system/offlineimapsync.timer <<'EOF'
[Unit]
Description=Run offlineimap-sync once every 5 mins

[Timer]
OnCalendar=*:0/5

[Install]
WantedBy=timers.target
EOF

  cat >/etc/systemd/system/offlineimapsync.service <<EOF
[Unit]
Description=Offlineimap sync
After=multi-user.target

[Service]
User=$u
Type=oneshot
ExecStart=/a/bin/log-quiet/sysd-mail-once offlineimap-sync /a/bin/distro-setup/offlineimap-sync
EOF
  systemctl daemon-reload
  systemctl enable mailroute
  # NOTE(review): offlineimapsync.timer is written but not enabled here;
  # possibly enabled elsewhere — confirm.

  # wording of question from dpkg-reconfigure exim4-config
  # 1. internet site; mail is sent and received directly using SMTP
  # 2. mail sent by smarthost; received via SMTP or fetchmail
  # 3. mail sent by smarthost; no local mail
  # 4. local delivery only; not on a network
  # 5. no configuration at this time
  #
  # Note, I have used option 2 in the past for receiving mail
  # from lan hosts, sending external mail via another smtp server.
  #
  # Note, other than configtype, we could set all the options in
  # both types of configs without harm, they would either be
  # ignored or be disabled by other settings, but the default
  # local_interfaces definitely makes things more secure.

  # most of these settings get translated into settings
  # in /etc/exim4/update-exim4.conf.conf
  # how /etc/exim4/update-exim4.conf.conf translates into actual exim settings is
  # documented in man update-exim4.conf, which outputs to the config that
  # exim actually reads. except the man page is not perfect, for example,
  # it doesn't document that it sets
  # DCconfig_${dc_eximconfig_configtype}" "1"
  # which is a line from update-exim4.conf, which is a relatively short bash script.
  # mailname setting sets /etc/mailname

  debconf-set-selections <<EOF
exim4-config exim4/use_split_config boolean true
EOF

  source /a/bin/bash_unpublished/source-semi-priv
  exim_main_dir=/etc/exim4/conf.d/main
  mkdir -p $exim_main_dir
+
+
+
+ #### begin mail cert setup ###
+ f=/usr/local/bin/mail-cert-cron
+ cat >$f <<'EOF'
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+[[ $EUID == 0 ]] || exec sudo "$BASH_SOURCE" "$@"
+
+f=/a/bin/bash_unpublished/source-semi-priv
+if [[ -e $f ]]; then
+ source $f
+fi
+if [[ $HOSTNAME == $MAIL_HOST ]]; then
+ local_mx=mail.iankelling.org
+ rsync_common="rsync -ogtL --chown=root:Debian-exim --chmod=640 root@li:/etc/letsencrypt/live/$local_mx/"
+ ${rsync_common}fullchain.pem /etc/exim4/exim.crt
+ ${rsync_common}privkey.pem /etc/exim4/exim.key
+fi
+EOF
+ chmod 755 $f
+
+ cat >/etc/systemd/system/mailcert.service <<'EOF'
+[Unit]
+Description=Mail cert rsync
+After=multi-user.target
+
+[Service]
+Type=oneshot
+ExecStart=/a/bin/log-quiet/sysd-mail-once mailcert /usr/local/bin/mail-cert-cron
+EOF
+
+ cat >/etc/systemd/system/mailcert.timer <<'EOF'
+[Unit]
+Description=Run mail-cert once a day
+
+[Timer]
+OnCalendar=daily
+
+[Install]
+WantedBy=timers.target
+EOF
+ systemctl daemon-reload
+ systemctl start mailcert
+ systemctl restart mailcert.timer
+ systemctl enable mailcert.timer
+
+ ##### end mailcert setup #####
+
+
+
+ if [[ $HOSTNAME == $MAIL_HOST ]]; then
+
+ debconf-set-selections <<EOF
+# Mail Server configuration
+# -------------------------
+
+# Please select the mail server configuration type that best meets your needs.
+
+# Systems with dynamic IP addresses, including dialup systems, should generally be
+# configured to send outgoing mail to another machine, called a 'smarthost' for
+# delivery because many receiving systems on the Internet block incoming mail from
+# dynamic IP addresses as spam protection.
+
+# A system with a dynamic IP address can receive its own mail, or local delivery can be
+# disabled entirely (except mail for root and postmaster).
+
+# 1. internet site; mail is sent and received directly using SMTP
+# 2. mail sent by smarthost; received via SMTP or fetchmail
+# 3. mail sent by smarthost; no local mail
+# 4. local delivery only; not on a network
+# 5. no configuration at this time
+
+# General type of mail configuration: 1
+exim4-config exim4/dc_eximconfig_configtype select internet site; mail is sent and received directly using SMTP
+
+
+
+# The 'mail name' is the domain name used to 'qualify' mail addresses without a domain
+# name.
+
+# This name will also be used by other programs. It should be the single, fully
+# qualified domain name (FQDN).
+
+# Thus, if a mail address on the local host is foo@example.org, the correct value for
+# this option would be example.org.
+
+# This name won\'t appear on From: lines of outgoing messages if rewriting is enabled.
+
+# System mail name:
+exim4-config exim4/mailname string mail.iankelling.org
+
+
+
+
+# Please enter a semicolon-separated list of recipient domains for which this machine
+# should consider itself the final destination. These domains are commonly called
+# 'local domains'. The local hostname (treetowl.lan) and 'localhost' are always added
+# to the list given here.
+
+# By default all local domains will be treated identically. If both a.example and
+# b.example are local domains, acc@a.example and acc@b.example will be delivered to the
+# same final destination. If different domain names should be treated differently, it
+# is necessary to edit the config files afterwards.
+
+# Other destinations for which mail is accepted:
+# iank.bid is for testing
+# mail.iankelling.org is for machines i own
+exim4-config exim4/dc_other_hostnames string *.iankelling.org;iankelling.org;*iank.bid;iank.bid;*zroe.org;zroe.org;*.b8.nz;b8.nz
+
+
+
+
+# Please enter a semicolon-separated list of IP addresses. The Exim SMTP listener
+# daemon will listen on all IP addresses listed here.
+
+# An empty value will cause Exim to listen for connections on all available network
+# interfaces.
+
+# If this system only receives mail directly from local services (and not from other
+# hosts), it is suggested to prohibit external connections to the local Exim daemon.
+# Such services include e-mail programs (MUAs) which talk to localhost only as well as
+# fetchmail. External connections are impossible when 127.0.0.1 is entered here, as
+# this will disable listening on public network interfaces.
+
+# IP-addresses to listen on for incoming SMTP connections:
+exim4-config exim4/dc_local_interfaces string
+
+
+
+
+# Mail for the 'postmaster', 'root', and other system accounts needs to be redirected
+# to the user account of the actual system administrator.
+
+# If this value is left empty, such mail will be saved in /var/mail/mail, which is not
+# recommended.
+
+# Note that postmaster\'s mail should be read on the system to which it is directed,
+# rather than being forwarded elsewhere, so (at least one of) the users listed here
+# should not redirect their mail off this machine. A 'real-' prefix can be used to
+# force local delivery.
+
+# Multiple user names need to be separated by spaces.
+
+# Root and postmaster mail recipient:
+exim4-config exim4/dc_postmaster string $postmaster
+
+
+
+# Exim is able to store locally delivered email in different formats. The most commonly
+# used ones are mbox and Maildir. mbox uses a single file for the complete mail folder
+# stored in /var/mail/. With Maildir format every single message is stored in a
+# separate file in ~/Maildir/.
+
+# Please note that most mail tools in Debian expect the local delivery method to be
+# mbox in their default.
+
+# 1. mbox format in /var/mail/ 2. Maildir format in home directory
+
+# Delivery method for local mail: 2
+exim4-config exim4/dc_localdelivery select Maildir format in home directory
+EOF
+ # MAIN_HARDCODE_PRIMARY_HOSTNAME might mess up the
+ # smarthost config type, not sure. all other settings
+ # would be unused in that config type.
+ cat >$exim_main_dir/000_localmacros <<EOF
+# i don't have ipv6 setup for my tunnel yet.
+disable_ipv6 = true
+
+MAIN_TLS_ENABLE = true
+
+DKIM_CANON = relaxed
+DKIM_SELECTOR = li
+
+# from comments in
+# https://debian-administration.org/article/718/DKIM-signing_outgoing_mail_with_exim4
+
+# The file is based on the outgoing domain-name in the from-header.
+DKIM_DOMAIN = \${lc:\${domain:\$h_from:}}
+# sign if key exists
+DKIM_PRIVATE_KEY= \${if exists{/etc/exim4/\${dkim_domain}-private.pem} {/etc/exim4/\${dkim_domain}-private.pem}}
+
+
+# failing message on mail-tester.com:
+# We check if there is a server (A Record) behind your hostname treetowl.
+# You may want to publish a DNS record (A type) for the hostname treetowl or use a different hostname in your mail software
+# https://serverfault.com/questions/46545/how-do-i-change-exim4s-primary-hostname-on-a-debian-box
+# and this one seemed appropriate from grepping config.
+# I originally set this to li.iankelling.org, but then ended up with errors when li tried to send
+# mail to treetowl, so this should basically be a name that no host has as their
+# canonical hostname since the actual host sits behind a nat and changes.
+# Seems logical for this to be the same as mailname.
+MAIN_HARDCODE_PRIMARY_HOSTNAME = mail.iankelling.org
+
+# normally empty, I set this so I can set the envelope address
+# when doing mail redelivery to invoke filters
+MAIN_TRUSTED_GROUPS = $u
+
+LOCAL_DELIVERY = dovecot_lmtp
+
+# options exim has to avoid having to alter the default config files
+CHECK_RCPT_LOCAL_ACL_FILE = /etc/exim4/rcpt_local_acl
+CHECK_DATA_LOCAL_ACL_FILE = /etc/exim4/data_local_acl
+
+# debian exim config added this in 2016 or so?
+# it's part of the smtp spec, to limit lines to 998 chars
+# but a fair amount of legit mail does not adhere to it. I don't think
+# this should be default, like it says in
+# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=828801
+# todo: the bug for introducing this was about headers, but
+# the fix maybe is for all lines? one says gmail rejects, the
+# other says gmail does not reject. figure out and open a new bug.
+IGNORE_SMTP_LINE_LENGTH_LIMIT = true
+
+# most of the ones that gmail seems to use.
+# Exim has horrible default of signing unincluded
+# list- headers since they got mentioned in an
+# rfc, but this messes up mailing lists, like gnu/debian which want to
+# keep your dkim signature intact but add list- headers.
+DKIM_SIGN_HEADERS = mime-version:in-reply-to:references:from:date:subject:to
+
+EOF
+
+
+ ####### begin dovecot setup ########
+ # based on a little google and package search, just the dovecot
+ # packages we need instead of dovecot-common.
+ #
+ # dovecot-lmtpd is for exim to deliver to dovecot instead of maildir
+ # directly. The reason to do this is to use dovecot\'s sieve, which
+ # has extensions that allow it to be almost equivalent to exim\'s
+ # filter capabilities, some ways probably better, some worse, and
+ # sieve has the benefit of being supported in postfix and
+ # proprietary/weird environments, so there is more examples on the
+ # internet. I was torn about whether to do this or not, meh.
+ apt-get -y install --purge --auto-remove \
+ dovecot-core dovecot-imapd dovecot-sieve dovecot-lmtpd
+
+ # if we changed 90-sieve.conf and removed the active part of the
+ # sieve option, we wouldn\'t need this, but I\'d rather not modify a
+ # default config if not needed. This won\'t work as a symlink in /a/c
+ # unfortunately.
+ sudo -u $postmaster /a/exe/lnf -T sieve/main.sieve $(eval echo ~$postmaster)/.dovecot.sieve
+
+ sed -ri -f - /etc/dovecot/conf.d/10-mail.conf <<'EOF'
+1i mail_location = maildir:/m/md:LAYOUT=fs:INBOX=/m/md/INBOX
+/^\s*mail_location\s*=/d
+EOF
+
+ cat >/etc/dovecot/conf.d/20-lmtp.conf <<EOF
+protocol lmtp {
+#per https://wiki2.dovecot.org/Pigeonhole/Sieve/Configuration
+ mail_plugins = \$mail_plugins sieve
+# default was
+ #mail_plugins = \$mail_plugins
+
+# For a normal setup with exim, we need something like this, which
+# removes the domain part
+# auth_username_format = %Ln
+#
+# or else # Exim says something like
+# "LMTP error after RCPT ... 550 ... User doesn't exist someuser@somedomain"
+# Dovecot verbose log says something like
+# "auth-worker(9048): passwd(someuser@somedomain): unknown user"
+# reference: http://wiki.dovecot.org/LMTP/Exim
+#
+# However, I use this to direct all mail to the same inbox.
+# A normal way to do this, which I did at first is to have
+# a router in exim almost at the end, eg 950,
+#local_catchall:
+# debug_print = "R: catchall for \$local_part@\$domain"
+# driver = redirect
+# domains = +local_domains
+# data = $u
+# based on
+# http://blog.alteholz.eu/2015/04/exim4-and-catchall-email-address/
+# with superflous options removed.
+# However, this causes the envelope to be rewritten,
+# which makes filtering into mailboxes a little less robust or more complicated,
+# so I've done it this way instead. it also requires
+# modifying the local router in exim.
+ auth_username_format = $u
+}
+
+EOF
+
+
+ cat >/etc/dovecot/local.conf <<'EOF'
+# so I can use a different login that my shell login for mail. this is
+# worth doing solely for the reason that if this login is compromised,
+# it won't also compromise my shell password.
+!include conf.d/auth-passwdfile.conf.ext
+
+# settings derived from wiki and 10-ssl.conf
+ssl = required
+ssl_cert = </etc/exim4/exim.crt
+ssl_key = </etc/exim4/exim.key
+# https://github.com/certbot/certbot/raw/master/certbot-apache/certbot_apache/options-ssl-apache.conf
+# in my cert cronjob, I check if that has changed upstream.
+ssl_cipher_list = ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
+
+# ian: added this, more secure, per google etc
+ssl_prefer_server_ciphers = yes
+
+# for debugging info, uncomment these.
+# logs go to syslog and to /var/log/mail.log
+# auth_verbose=yes
+#mail_debug=yes
+EOF
+ ####### end dovecot setup ########
+
+
+ systemctl enable offlineimapsync.timer
+ systemctl start offlineimapsync.timer
+ systemctl restart $vpn_ser@mail
+ systemctl enable $vpn_ser@mail
+ systemctl enable dovecot
+ systemctl restart dovecot
+
+ else # $HOSTNAME != $MAIL_HOST
+ systemctl disable offlineimapsync.timer &>/dev/null ||:
+ systemctl stop offlineimapsync.timer &>/dev/null ||:
+ systemctl disable $vpn_ser@mail
+ systemctl stop $vpn_ser@mail
+ systemctl disable dovecot ||:
+ systemctl stop dovecot ||:
+ #
+ #
+ # would only exist because I wrote it i the previous condition,
+ # it\'s not part of exim
+ rm -f $exim_main_dir/000_localmacros
+ debconf-set-selections <<EOF
+exim4-config exim4/dc_eximconfig_configtype select mail sent by smarthost; no local mail
+exim4-config exim4/dc_smarthost string $smarthost
+# the default, i think is from /etc/mailname. better to set it to
+# whatever the current fqdn is.
+exim4-config exim4/mailname string $(hostname -f)
+EOF
+
+ fi # end $HOSTNAME != $MAIL_HOST
+
+ # if we already have it installed, need to reconfigure, without being prompted
+ if dpkg -s exim4-config &>/dev/null; then
+ # gotta remove this, otherwise the set-selections are completely
+ # ignored. It woulda been nice if this was documented somewhere!
+ rm -f /etc/exim4/update-exim4.conf.conf
+ dpkg-reconfigure -u -fnoninteractive exim4-config
+ fi
+
+  # i have the spool directory be common to distro multi-boot, so
+  # we need the uid to be the same. 608 cuz it's kind of in the middle
+  # of the free system uids.
+  # NOTE: "IFS=:" is a standalone command here (not a per-command
+  # prefix), so it changes IFS shell-wide for the read; "unset IFS"
+  # restores default splitting afterwards.
+  IFS=:; read _ _ uid _ < <(getent passwd Debian-exim ); unset IFS
+  IFS=:; read _ _ gid _ < <(getent group Debian-exim ); unset IFS
+  if [[ ! $uid ]]; then
+    # User doesn't exist yet: create it with the fixed uid/gid.
+    # from /var/lib/dpkg/info/exim4-base.postinst, plus uid and gid options
+    adduser --uid 608 --gid 608 --system --group --quiet --home /var/spool/exim4 \
+      --no-create-home --disabled-login --force-badname Debian-exim
+  elif [[ $uid != 608 ]]; then
+    # User exists under a different uid: renumber it and re-own its
+    # files on the root and /nocow filesystems (-xdev stays on each).
+    systemctl stop exim4 ||:
+    usermod -u 608 Debian-exim
+    groupmod -g 608 Debian-exim
+    usermod -g 608 Debian-exim
+    find / /nocow -xdev -uid $uid -exec chown -h 608 {} +
+    find / /nocow -xdev -gid $gid -exec chgrp -h 608 {} +
+  fi
+
+ # light version of exim does not have sasl auth support.
+ apt-get -y install --purge --auto-remove exim4-daemon-heavy spamassassin
+
+
+
+
+ ##### begin spamassassin config
+ systemctl enable spamassassin
+ # per readme.debian
+ sed -i '/^\s*CRON\s*=/d' /etc/default/spamassassin
+ e CRON=1 >>/etc/default/spamassassin
+ # just noticed this in the config file, seems like a good idea.
+ sed -i '/^\s*NICE\s*=/d' /etc/default/spamassassin
+ e 'NICE="--nicelevel 15"' >>/etc/default/spamassassin
+ systemctl start spamassassin
+ systemctl reload spamassassin
+
+ cat >/etc/systemd/system/spamddnsfix.service <<'EOF'
+[Unit]
+Description=spamd dns bug fix cronjob
+
+[Service]
+Type=oneshot
+ExecStart=/a/bin/distro-setup/spamd-dns-fix
+EOF
+ # 2017-09, debian closed the bug on this saying upstream had fixed it.
+ # remove this when i\'m using the newer package, ie, debian 10, or maybe
+ # ubuntu 18.04.
+ cat >/etc/systemd/system/spamddnsfix.timer <<'EOF'
+[Unit]
+Description=run spamd bug fix script every 10 minutes
+
+[Timer]
+OnActiveSec=60
+# the script looks back 9 minutes into the journal,
+# it takes a second to run,
+# so lets run every 9 minutes and 10 seconds.
+OnUnitActiveSec=550
+
+[Install]
+WantedBy=timers.target
+EOF
+ systemctl daemon-reload
+ systemctl restart spamddnsfix.timer
+ systemctl enable spamddnsfix.timer
+ #
+ ##### end spamassassin config
+
+
+
+
+
+ cat >/etc/exim4/rcpt_local_acl <<'EOF'
+# Only hosts we control send to mail.iankelling.org, so make sure
+# they are all authed.
+# Note, if we wanted authed senders for all domains,
+# we could make this condition in acl_check_mail
+deny
+ message = ian trusted domain recepient but no auth
+ !authenticated = *
+ domains = mail.iankelling.org
+EOF
+ cat >/etc/exim4/data_local_acl <<'EOF'
+# Except for the "condition =", this was
+# a comment in the check_data acl. The comment about this not
+# being suitable is mostly bs. The only thing related I found was to
+# add the condition =, cuz spamassassin has problems with big
+# messages and spammers don't bother with big messages,
+# but I've increased the size from 10k
+# suggested in official docs, and 100k in the wiki example because
+# those docs are rather old and I see a 110k spam message
+# pretty quickly looking through my spam folder.
+ warn
+ condition = ${if < {$message_size}{2000K}}
+ spam = Debian-exim:true
+ add_header = X-Spam_score: $spam_score\n\
+ X-Spam_score_int: $spam_score_int\n\
+ X-Spam_bar: $spam_bar\n\
+ X-Spam_report: $spam_report
+
+EOF
+ cat >/etc/exim4/conf.d/auth/29_exim4-config_auth <<'EOF'
+# from 30_exim4-config_examples
+
+plain_server:
+driver = plaintext
+public_name = PLAIN
+server_condition = "${if crypteq{$auth3}{${extract{1}{:}{${lookup{$auth2}lsearch{CONFDIR/passwd}{$value}{*:*}}}}}{1}{0}}"
+server_set_id = $auth2
+server_prompts = :
+.ifndef AUTH_SERVER_ALLOW_NOTLS_PASSWORDS
+server_advertise_condition = ${if eq{$tls_in_cipher}{}{}{*}}
+.endif
+EOF
+
+ cat >/etc/exim4/conf.d/router/900_exim4-config_local_user <<'EOF'
+### router/900_exim4-config_local_user
+#################################
+
+# This router matches local user mailboxes. If the router fails, the error
+# message is "Unknown user".
+
+local_user:
+ debug_print = "R: local_user for $local_part@$domain"
+ driver = accept
+ domains = +local_domains
+# ian: commented this, in conjunction with a dovecot lmtp
+# change so I get mail for all users.
+# check_local_user
+ local_parts = ! root
+ transport = LOCAL_DELIVERY
+ cannot_route_message = Unknown user
+EOF
+ cat >/etc/exim4/conf.d/transport/30_exim4-config_dovecot_lmtp <<'EOF'
+dovecot_lmtp:
+ driver = lmtp
+ socket = /var/run/dovecot/lmtp
+ #maximum number of deliveries per batch, default 1
+ batch_max = 200
+EOF
+
+ cat >/etc/exim4/conf.d/router/190_exim4-config_fsfsmarthost <<'EOF'
+# smarthost for fsf mail
+# ian: copied from /etc/exim4/conf.d/router/200_exim4-config_primary, and added senders = and
+# replaced DCsmarthost with mail.fsf.org
+fsfsmarthost:
+ debug_print = "R: smarthost for $local_part@$domain"
+ driver = manualroute
+ domains = ! +local_domains
+ senders = *@fsf.org
+ transport = remote_smtp_smarthost
+ route_list = * mail.fsf.org byname
+ host_find_failed = ignore
+ same_domain_copy_routing = yes
+ no_more
+EOF
+
+ # https://blog.dhampir.no/content/make-exim4-on-debian-respect-forward-and-etcaliases-when-using-a-smarthost
+ # i only need .forwards, so just doing that one.
+ cd /etc/exim4/conf.d/router
+ b=userforward_higher_priority
+ # replace the router name so it is unique
+ sed -r s/^\\S+:/$b:/ 600_exim4-config_userforward >175_$b
+
+ # begin setup passwd.client
+ f=/etc/exim4/passwd.client
+ rm -f /etc/exim4/passwd.client
+ install -m 640 -g Debian-exim /dev/null $f
+ cat /etc/mailpass| while read -r domain port pass; do
+ # reference: exim4_passwd_client(5)
+ printf "%s:%s\n" "$domain" "$pass" >>$f
+ done
+ # end setup passwd.client
+
+ systemctl restart exim4
+
+fi #### end if exim4
+
+# /etc/alias setup is debian specific, and
+# exim config sets up an /etc/alias from root to the postmaster, which i
+# config to ian, as long as there exists an entry for root, or there was
+# no preexisting aliases file. based on the postinst file. postfix
+# won\'t set up a root to $postmaster alias if it\'s already installed.
+# Since postfix is not the greatest, just set it ourselves.
+if [[ $postmaster != root ]]; then
+  # sed program: append "root: $postmaster" after the last line, then
+  # delete any preexisting root: alias line.
+  sed -i --follow-symlinks -f - /etc/aliases <<EOF
+\$a root: $postmaster
+/^root:/d
+EOF
+  newaliases
+fi
+
+# put spool dir in directory that spans multiple distros.
+# based on http://www.postfix.org/qmgr.8.html and my notes in gnus
+#
+# todo: I\'m suspicious of uids for Debian-exim being the same across
+# distros. It would be good to test this.
+# NOTE(review): $type is presumably the MTA package name (exim4 or
+# postfix), set earlier in this script -- confirm
+dir=/nocow/$type
+sdir=/var/spool/$type
+# we only do this if our system has $dir
+if [[ -e /nocow && $(readlink -f $sdir) != $dir ]]; then
+  systemctl stop $type
+  # move the existing spool into /nocow once, then symlink it back
+  if [[ ! -e $dir && -d $sdir ]]; then
+    mv $sdir $dir
+  fi
+  /a/exe/lnf -T $dir $sdir
+fi
+
+systemctl restart $type
+systemctl enable $type
+
+# MAIL_HOST also does radicale, and easier to start and stop it here
+# for when MAIL_HOST changes, so radicale gets the synced files and
+# does not stop us from remounting /o.
+if dpkg -s radicale &>/dev/null; then
+  if [[ $HOSTNAME == $MAIL_HOST ]]; then
+    systemctl restart radicale
+    systemctl enable radicale
+    if [[ -e /etc/logrotate.d/radicale.disabled ]]; then
+      mv /etc/logrotate.d/radicale{.disabled,}
+    fi
+  else
+    systemctl stop radicale
+    systemctl disable radicale
+    # weekly logrotate tries to restart radicale even if it's a disabled service in flidas.
+    if [[ -e /etc/logrotate.d/radicale ]]; then
+      mv /etc/logrotate.d/radicale{,.disabled}
+    fi
+  fi
+fi
+exit 0
+
+# if I wanted the from address to be renamed and sent to a different address,
+# echo "sdx@localhost development@localhost" | sudo dd of=/etc/postfix/recipient_canonical
+# sudo postmap hash:/etc/postfix/recipient_canonical
+# sudo service postfix reload
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2017 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+
+usage() {
+  cat <<EOF
+Usage: ${0##*/}
+Initial setup for maru debian for nexus 5.
+
+We do this separately because it's constrained for space much more than
+any other host. The maru debian takes up about 1.5G. We start with about
+6.8G free. We want to keep a few gigs free to take pictures and
+movies. I should really trade this in for a 32GB version.
+
+-h|--help Print help and exit.
+EOF
+  # exit 0 when no explicit status is passed (was "exit $1", which with
+  # an empty $1 silently reused the previous command's status)
+  exit ${1:-0}
+}
+
+# bugfix: this was "*) usage ;;", which matches every invocation --
+# including no arguments at all -- so the script always printed usage
+# and exited before doing any work. Only help flags (or an unexpected
+# argument, as an error) should trigger usage.
+case $1 in
+  -h|--help) usage ;;
+  ?*) usage 1 ;;
+esac
+
+
+# Copy rootsshsync plus this script's sibling maru-init file to the
+# phone, then enable root ssh so the heredoc below can run as root.
+scp $(readlink -f $(which rootsshsync)) maru-init maru@n5:
+ssh -t maru@n5 ./rootsshsync
+
+ssh root@n5 bash <<'EOF'
+echo "ian ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
+echo n5 >/etc/hostname
+sed -i '/^127\.0\.1\.1/d' /etc/hosts
+echo "127.0.1.1 n5.lan n5" >>/etc/hosts
+hostname -F /etc/hostname
+
+# rename the stock maru user to ian (must not be logged in), add traci
+kill $(pgrep -U maru)
+usermod -l ian -m -d /home/ian maru
+groupmod -n ian maru
+useradd -m -s /bin/bash traci
+EOF
+
+# then do myunison n5,
+# then do conflink.
--- /dev/null
+#!/bin/bash
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+# based on
+# https://github.com/tootsuite/documentation/blob/master/Running-Mastodon/Docker-Guide.md
+
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+
+cd /home/iank/mastodon
+git fetch
+git stash
+git checkout $(git tag | grep -v rc | tail -n1)
+git stash pop
+docker-compose build
+# these 2 may not be needed in all upgrades, but
+# simpler to just do them always.
+docker-compose run --rm web rake db:migrate
+docker-compose run --rm web rake assets:precompile
+# restart the app
+docker-compose up -d
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# usage: mount-latest-remote HOST...
+# Push mount-latest-subvol and check-subvol-stale to each HOST and run
+# mount-latest-subvol there. Exits nonzero if any host failed.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+script_dir=$(dirname "$(readlink -f "$BASH_SOURCE")")
+
+# idiom fix: test the argument count, not the string "[[ ! $@ ]]",
+# which conflates "no args" with "one empty arg"; error goes to stderr
+if (( $# == 0 )); then
+  echo "mount-latest-remote: error: expected 1 or more host arguments" >&2
+  exit 1
+fi
+ret=0
+
+for tg; do
+  scp $script_dir/{mount-latest-subvol,check-subvol-stale} \
+    root@"$tg":/usr/local/bin
+  if ! ssh root@"$tg" bash <<'EOF'
+set -e
+chmod +x /usr/local/bin/{mount-latest-subvol,check-subvol-stale}
+/usr/local/bin/mount-latest-subvol
+EOF
+  then
+    echo "$0: warning: failed mount-latest-subvol on $tg" >&2
+    ret=1
+  fi
+done
+exit $ret
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# usage: mount-latest-subvol
+
+# re-exec as root via sudo (preserving environment) if not already root
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+
+# errcatch: install an ERR trap that prints the failing command with
+# file:line, a call-stack trace, and exits with the original status.
+# set -E propagates the trap into functions; extdebug makes function
+# arguments available to bash-trace via BASH_ARGC/BASH_ARGV.
+errcatch() {
+  set -E; shopt -s extdebug
+  _err-trap() {
+    err=$?
+    exec >&2
+    set +x
+    echo "${BASH_SOURCE[1]}:${BASH_LINENO[0]}:in \`$BASH_COMMAND' returned $err"
+    bash-trace 2
+    echo "$0: exiting with code $err"
+    exit $err
+  }
+  trap _err-trap ERR
+  set -o pipefail
+}
+# bash-trace [START]: print the current call stack, skipping the
+# innermost START frames (default 1). When extdebug is set, each
+# frame's arguments are printed too.
+bash-trace() {
+  local -i argc_index=0 frame i start=${1:-1} max_indent=8 indent
+  local source
+  local extdebug=false
+  if [[ $(shopt -p extdebug) == *-s* ]]; then
+    extdebug=true
+  fi
+
+  for ((frame=0; frame < ${#FUNCNAME[@]}-1; frame++)); do
+    # BASH_ARGV holds all frames' args concatenated (innermost frame
+    # first, each frame's args reversed); argc_index tracks our offset
+    # into it as we walk outward.
+    argc=${BASH_ARGC[frame]}
+    argc_index+=$argc
+    ((frame < start)) && continue
+    if (( ${#BASH_SOURCE[@]} > 1 )); then
+      source="${BASH_SOURCE[frame+1]}:${BASH_LINENO[frame]}:"
+    fi
+    # indent grows with stack depth, capped at max_indent
+    indent=$((frame-start+1))
+    indent=$((indent < max_indent ? indent : max_indent))
+    printf "%${indent}s↳%sin \`%s" '' "$source" "${FUNCNAME[frame]}"
+    if $extdebug; then
+      # this frame's args are stored reversed; iterate backwards over
+      # its slice to print them in original order
+      for ((i=argc_index-1; i >= argc_index-argc; i--)); do
+        printf " %s" "${BASH_ARGV[i]}"
+      done
+    fi
+    echo \'
+  done
+}
+# activate the ERR trap machinery defined above
+errcatch
+
+# tu FILE: read lines from stdin; append each to FILE unless an
+# identical line (exact whole-line fixed-string match) is already
+# present. Appended lines are echoed to stdout via tee.
+tu() {
+  while read -r line; do
+    file="$1"
+    grep -xFq "$line" "$file" || tee -a "$file"<<<"$line"
+  done
+}
+# e CMD...: echo the command line, then execute it.
+e() { printf "%s\n" "$*"; "$@"; }
+# mnt DIR: mount DIR from its fstab entry unless already mounted.
+# NOTE: assigns the global $dir, which kill-dir also reads.
+mnt() {
+  dir=$1
+  if ! mountpoint $dir &>/dev/null; then
+    mkdir -p $dir
+    e mount $dir
+  fi
+}
+# fstab: read "dev mountpoint rest..." lines from stdin and install
+# each in /etc/fstab idempotently: keep an existing identical line,
+# otherwise drop any other line for the same mountpoint and append.
+fstab() {
+  while read -r start mpoint end; do
+    l="$start $mpoint $end"
+    # kill off any lines that duplicate the mount point.
+    sed --follow-symlinks -ri "\%$l%b;\%^\s*\S+\s+$mpoint\s%d" /etc/fstab
+    tu /etc/fstab <<<"$l"
+  done
+}
+# kill-dir [SIG]: signal (default TERM) every process holding the
+# global $dir open, found via lsof and fuser. Returns 0 (after a short
+# settle sleep) if anything was signaled, 1 otherwise.
+kill-dir() {
+  found_pids=false
+  sig=${1:-TERM}
+  if pids=$(timeout 4 lsof -t $dir); then
+    found_pids=true
+    timeout 4 lsof -w $dir
+    kill -$sig $pids
+  fi
+  # fuser will find open sockets that lsof won't, for example from gpg-agent.
+  # note: -v shows kernel processes, which then doesn't return true when we want
+  if timeout 4 fuser -m $dir &>/dev/null; then
+    found_pids=true
+    fuser -$sig -mvk $dir
+  fi
+  if $found_pids; then
+    sleep .5
+    return 0
+  fi
+  return 1
+}
+
+# -f: force; escalate to SIGKILL on processes that won't release a
+# stale mount.
+force=false
+if [[ $1 == -f ]]; then
+  force=true
+fi
+
+ret=0
+
+##### begin setup fstab for subvols we care about ######
+# device backing the root filesystem, per /etc/mtab
+first_root_crypt=$(awk '$2 == "/" {print $1}' /etc/mtab)
+fstab <<EOF
+$first_root_crypt /a btrfs noatime,subvol=a 0 0
+EOF
+
+shopt -s nullglob
+
+# only add entries for subvols with btrbk snapshots on this machine;
+# with nullglob, [[ -e $f ]] tests the array's first match (if any)
+f=(/mnt/root/btrbk/q.*)
+if [[ -e $f ]]; then
+  fstab <<EOF
+$first_root_crypt /q btrfs noatime,subvol=q 0 0
+/q/p /p none bind 0 0
+EOF
+fi
+
+f=(/mnt/root/btrbk/o.*)
+if [[ -e $f ]]; then
+  fstab <<EOF
+$first_root_crypt /o btrfs noatime,subvol=o 0 0
+/o/m /m none bind 0 0
+EOF
+fi
+
+if [[ $HOSTNAME == frodo ]]; then
+  fstab <<EOF
+$first_root_crypt /i btrfs noatime,subvol=i 0 0
+EOF
+fi
+##### end setup fstab for subvols we care about ######
+
+for vol in q a o i; do
+  d=/$vol
+  # skip subvols that got no fstab entry above
+  if ! awk '{print $2}' /etc/fstab | grep -xF $d &>/dev/null; then
+    continue
+  fi
+
+
+  ##### begin building up list of bind mounts ######
+  binds=() # list of bind mounts
+  roots=($d) # list of bind mounts, plus the original mount
+  # transitively collect, from /etc/fstab, all bind mounts rooted
+  # under $d (and under those binds, and so on)
+  while true; do
+    new_roots=()
+    for r in ${roots[@]}; do
+      # eg. when r=/q/p, for lines like
+      # /q/p /p none bind 0 0
+      # output /p
+      new_roots+=($(sed -rn "s#^$r/\S+\s+(\S+)\s+none\s+bind\s.*#\1#p" /etc/fstab))
+    done
+    (( ${#new_roots} )) || break
+    binds+=(${new_roots[@]})
+    roots=( ${new_roots[@]} )
+  done
+  ##### end building up list of bind mounts ######
+
+
+  # if latest is already mounted, make sure binds are mounted and move on
+  if e check-subvol-stale $d; then
+    mnt $d
+    for b in ${binds[@]}; do
+      mnt $b
+    done
+    continue
+  fi
+
+  # check-subvol-stale recorded the freshest snapshot's path here
+  fresh_snap=$(</nocow/btrfs-stale/$vol)
+  if [[ ! $fresh_snap ]]; then
+    echo "$0: error. empty fresh_snap var"
+    ret=1
+    continue
+  fi
+
+  # unmount the binds before $d itself (the echo|tac pipeline reverses
+  # the word order); if a umount fails, kill holders with escalating
+  # signals and retry once
+  umount_ret=true
+  unmounted=()
+  for dir in $(echo $d ${binds[*]}\ |tac -s\ ); do
+    if mountpoint $dir; then
+      if e umount -R $dir; then
+        unmounted+=($dir)
+      else
+        kill-dir || kill-dir INT || kill-dir HUP || ! $force || kill-dir KILL ||:
+
+        if e umount -R $dir; then
+          unmounted+=($dir)
+        else
+          echo "$0: failed to umount $dir"
+          umount_ret=false
+          ret=1
+          continue
+        fi
+      fi
+    fi
+  done
+
+  # couldn't fully unmount: remount whatever we did unmount and give
+  # up on this vol
+  if ! $umount_ret; then
+    for dir in ${unmounted[@]}; do
+      mnt $dir
+    done
+    continue
+  fi
+
+  # todo: decipher /mnt/root, like we do in check-subvol-stale
+  cd /mnt/root
+  # keep the replaced subvol around under a timestamped "leaf" name
+  if [[ -e $vol ]]; then
+    e mv $vol $vol.leaf.$(date +%Y%m%dT%H%M%S%z)
+  fi
+  # Note, we make a few assumptions in this script, like
+  # $d was not a different subvol id than $vol, and
+  # things otherwise didn't get mounted very strangely.
+  e btrfs sub snapshot $fresh_snap $vol
+  for dir in $d ${binds[@]}; do
+    e mnt $dir
+  done
+  stale_dir=/nocow/btrfs-stale
+  # NOTE(review): $d starts with "/", so this expands to
+  # .../btrfs-stale//<vol>; same path as using $vol, but looks like
+  # $vol was intended -- confirm
+  rm -f $stale_dir/$d
+done
+
+### disabled
+# Dead branch: the hostname "treetowlxxxxxxxxx" never matches. Kept as
+# documentation of how the treetowl multi-drive luks/btrfs array was
+# set up.
+if [[ $HOSTNAME == treetowlxxxxxxxxx ]]; then
+  # partitioned it with fai partitioner outside of fai,
+  # because it\'s worth it to have 1% space reserved for boot and
+  # swap partitions in case I ever want to boot off those drives.
+  # as root:
+  # . /a/bin/fai/fai-wrapper
+  # eval-fai-classfile /a/bin/fai/fai/config/class/51-multi-boot
+  # fai-setclass ROTATIONAL
+  # export LUKS_DIR=/q/root/luks/
+  # # because the partition nums existed already
+  # fai-setclass REPARTITION
+  # /a/bin/fai/fai/config/hooks/partition.DEFAULT
+
+  devs=(
+    ata-TOSHIBA_MD04ACA500_84REK6NTFS9A-part1
+    ata-TOSHIBA_MD04ACA500_84R2K773FS9A-part1
+    ata-TOSHIBA_MD04ACA500_8471K430FS9A-part1
+    ata-TOSHIBA_MD04ACA500_8481K493FS9A-part1
+  )
+  first=true
+  for dev in ${devs[@]}; do
+    # fstab entries only reference the first drive; all drives get
+    # crypttab entries and are opened below
+    if $first; then
+      first=false
+      tu /etc/fstab <<EOF
+/dev/mapper/crypt_dev_$dev /i btrfs noatime,subvol=i,noauto 0 0
+/dev/mapper/crypt_dev_$dev /mnt/iroot btrfs noatime,subvolid=0,noauto 0 0
+EOF
+    fi
+    tu /etc/crypttab <<EOF
+crypt_dev_$dev /dev/disk/by-id/$dev /q/root/luks/host-treetowl discard,luks
+EOF
+    if [[ ! -e /dev/mapper/crypt_dev_$dev ]]; then
+      cryptdisks_start crypt_dev_$dev
+    fi
+  done
+  # note, could do an else here and have some kind of mount for /i
+  # on other hosts.
+fi
+
+exit $ret
--- /dev/null
+#!/bin/bash -l
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+# Export kid3 RATING tags from /k/music as sql update statements for
+# kodi's music database. Writes /a/tmp/y.sql; does not execute it.
+# cd /k/music
+# find -type f -name '*.flac' | while read -r f; do
+#   mkdir -p "../flacs/$(dirname "$f")"
+#   mv -T "$f" ../flacs/"$f"
+# done
+
+
+
+# todo, add settings from /etc/default/nfs-{common,kernel-server}
+# todo: do mysql setup. kodi install. mysql backup.
+
+# in kodi, music, add files, named source, add network share,
+# server address: iank.life
+# path: k/music
+
+
+
+rm -f /a/tmp/y.sql
+
+cd /k/music
+find -type f \( -name '*.flac' -or -name '*.mp3' -or -name '*.m4a' \) | while read -r f; do
+  rating=$(kid3-cli -c "get RATING" "$f")
+  if [[ ! $rating ]]; then
+    # no rating tag: print the path so it can be handled manually
+    echo $f
+    continue
+  fi
+  # NOTE(review): assumes kid3 ratings are half of kodi's userrating
+  # scale -- confirm
+  rating=$((rating*2))
+
+  ## begin sql escaping
+  # bugfix: escape backslashes FIRST; otherwise the escape backslashes
+  # inserted by the substitutions below would themselves get doubled,
+  # corrupting any path that contains quotes plus a backslash.
+  f="${f//\\/\\\\}"
+  f="${f//\"/\\\"}"
+  f="${f//\'/\\\'}"
+  f="${f//_/\\_}"
+  f="${f//%/\\%}"
+  ## end sql escaping
+  d=${f%/*}
+  d=${d#./}/ # use exact dir format that is in database
+  cat >>/a/tmp/y.sql <<EOF
+update song
+inner join path on song.idPath = path.idPath
+set song.userrating = $rating
+where song.strFileName = '${f##*/}' and path.strPath = 'nfs://iank.life/k/music/$d';
+EOF
+done
--- /dev/null
+#!/bin/bash
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"
+
+shopt -s extglob nullglob
+
+echo '[Default Applications]' >/etc/xdg/defaults.list
+sed -r '/^MimeType=/!d;s/^MimeType=//;s/;/=mpv.desktop\n/g' /usr/share/applications/mpv.desktop >>/etc/xdg/defaults.list
+
+# this is the new one, according to
+# https://specifications.freedesktop.org/mime-apps-spec/mime-apps-spec-1.0.html
+# https://wiki.debian.org/MIME
+# https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+cp /etc/xdg/defaults.list /etc/xdg/mimapps.list
+
+for f in /usr/share/applications/!(defaults).list; do
+ cp /etc/xdg/defaults.list /etc/xdg/${f##*/}
+done
+
+update-desktop-database
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# I failed at figuring out how to make a script happen early in reboot
+# but not on shutdown with systemd, so I use this to shutdown
+# and ask for a key.
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+# prefix commands with sudo only when not already running as root
+sudo_cmd=()
+if [[ $EUID != 0 ]]; then
+  sudo_cmd=(sudo)
+fi
+
+# drop the flag file (consumed elsewhere, presumably by the boot
+# keyscript -- TODO confirm), then power the machine off
+"${sudo_cmd[@]}" touch /tmp/keyscript-off
+"${sudo_cmd[@]}" poweroff
--- /dev/null
+#!/bin/bash -l
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+# Sync config files with HOST using unison: regenerate unison profiles,
+# push a compatible unison binary to the host, optionally snapshot both
+# ends, run unison, then exchange the machine_specific archives.
+
+# note, we could have done machine specific subvols,
+# but there were so few files, it just didn't seem
+# necessary to have an rw copy on those machines
+# which did not have the whole p subvol.
+
+gen_args=()
+do_snapshot=false
+batch=false
+while [[ $1 ]]; do
+  case $1 in
+    -ob) gen_args+=(--ours -b); batch=true; shift ;;
+    -b) gen_args+=(-b); batch=true; shift ;;
+    -n) do_snapshot=true; shift ;;
+    -h|--help)
+      echo "$0: help is head of $BASH_SOURCE:"
+      head -n 30 "$BASH_SOURCE"
+      exit 0
+      ;;
+    --) shift; break ;;
+    -*) gen_args+=($1) ; shift ;;
+    *) host=$1; shift ;;
+  esac
+done
+# if we broke out at --, the next positional arg is the host
+if [[ ! $host ]]; then
+  host=$1
+fi
+
+if [[ $host == demohost ]]; then
+  do_snapshot=false
+fi
+
+# arch and debian don't play nice with unison.
+# "connection lost" on unison-gtk, with bignum errs in term,
+# and simply freezing on cli, all means compiled under
+# different ocaml versions.
+# I had the great realization that unison is a self contained
+# binary with no special runtime dependencies.
+# However, I got a gtk error running the ui version.
+# todo: I should test pushing my unison version instead
+# of pulling it.
+echo "$0: running: gen ${gen_args[@]}"
+/p/c/subdir_files/.unison/gen ${gen_args[@]}
+if [[ ! $host ]]; then
+  echo "$0: error: expected argument for hostname"
+  exit 1
+fi
+
+
+# todo: this doesn't deal with if the host is running arch too
+
+# debian uses this path, and has gtk version linked as /usr/bin/unison,
+# but I'd rather not push the gtk version
+push_path=/usr/bin/unison-latest-stable
+if [[ ! -e $push_path ]]; then
+  push_path=/usr/bin/unison
+fi
+# note, i'd prefer to use /usr/local/bin, but it's not in arch's default
+# path for root.
+# bugfix: push $push_path. This was hardcoded to
+# /usr/bin/unison-latest-stable, which made the fallback above dead
+# code and failed on systems where that file doesn't exist.
+rsync -L $push_path root@$host:/usr/bin/unison
+
+if $do_snapshot; then
+  unison-snapshot
+  if ! ssh root@$host "! type unison-snapshot || unison-snapshot"; then
+    if [[ ! ${gen_args[@]} ]]; then
+      # gen_args are all about syncing with new systems where
+      # we could expect the remote snapshot to not be available yet.
+      echo "$0: error: failed remote unison-snapshot"
+      exit 1
+    fi
+  fi
+fi
+
+if $batch; then
+  # todo, figure out what the cli version is called in non-debian distros
+  f=unison-latest-stable
+else
+  f=unison-gtk
+  type -t $f &>/dev/null || f=unison-gtk2 # for arch
+fi
+
+# "s" looks like a sudo wrapper from my bashrc (hence the -l shebang)
+# -- TODO confirm
+if s $f "$host"; then
+  if [[ $host == frodo ]]; then
+    # only send data to the machine i do full backups to
+    p=/p/c/machine_specific/$HOSTNAME/.unison/
+    s rsync -rlptgohi $p root@$host:$p
+  fi
+  p=/p/c/machine_specific/$host/.unison/
+  s rsync -rlptgohvi root@$host:$p $p
+fi
--- /dev/null
+#!/bin/bash
+
+set -eE -o pipefail
+trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR
+
+# Fetch mail with offlineimap, run new INBOX mail through the sieve
+# filter, then sync again so the moves are reflected remotely.
+
+offlineimap -u quiet
+shopt -s nullglob
+
+omv() { # offlineimap mv. move mail files within $src_base/$1 to /m/md/$2
+  # Sets the global found_files: true iff this call moved anything.
+  # Each call resets it, so after several calls it reflects only the
+  # most recent one (the INBOX call below, by design).
+  src="$1"
+  dst="$2"
+  found_files=false
+  for x in new cur; do
+    files=("$src_base"/"$src"/$x/*)
+    # nullglob: no matches leaves the array empty; [[ $files ]] tests
+    # the first element
+    if [[ $files ]]; then
+      found_files=true
+      mv "${files[@]}" /m/md/"$dst"/$x
+    fi
+  done
+}
+
+src_base=/m/offlineimap
+omv "Sent Items" "Sent"
+omv INBOX offlineimaptmp
+src_base=/m/md
+if $found_files; then
+  sieve-filter -eW ~/sieve/main.sieve offlineimaptmp &>/dev/null
+  # the default folder is INBOX for anything leftover
+  omv offlineimaptmp INBOX
+  # remove messages from remote host
+  offlineimap -u quiet
+  # this makes us sit and wait when we want to use mu and this is running in a cronjob.
+  # todo: emacs updates the index much faster. what command is it running? I'd like
+  # to just run that
+  # looks like it might be mu index --lazy-check, but that still takes like 10 seconds,
+  # figure out if that is the same speed, or if we can make it faster.
+  #mu index &>/dev/null ||:
+fi
--- /dev/null
#!/bin/bash
# Back up the pump.io server on host li: dump mongodb remotely, then
# pull the dump plus pump's data directory to the local backup drive.

# fail fast, consistent with the other scripts in this repo; previously
# the rsync ran even if the mongodump had failed
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

ssh li mongodump >/dev/null # too verbose by default
rsync -r --delete root@li:{/home/iank/dump,/home/pumpio/pumpdata} /w/backup/pump
--- /dev/null
#!/bin/bash -l
# Install and configure the radicale caldav/carddav server.

# reexec as root, preserving environment (-E) and args
[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"


# davdroid setup instructions at the bottom

# main docs:
# http://radicale.org/user_documentation/
# https://davdroid.bitfire.at/configuration/

# note on debugging: if radicale can't bind to the address,
# in the log it just says "Starting Radicale". If you run
# it in the foreground, it will give more info. Background
# plus debug does not help.
# sudo -u radicale radicale -D -f

# created password file with:
# htpasswd -c /etc/davpass dav
+
# Pick the vpn unit template name: newer distros ship
# openvpn-client@.service, older ones only openvpn@.service.
vpn_ser=openvpn-client
if [[ ! -e /lib/systemd/system/openvpn-client@.service ]]; then
  vpn_ser=openvpn
fi

d=/etc/systemd/system/radicale.service.d
mkdir -p $d
# Use $vpn_ser throughout; RequiredBy previously hardcoded
# openvpn-client@mail.service, which doesn't exist on systems that only
# have the openvpn@ template.
sudo dd of=$d/override.conf <<EOF
[Unit]
# this unit is configured to start and stop whenever $vpn_ser@mail.service
# does
After=network.target
BindsTo=$vpn_ser@mail.service
After=$vpn_ser@mail.service

[Install]
RequiredBy=$vpn_ser@mail.service
EOF
ser daemon-reload # not sure this is needed
+
# pi-nostart: presumably installs the package without starting the
# service — helper from my bashrc repo, confirm.
pi-nostart radicale

# I moved /var/lib/radicale after its initialization.
# I did a sudo -u radicale git init in the collections subfolder
# after it gets created, per the git docs.
# lnf: force-create symlink (helper from my repos)
lnf -T /o/radicale /var/lib/radicale

# from https://www.williamjbowman.com/blog/2015/07/24/setting-up-webdav-caldav-and-carddav-servers/

# more config is for li in distro-end

# comment in this file says this is needed for it to run on startup;
# uncomment ENABLE_RADICALE=yes
sed -ri 's/^\s*#+\s*(ENABLE_RADICALE\s*=\s*yes\s*)/\1/' /etc/default/radicale
+
# setini KEY VALUE SECTION
# Idempotently set "KEY = VALUE" under [SECTION] in the radicale config.
# The sed first deletes any existing KEY line within the section (the
# address range runs from the section header to the next [header]), then
# appends the new line directly after the section header.
setini() {
  key="$1" value="$2" section="$3"
  file="/etc/radicale/config"
  sed -ri "/ *\[$section\]/,/^ *\[[^]]+\]/{/^\s*$key[[:space:]=]/d};/ *\[$section\]/a $key = $value" "$file"
}
+
# comments say default is 0.0.0.0:5232
# bind only to the vpn address
setini hosts 10.8.0.4:5232 server

# MAIL_HOST: presumably exported via the login-shell (-l) environment —
# confirm. Only the mail host runs radicale.
# sgo: presumably enable+start the service — helper from my bashrc repo.
if [[ $HOSTNAME == $MAIL_HOST ]]; then
  sgo radicale
fi
+
+# disable power management feature, set to 240 min sync interval,
+# so it shouldn't be bad.
+
# davdroid from f-droid.
+# login with url and user name
+# username ian,
+# url https://cal.iankelling.org
+# username ian
+# pass, see password manager
+#
+# add account dialog:
+#
+# set account name as ian@iankelling.org, per help text below the
+# field.
+#
+# switch to groups are per-contact categories,
+# per https://davdroid.bitfire.at/configuration/radicale/
+#
+#
+# After setting up account, I added one address book, named
# ianaddr. calendar was already created, named ian. checked boxes under
+# both. synced.
+#
# To restore from old phone to new phone, I wiped all data out, then copied over the newly created files, I think.
+#
+# ignorable background info:
+#
+# opentasks uses the calendar file.
+#
+# The address book I created got a uuid as a name for the file. Note
+# the .props file says if it's a calendar or addressbook.
+#
+# When debugging, tailed /var/log/radicale/radicale.log and apache log,
+# both show the requests happening. Without creating the address book,
+# after creating a contact, a sync would delete it.
+#
+# Address books correspond to .props files in the radicale dir.
+#
+# Some background is here,
+# https://davdroid.bitfire.at/faq/entry/cant-manage-groups-on-device/
+# which shows separate vcard option is from rfc 6350, the other is 2426,
+# radicale page says it implements the former not the latter,
+# which conflicts with the documentation of which to select, but whatever.
+# http://radicale.org/technical_choices/
+# https://davdroid.bitfire.at/faq/entry/cant-manage-groups-on-device/
+#
# Note, url above says only cyanogenmod 13+ and omnirom can manage groups.
+
+# Note, radicale had built-in git support to track changes, but they
+# removed it in 2.0.
--- /dev/null
#!/bin/bash
# Run gnirehtet (android reverse tethering) in the foreground.

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

# If a previous relay server is still listening on gnirehtet's port,
# kill it before starting a new one.
existing_pid="$(lsof -i tcp:31416 -Fp -a -c java | sed -rn 's/^p//p')" ||:
if [[ -n $existing_pid ]]; then
  kill $existing_pid
fi

# so we don't hold open the /a fs
sudo rsync -r --delete /a/opt/gnirehtet /opt
cd /opt/gnirehtet

# just running it in foreground so it's easier to kill etc.
./gnirehtet rt


# note, to kill it, you need to also kill
# lsof -i tcp:31416
# and maybe unplug adb cable
# note, ping results in log message: dropping invalid packet


#./gnirehtet rt &>/tmp/rt &

#tail -F /tmp/rt ||:
--- /dev/null
+#!/bin/bash
+# Copyright (C) 2016 Ian Kelling
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo "$BASH_SOURCE" "$@"

# Keep root's ssh dir on /q when that subvol exists (with /root/.ssh a
# symlink to it), otherwise use a plain /root/.ssh.
if test -e /q/root/.ssh; then
  dest=/q/root/.ssh
  /a/exe/lnf $dest /root
else
  dest=/root/.ssh
  mkdir -p $dest
  chmod 700 $dest
fi

user_ssh_dir=$(eval echo ~${SUDO_USER:-$USER})/.ssh

# remove broken links, or else rsync has error about them.
find $user_ssh_dir -xtype l -exec rm '{}' \;
# -t times, so it won't rewrite the file every time,
# -L resolve links
rsync -rtL --delete $user_ssh_dir/ $dest
# chown the real destination: when /root/.ssh is a symlink into /q,
# chown -R on the symlink path does not recurse into the target
# (GNU chown needs -H/-L for that), so the copied files kept the
# original user's ownership.
chown -R root:root $dest
--- /dev/null
#!/bin/bash
# dist-upgrade any of our debian chroots that exist on this machine.

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"

for release in stretch jessie; do
  conf=/etc/schroot/chroot.d/$release.conf
  [[ -e $conf ]] || continue
  cd
  schroot -c $release -- apt-get -y dist-upgrade --purge --auto-remove
done
--- /dev/null
#!/bin/bash

# for duplicity source build
PATH="$PATH:/usr/local/bin"

# [--retry] interval_name [max_age]
# takes 2 arguments specifying the name of the subfolder,
# and optionally, the max age of the backup

# this script setup by adding a user crontab. see t.org for the script
# it's also setup to email me only when it fails, and only for daily or weekly runs

# uncomment for debugging, prints all commands to stdout
#set -x

# -E so the ERR trap fires inside functions/subshells. Unlike most
# scripts here we do not set -e: errors are accumulated via the trap and
# reported at the end.
set -E
#trap 'echo trapped error from \"$BASH_COMMAND\" returned $? line $LINENO; accumulated_errors=true' ERR
trap 'echo trapped err: $?; accumulated_errors=true' ERR

# save the real stdout/stderr on fds 3/4, then log everything to a file
exec 3>&1 4>&2
exec &>> /tmp/small-backup.log

echo "BEGIN: $(date): args $*"
+
+# only works with a single letter, ie 2D, not 2D12h
# half-time INTERVAL
# Convert an interval like "2D" into the spelled-out "2 day" form that
# duplicity's --full-if-older-than understands.
# only works with a single letter, ie 2D, not 2D12h
# NOTE(review): despite the name this does not halve anything; the
# original had unused date math locals, suggesting an unfinished
# halving feature — they are removed here.
half-time() {
  local time_word
  local letter=${1##*[0-9]}
  case $letter in
    # the "s" case previously assigned the typo time_wrod, producing an
    # empty unit in the output
    s) time_word=second ;;
    m) time_word=minute ;;
    h) time_word=hour ;;
    D) time_word=day ;;
    W) time_word=week ;;
    M) time_word=month ;;
    Y) time_word=year ;;
  esac
  echo "${1%%$letter} $time_word"
}
+
+
# Locking: pidof finds this script's own pid too, so a count > 1 means
# another instance is running. With --retry, wait for it to finish
# (up to 20 x 30s = 10 minutes) instead of failing immediately.
if [[ $1 == --retry ]]; then
  shift
  x=0
  while pid=( $(pidof -o %PPID -x ${0##*/}) ) && (( ${#pid[@]} > 1 )) && (( x < 20 )); do
    x=$(( x + 1 ))
    sleep 30
  done
  if [[ $x == 20 ]]; then
    ps -F ${pid[@]}
    # the message previously said 5 minutes; the loop waits 10
    echo timeout error: existing ${0##*/} running for over 10 minutes >&2
    exit 1
  fi
else
  if pid=( $(pidof -o %PPID -x ${0##*/}) ) && (( ${#pid[@]} > 1 )); then
    echo ps -F ${pid[@]}
    ps -F ${pid[@]}
    echo error: existing ${0##*/} running >&2
    exit 1
  fi
fi
+
# positional args: interval name (backup subfolder), optional max age
interval=$1
max_age=$2
full_backup_arg=""
if [[ $max_age ]]; then
  # expanded unquoted later on purpose, so it word-splits into two args
  full_backup_arg="--full-if-older-than $(half-time $max_age)"
fi
+
# rbackup DIR [RDIFF_ARGS...]
# rdiff-backup DIR to host li under /root/rdiff-backups/<basename>/<interval>,
# then prune increments older than $max_age when it is set.
# Uses globals: interval, max_age.
rbackup () {

  local d=$1
  shift
  local dest=root@li::/root/rdiff-backups/${d##*/}/${interval}

  # echo the command before running it; $c is intentionally unquoted so
  # the extra args word-split
  c="rdiff-backup $* --create-full-path $d $dest"
  echo "$c"; $c

  if [[ $max_age ]]; then
    c="rdiff-backup --force --remove-older-than $max_age $dest"
    echo "$c"; $c

  fi
}
+
+
# back up key directories to li via rdiff-backup
rbackup /a/bin --exclude /a/bin/fai-basefiles
rbackup /a/c

# this is populated after input_setup.sh is run on login

ssh root@li mkdir -p /root/duplicity-backups/p/$interval
# gpg agent env so duplicity can sign/encrypt without prompting
source /p/duplicity/gpg_agent_env
duplicity_dest=rsync://root@li//root/duplicity-backups/p/$interval

# only back up /p if it is actually mounted/populated (>1 entry)
x=(/p/*)
if ((${#x[@]} > 1)); then
  set -x
  # archive-dir is sort of a persistent cache
  duplicity --use-agent \
    --encrypt-sign-key E969C67B \
    --include-globbing-filelist /p/duplicity/filelist \
    --archive-dir /p/duplicity/archive \
    --tempdir /p/tmp \
    $full_backup_arg /p $duplicity_dest
  if [[ $max_age ]]; then
    duplicity --use-agent \
      remove-all-but-n-full 2 --force $duplicity_dest
  fi
  set +x
fi
# example restore command. We only need to make the first argument be a url for it to know it to do restore
# the archive-dir and tempdir args are not needed
# duplicity --use-agent --encrypt-sign-key E969C67B --archive-dir /p/duplicity/archive --tempdir /p/tmp ssh://root@li//root/duplicity-backups/p/weekly /p/duptest


echo END
+
# to restore duplicity. see man for additional options
# duplicity --use-agent restore ...

# The ERR trap above sets accumulated_errors; if anything failed, dump
# the log tail and exit nonzero so cron mails us. The original checked
# the misspelled $accumulated_error (never set) and called the
# nonexistent "eccho", so failures were silently swallowed.
if [[ $accumulated_errors ]]; then
  echo "tail -n 50 of /tmp/small-backup.log:"
  tail -n 50 /tmp/small-backup.log
  exit 1
fi
--- /dev/null
#!/bin/bash
# Restart spamassassin if its known dns failover bug shows up in the
# journal (meant to run periodically from cron/systemd).

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

# must be root; with set -e a failed test exits via the ERR trap
[[ $EUID == 0 ]]

# to deal with this bug until it's fixed
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=741521
# I just happened to notice it in my journal.
str="dns: sendto() to \S\+ failed: Connection refused, failing over"
if journalctl --since=-9m --unit=spamassassin | grep -q "$str"; then
  echo "dns bug, restarting spamassassin"
  systemctl restart spamassassin
fi
--- /dev/null
#!/bin/bash -l
# Copyright (C) 2016 Ian Kelling

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configure ssh client and server: propagate a few env vars across ssh
# sessions and trim motd noise.

if [[ $EUID != 0 ]]; then
  # pass args along; the original dropped "$@" on reexec
  sudo "$0" "$@"
  exit
fi

set -eE -o pipefail
# errors to stderr, consistent with the other scripts in this repo
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

cd "$(dirname "$0")"
# teeu: presumably appends the line if not already present — helper from
# my bashrc repo, confirm.
teeu /etc/ssh/ssh_config 'SendEnv INSIDE_EMACS BASH_LOGIN_SHELL COLUMNS'
teeu /etc/ssh/sshd_config 'AcceptEnv INSIDE_EMACS BASH_LOGIN_SHELL COLUMNS'
# get rid of useless motd stuff
sed -i --follow-symlinks 's/^\s*PrintLastLog .*/PrintLastLog no/' /etc/ssh/sshd_config
rm -f /etc/update-motd.d/10-help-text /etc/update-motd.d/00-header


if isdeb; then
  # fyi: debconf-set-selections doesn't like mixing tabs and spaces
  echo "debconf debconf/frontend select Readline" | debconf-set-selections
  service ssh reload
else
  systemctl reload sshd
fi
--- /dev/null
#!/bin/bash
# Move the mail-receiving role (MAIL_HOST) from OLD_HOST to NEW_HOST.
set -x
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

usage() {
  cat <<EOF
Usage: ${0##*/} OLD_HOST NEW_HOST

Adjust home network dns so NEW_HOST resolves locally if it is on the
local network. Turn off mail receiving on OLD_HOST, run btrbk to move
mail to NEW_HOST, turn on mail receiving on NEW_HOST.

-w Don't try to ssh to wrt. Should only be used in unusual network situation.
-h|--help Print help and exit.

Note: Uses GNU getopt options parsing style
EOF
  exit $1
}
+
##### begin command line parsing ########

update_wrt=true # default
# GNU getopt: short opts -w/-h, long opt --help
temp=$(getopt -l help wh "$@") || usage 1
eval set -- "$temp"
while true; do
  case $1 in
    -w) update_wrt=false; shift ;;
    -h|--help) usage ;;
    --) shift; break ;;
    *) echo "$0: Internal error! unexpected args: $*" ; exit 1 ;;
  esac
done


(( $# == 2 )) || usage 1
+
old_host=$1
new_host=$2

# Validate before using the args anywhere. getopt guarantees two args,
# but they could still be empty strings; the original only checked
# after they had already been used below.
if [[ ! $new_host || ! $old_host ]]; then
  echo "$0: bad args. see script"
  exit 1
fi

# defines MAIL_HOST among others
source /a/bin/bash_unpublished/source-semi-priv

if [[ $old_host != $MAIL_HOST ]]; then
  read -p "warning: \$old_host != \$MAIL_HOST: $old_host != $MAIL_HOST, proceed? y/N "
  if [[ $REPLY != [yY] ]]; then
    exit 1
  fi
fi

# an empty *_shell prefix means "run the command locally"
if [[ $new_host == "$HOSTNAME" ]]; then
  localhost_new=true
  new_shell=
else
  localhost_new=false
  new_shell="ssh $new_host"
fi

old_shell="ssh $old_host"
if [[ $old_host == "$HOSTNAME" ]]; then
  old_shell=
fi
+
# Detect whether we are on the home lan: known home hostnames, or the
# home router answers ssh quickly.
at_home=false
if [[ $HOSTNAME == tp ]] || [[ $HOSTNAME == frodo ]] || timeout -s 9 5 ssh wrt.b8.nz :; then
  at_home=true
fi
echo "$0: at_home = $at_home"

# re-sourced for HOME_DOMAIN etc (also sourced earlier in this script)
source /a/bin/bash_unpublished/source-semi-priv
#### begin convert private hostnames to public hostnames ####
if ! $at_home; then
  for var in old_host new_host; do
    case ${!var} in
      tp)
        eval $var=$HOME_DOMAIN
        ;;
    esac
  done
fi
#### end convert private hostnames to public hostnames ####


# because our port forward is not robust enough, we can't use proxy command,
# todo: setup vpn so this is all taken care of.
# wrt_shell: how to run commands on the home router; ":" = no-op (-w)
if ! $update_wrt; then
  wrt_shell=:
elif $at_home; then
  wrt_shell="ssh wrt.b8.nz"
else
  # off-site: hop through a reachable host to get to the router
  if [[ $old_host == iank.vpn.office.fsf.org || $new_host == iank.vpn.office.fsf.org ]]; then
    wrt_shell="ssh iank.vpn.office.fsf.org ssh wrt.b8.nz"
  else
    wrt_shell="ssh $HOME_DOMAIN ssh wrt.b8.nz"
  fi
fi

# wait until no btrbk transfer is in flight on either host
btrbk_test="systemctl is-active btrbk.service"
while $new_shell $btrbk_test || $old_shell $btrbk_test; do
  echo "$0: btrbk is running on new or old host. sleeping for 8 seconds"
  sleep 6
  echo "$0: testing for btrbk activity in 2 seconds"
  sleep 2
done
+
########### end initial processing, begin actually modifying things ##########

# Pause btrbk timers on both ends so a scheduled run can't race the
# manual run below; remember which ones to restart afterward.
restore_new_btrbk=false
if $new_shell systemctl is-active btrbk.timer; then
  $new_shell sudo systemctl stop btrbk.timer
  restore_new_btrbk=true
fi
restore_old_btrbk=false
if $old_shell systemctl is-active btrbk.timer; then
  $old_shell sudo systemctl stop btrbk.timer
  restore_old_btrbk=true
fi

# Kill arbtt-capture on the old host, retrying while killall still
# finds a process. The original guard was [[ i == 9 ]], which compares
# the literal string "i" against "9" and is always false, so the
# warning could never be set.
for ((i=0; i<10; i++)); do
  $old_shell killall arbtt-capture || break
  sleep 1
  if (( i == 9 )); then
    warn="WARNING!!! failed to kill arbtt-capture"
  fi
done
+
+
+$new_shell bash -s <<'EOF'
+set -eE
+if mountpoint /m; then sudo umount /m; fi
+if mountpoint /o; then sudo umount /o; fi
+EOF
+
+# if new_host is not on home network, make mail.iankelling.org not resolve
+# on the home network.
+if [[ $new_host == $HOSTNAME ]] && ! $at_home; then
+ echo | $wrt_shell cedit mail_host /etc/hosts || [[ $? == 1 ]] # 1 means file changed.
+else
+ $wrt_shell bash -s <<EOFOUTER
+ cedit mail_host /etc/hosts <<EOF || /etc/init.d/dnsmasq restart
+\$(grep "\b$new_host\b" /etc/hosts | awk '{print \$1}') mail.iankelling.org
+EOF
+EOFOUTER
+fi
+
+mail-setup() {
+ shell="$1"
+ $shell sed -ri "s/MAIL_HOST=.*/MAIL_HOST=$new_host/" /a/bin/bash_unpublished/source-semi-priv
+ $shell /a/bin/distro-setup/mail-setup exim4
+}
+
+mail-setup "$old_shell"
+
+sudo dd of=/etc/btrbk.conf <<'EOF'
+ssh_identity /root/.ssh/home
+# Just a guess that local7 is a good facility to pick.
+# It's a bit odd that the transaction log has to be logged to
+# a file or syslog, while other output is sent to std out.
+# The man does not mention a way for them to be together, but
+# I dunno if setting a log level like warn might also output
+# transaction info.
+transaction_syslog local7
+
+# so we only run one at a time
+lockfile /var/lock/btrbk.lock
+
+# default format of short does not accomidate hourly preservation setting
+timestamp_format long-iso
+
+# only make a snapshot if things have changed
+snapshot_create onchange
+# I could make this different from target_preserve,
+# if one disk had less space.
+# for now, keeping them equal.
+snapshot_preserve 36h 14d 8w 24m
+snapshot_preserve_min 4h
+snapshot_dir btrbk
+
+# so, total backups = ~89
+target_preserve 36h 14d 8w 24m
+target_preserve_min 4h
+
+# if something fails and it's not obvious, try doing
+# btrbk -l debug -v dryrun
+
+
+EOF
+
+
# Direction of transfer: if we are the new host, pull from the old host
# over ssh; otherwise push to the new host.
if $localhost_new; then
  btrbk_src=ssh://$old_host/mnt/root
  btrbk_dst=/mnt/root/btrbk
else
  btrbk_src=/mnt/root
  btrbk_dst=ssh://$new_host/mnt/root/btrbk
fi

sudo tee -a /etc/btrbk.conf <<EOF
volume $btrbk_src
subvolume o
target send-receive $btrbk_dst
EOF


# move the mail subvol to the new host, then mount it there
sudo btrbk -l debug --progress run
$new_shell mount-latest-subvol

# enable mail reception on the new host
mail-setup "$new_shell"
+
# restart whichever btrbk timers we stopped earlier
if $restore_new_btrbk; then
  $new_shell sudo systemctl start btrbk.timer
fi
if $restore_old_btrbk; then
  $old_shell sudo systemctl start btrbk.timer
fi

# restart time tracking on the new host
$new_shell arbtt-capture --sample-rate=10 &

# Print the killall warning (if any) last so it isn't lost in the set -x
# noise. The original echoed $warn unquoted and unconditionally, which
# printed a blank line when no warning was set.
if [[ $warn ]]; then
  echo "$warn"
fi
--- /dev/null
#!/bin/bash
# Pull the mail role to this machine from host $1: kill interactive apps
# there, btrbk-sync subvols if needed, then run switch-mail-host.

source /a/bin/errhandle/errcatch-function
source /a/bin/errhandle/bash-trace-function
errcatch

cd /

old=$1
new=$HOSTNAME

if [[ ! $old ]]; then
  echo "$0: error: no \$1 given, should be old host"
  exit 1
fi

# The original printed this error but kept going; nothing below can work
# without ssh to $old, so exit like every other error path here.
if ! timeout -s 9 2 ssh $old : ; then
  echo "$0: error: can't ssh to $old"
  exit 1
fi

# close programs holding files open on the subvols to be moved
for p in emacs firefox pidgin; do
  ssh $old killall $p ||:
done
+
# note: duplicated in check-subvol-stale
# Find the newest local btrbk snapshot of the "a" subvol: the sed
# presumably reformats btrbk's long-iso snapshot suffix into a string
# "date -d" accepts — confirm against actual snapshot names. Epoch
# seconds sort correctly with the lexical sort -r because they are all
# 10 digits (checked below).
last_a=$(
  vol=a
  for s in /mnt/root/btrbk/$vol.*; do
    f=${s##*/}
    unix_time=$(date -d $(sed -r 's/(.{4})(..)(.{5})(..)(.*)/\1-\2-\3:\4:\5/' <<<${f#$vol.}) +%s)
    printf "%s\n" $unix_time
  done | sort -r | head -n 1
  )

# sanity check: current epoch seconds are 10 digits
if [[ $last_a != [0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9] ]]; then
  echo "$0: error: last_a bad value: $last_a"
  exit 1
fi

# if last_a is recent enough, skip doing btrbk
if (( last_a < $(date +%s) - 60*60 )); then
  if ! ssh $old btrbk-run -pvt $new; then
    echo "$0: error: failed btrbk-run"
    exit 1
  fi
fi

switch-mail-host $old $new
--- /dev/null
+*filter
+:INPUT ACCEPT
+:FORWARD ACCEPT
+:OUTPUT ACCEPT
# prevent the transmission daemon from doing anything outside its
# network namespace.
+-A OUTPUT -m owner --uid-owner debian-transmission -j REJECT
+COMMIT
--- /dev/null
+*filter
+:INPUT ACCEPT
+:FORWARD ACCEPT
+:OUTPUT ACCEPT
+COMMIT
--- /dev/null
+# format from iptables-save. [0:0] are comments of packet-count/byte-count
+# which I removed
+*filter
+:INPUT DROP
+:FORWARD ACCEPT
+:OUTPUT DROP
+# from ip route, we can deduce that traffic goes to the
+# local 10.8.0.x tun0, then to the normal interface.
+# For the normal interface, we allow only some ports:
+# dns, vpn, transmission-remote.
+# dns is only used to resolve the vpn server ip on initial
+# connection.
# rules are mirrored on input and output, just for extra safety,
+# although just having output should do fine.
+
+# We could also firewall from outside the nat, for example like this,
+# but I'm thinking this is simpler.
+#-A FORWARD -i brvpn -p udp -m udp --dport 1194:1195 -j ACCEPT
+#-A FORWARD -i brvpn -j REJECT
+
+# prevent dns leaks, openvpn runs as root, allow root to
+# make non-vpn dns calls, but not transmission which does not run as root.
+# openvpn needs this in order to lookup the ip of the vpn server
+# before it's connected to it. We could hardcode the vpn ips in the
+# config, but our vpn service provider gave us dns, so the ip might change.
+-A OUTPUT -p udp -m udp --dport 53 -m owner --uid-owner root -j ACCEPT
+-A INPUT -p udp -m udp --sport 53 -j ACCEPT
+
+-A OUTPUT -p tcp -m tcp --dport 53 -m owner --uid-owner root -j ACCEPT
+-A INPUT -p tcp -m tcp --sport 53 -j ACCEPT
+
+-A OUTPUT -p tcp -m tcp --sport 9091 -j ACCEPT
+-A INPUT -p tcp -m tcp --dport 9091 -j ACCEPT
+
+# 1195 is used for the secondary vpn server
+-A OUTPUT -p udp -m udp --dport 1194:1195 -j ACCEPT
+-A INPUT -p udp -m udp --sport 1194:1195 -j ACCEPT
+
+-A OUTPUT -o tun0 -j ACCEPT
+-A INPUT -i tun0 -j ACCEPT
+
+# makes debugging things easier
+-A INPUT -p icmp -j ACCEPT
+-A OUTPUT -p icmp -j ACCEPT
+COMMIT
--- /dev/null
#!/bin/bash
# Revert to the normal dns setup after leaving an untrusted network.

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"

# Usage: run when switching from an untrusted network like public wifi
# to a trusted one.

# removing the override restores dnsmasq's normal upstream resolvers
rm -f /etc/dnsmasq.d/untrusted-network.conf

systemctl reload dnsmasq
--- /dev/null
#!/bin/bash -l
# Take read-only btrfs snapshots of the unison-synced subvols, keeping
# the last 10, so a bad sync can be rolled back.
set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

# sv: print a command, then run it
sv() { echo "$@"; "$@"; }

# snapshot name prefix for today, eg unison-2017:01:02
date=unison-$(date "+%Y:%m:%d")
for mountp in /mnt/root/q /mnt/iroot/i; do
  [[ -e $mountp ]] || continue
  date_path=${mountp%/*}/$date
  # today's existing snapshots. nullglob is not set: with no match the
  # array holds the literal pattern and the -e test below fails, which
  # is what selects the "-1" branch.
  todays=( $date_path-* )
  IFS=$'\n' todays=($(sort -Vr <<<"${todays[*]}")); unset IFS
  if [[ -e ${todays[0]} ]]; then
    # increment the counter suffix of the newest snapshot from today
    snapshot=$date_path-$(( ${todays[0]##*-} + 1 ))
  else
    snapshot=$date_path-1
  fi
  # s: presumably a sudo wrapper from my bashrc repo — confirm
  sv s btrfs subvolume snapshot -r $mountp $snapshot
  existing=( ${mountp%/*}/unison-* )
  IFS=$'\n' existing=($(sort -Vr <<<"${existing[*]}")); unset IFS
  # keep 10 unison snapshots around
  if (( ${#existing[@]} > 10 )); then
    sv s btrfs subvolume delete ${existing[@]:10}
  fi
done
--- /dev/null
#!/bin/bash
# Route dns straight to public resolvers while on an untrusted network.

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

[[ $EUID == 0 ]] || exec sudo -E "$BASH_SOURCE" "$@"

# Usage: use when switching from a trusted network to an untrusted one,
# like public wifi.

# override dnsmasq upstream servers with google public dns (v4 + v6)
cat >/etc/dnsmasq.d/untrusted-network.conf <<'EOF'
server=8.8.4.4
server=8.8.8.8
server=2001:4860:4860::8844
server=2001:4860:4860::8888
no-resolv
EOF

systemctl reload dnsmasq
--- /dev/null
#!/bin/bash
# openvpn client-connect/client-disconnect hook: keep /etc/hosts on the
# home router in sync with vpn client addresses.

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

x="$(readlink -f "$BASH_SOURCE")"; cd ${x%/*} # directory of this file

# see lan-dyn-dns-update. this is the corresponding script for on connect/disconnect from vpn

# script_type, X509_0_CN, and ifconfig_pool_remote_ip are set in the
# environment by openvpn when it invokes this hook.
d=/p/ovpn-ssh
# cedit returns 1 when it changed the file, hence "|| [[ $? == 1 ]]" below
ssh_cmd="ssh -F$d/.config -i$d/home root@wrt.b8.nz cedit ovpn-$X509_0_CN /etc/hosts"
case $script_type in
  client-connect) $ssh_cmd <<<"$ifconfig_pool_remote_ip $X509_0_CN"|| [[ $? == 1 ]]
    ;;
  client-disconnect)
    # restore the static entry for this client from the lan-dns file
    # (in this script's directory)
    $ssh_cmd <<<$(grep -F $X509_0_CN lan-dns) || [[ $? == 1 ]]
    ;;
esac
--- /dev/null
#!/bin/bash
# Toggle nat port forwarding of mail ports on eth0 to the vpn client
# at 10.8.0.4.

set -eE -o pipefail
trap 'echo "$0:$LINENO:error: \"$BASH_COMMAND\" returned $?" >&2' ERR

# do-forward IPTABLES_ACTION PORT...
# add (-A) or delete (-D) a DNAT prerouting rule for each port
do-forward() {
  local action=$1
  shift
  local port
  for port; do
    /sbin/iptables -t nat $action PREROUTING -i eth0 -p tcp -m tcp --dport $port -j DNAT --to-destination 10.8.0.4:$port
  done
}

# smtp and imap
ports=(25 143)
case $1 in
  start) do-forward -A ${ports[@]} ;;
  stop) do-forward -D ${ports[@]} ;;
  *)
    echo "$0: error: expected 1 argument of start or stop"
    exit 1
    ;;
esac