#!/bin/bash
# Copyright (C) 2019 Ian Kelling
# SPDX-License-Identifier: AGPL-3.0-or-later

# Pull in the user's interactive environment (PATH additions, helper
# functions like pi/sd/sgo used throughout this script live there).
# shellcheck source=/a/bin/ds/.bashrc
export LC_USEBASHRC=t; if [[ -s ~/.bashrc ]]; then . ~/.bashrc; fi

### setup
# Project-wide error handler: installs ERR traps and provides the
# err-cleanup hook called below. TODO confirm path on fresh installs.
source /a/bin/errhandle/err
# Directory containing this script, with symlinks resolved.
src="$(readlink -f -- "${BASH_SOURCE[0]}")"; src=${src%/*}

# This script escalates with sudo where needed; running the whole thing
# as root would create root-owned files in $HOME.
if [[ $EUID == 0 ]]; then
  echo "$0: error: run as regular user" >&2
  exit 1
fi

# Called by the errhandle framework on any unhandled error: record a
# nonzero status so other tooling can see the last run failed.
err-cleanup() {
  echo 1 >~/.local/distro-end
}

# Package-list definitions (pall, pi, etc.) shared with other scripts.
# shellcheck source=./pkgs
source "$src/pkgs"

echo "$0: $(date): starting now"

# Accumulate a message to print at the very end of the run.  Reads a
# whole heredoc from stdin into end_msg_var; see call sites for usage.
end_msg() {
  local y
  IFS= read -r -d '' y ||:
  end_msg_var+="$y"
}

# Print accumulated end messages, record success, reboot if one of the
# steps requested it, and exit 0.
end() {
  e "$end_msg_var"
  echo 0 >~/.local/distro-end
  if $pending_reboot; then
    echo "$0: pending reboot and then finished. doing it now."
    echo "exiting with status 0"
    sudo reboot now
  else
    echo "$0: $(date): ending now"
    echo "exiting with status 0"
  fi
  exit 0
}

# Log prefix: this script's basename.
pre="${0##*/}:"

# Wrap sudo so every privileged command is echoed first.  SUDOD is
# consumed by a sudoers/profile hook to preserve $PWD across sudo.
# NOTE: '%s %s' format avoids treating $pre as a printf format (SC2059).
sudo() {
  printf '%s %s\n' "$pre" "$*"
  SUDOD="$PWD" command sudo "$@"
}
# m: echo a command, then run it.  e: echo only.
m() { printf '%s %s\n' "$pre" "$*"; "$@"; }
e() { printf '%s %s\n' "$pre" "$*"; }

# Distro identification helpers come from the sourced environment.
distro=$(distro-name)
codename=$(debian-codename)
codename_compat=$(debian-codename-compat)
pending_reboot=false
sed="sed --follow-symlinks"

# when we schroot, it will fail if we are in a directory that doesnt
# exist in the chroot
cd /

## template:
# case $distro in
# esac

case $HOSTNAME in
  li|bk|je) : ;;
  *)
    # get sudo pass cached right away
    if ! sudo -nv 2>/dev/null; then
      sudo -v
    fi
    ;;
esac

# old repo.
remove when all machines updated sudo rm -fv /etc/apt/sources.list.d/wireguard-ubuntu-wireguard-bionic.list # remove old file sudo rm -fv /etc/apt/preferences.d/minetest #### initial packages pup if isdeb; then pi aptitude fi # avoid prompts sudo debconf-set-selections < # AllowOverride None # AuthType basic # AuthName "Authentication Required" # # setup one time, with root:www-data, 640 # AuthUserFile "/etc/prometheus-htpasswd" # Require valid-user # # EOF # fi pi debootstrap ######### begin universal pinned packages ###### case $(debian-codename) in nabia|etiona|flidas) sudo rm -fv /etc/apt/preferences.d/etiona-buster sd /etc/apt/preferences.d/trisquel-debian </dev/null </dev/null; then # moved to fai #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32 sd /etc/apt/preferences.d/flidas-bionic <$t <$t <$t <<'EOF' deb http://http.us.debian.org/debian buster main deb-src http://http.us.debian.org/debian buster main deb http://security.debian.org/ buster/updates main deb-src http://security.debian.org/ buster/updates main deb http://http.us.debian.org/debian buster-updates main deb-src http://http.us.debian.org/debian buster-updates main deb http://http.debian.net/debian buster-backports main deb-src http://http.debian.net/debian buster-backports main EOF ;; bullseye) cat >$t <<'EOF' EOF cat >$t <$t <<'EOF' deb http://us.archive.ubuntu.com/ubuntu/ focal main universe deb http://us.archive.ubuntu.com/ubuntu/ focal-updates main universe deb http://us.archive.ubuntu.com/ubuntu/ focal-security main universe EOF if ! 
diff -q $t $f; then sudo dd if=$t of=$f 2>/dev/null p update fi sd /etc/apt/preferences.d/nabia-etiona <<'EOF' Package: * Pin: release n=nabia*,o=Trisquel Pin-Priority: -100 EOF f=/etc/apt/sources.list.d/nabia.list t=$(mktemp) cat >$t <<'EOF' deb http://mirror.fsf.org/trisquel/ nabia main deb-src http://mirror.fsf.org/trisquel/ nabia main deb http://mirror.fsf.org/trisquel/ nabia-updates main deb-src http://mirror.fsf.org/trisquel/ nabia-updates main deb http://archive.trisquel.info/trisquel/ nabia-security main deb-src http://archive.trisquel.info/trisquel/ nabia-security main # Uncomment this lines to enable the backports optional repository deb http://mirror.fsf.org/trisquel/ nabia-backports main deb-src http://mirror.fsf.org/trisquel/ nabia-backports main EOF if ! diff -q $t $f; then sudo dd if=$t of=$f 2>/dev/null p update fi # duplicated in fai. Pin for bionic + ubuntu so that ppas dont get # matched. sd /etc/apt/preferences.d/etiona-bionic <<'EOF' Package: * Pin: release n=bionic,o=Ubuntu Pin-Priority: -100 Package: firefox Pin: release n=bionic,o=Ubuntu Pin-Priority: 500 EOF sd /etc/apt/preferences.d/etiona-sa <<'EOF' Package: * Pin: release n=bionic,o=Ubuntu Pin-Priority: -100 EOF ;;& *) if isdeb; then pi debian-goodies shellcheck fi ;; esac case $codename_compat in focal) sd /etc/apt/preferences.d/nabia-btrfs-progs <<'EOF' Package: btrfs-progs Pin: release a=buster-backports Pin-Priority: 500 EOF p install btrfs-progs ;; esac # 2020-03-03 old file s rm -fv /etc/apt/preferences.d/radicale ######### end universal pinned packages ###### case $codename in etiona) sd /etc/apt/preferences.d/obs < AuthType Basic AuthName "basic_auth" # created with # htpasswd -c prometheus-export-htpasswd USERNAME AuthUserFile "/etc/prometheus-export-htpasswd" Require valid-user EOF ;;& bk) sgo wg-quick@wgmail # i just dont feel like setting up a special purpose ssh key to do this automatically. 
end_msg <<'EOF' # run this once for bk on local machine: # only used for old openvpn setup /a/exe/vpn-mk-client-cert -c bk.b8.nz -b expertpath -n mail li.iankelling.org EOF end ;; je) end ;; li) m /a/h/setup.sh iankelling.org # start mumble only when im going to use it, since i dont use it much pi-nostart mumble-server sudo $sed -ri "s/^ *(serverpassword=).*/\1$(< /a/bin/bash_unpublished/mumble_pass)/" /etc/mumble-server.ini # do certificate to avoid warning about unsigned cert, # which is overkill for my use, but hey, I'm cool, I know # how to do this. m web-conf apache2 mumble.iankelling.org sudo rm -fv /etc/apache2/sites-enabled/mumble.iankelling.org # general vpn for as needed use vpn-server-setup -d -r -4 10.5.5 -p 443 -n hole # todo: consider if this should exist for the # other vpn server teeu /etc/openvpn/server/hole.conf <<'EOF' client-to-client EOF # sullivan d8 sd /etc/openvpn/client-config-hole/sd8 <<'EOF' ifconfig-push 10.5.5.41 255.255.255.0 EOF # hsieh d8 sd /etc/openvpn/client-config-hole/hd8 <<'EOF' ifconfig-push 10.5.5.42 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/onep9 <<'EOF' ifconfig-push 10.5.5.14 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/bo <<'EOF' ifconfig-push 10.5.5.13 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/sy <<'EOF' ifconfig-push 10.5.5.12 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/kw <<'EOF' ifconfig-push 10.5.5.9 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/x3 <<'EOF' ifconfig-push 10.5.5.8 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/x2 <<'EOF' ifconfig-push 10.5.5.7 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/wclient <<'EOF' ifconfig-push 10.5.5.6 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/frodo <<'EOF' ifconfig-push 10.5.5.5 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/amy <<'EOF' ifconfig-push 10.5.5.3 255.255.255.0 EOF sd /etc/openvpn/client-config-hole/kd <<'EOF' ifconfig-push 10.5.5.2 255.255.255.0 EOF # for adding to current system: 
#vpn-mk-client-cert -s "" -n hole 72.14.176.105 # adding to remove system 107, #vpn-mk-client-cert -s "" -n hole -c 10.2.0.107 -b hd8 iankelling.org # # for wireguard hole vpn # wghole # requested from linode via a support ticket. # https://www.linode.com/docs/networking/an-overview-of-ipv6-on-linode/ # ipv6 stuff pieced together # via slightly wrong information from # https://github.com/angristan/openvpn-install/blob/master/openvpn-install.sh # https://community.openvpn.net/openvpn/wiki/IPv6 # and man openvpn. # # This uses a public /64, but afaik, that is not needed for my # current use case, since I'm doing ipv6 nat, they could be private # and nat to a public address. I'm doing ipv6 nat in order to have 2 # vpn clients that externally share the same ip when sending so they # share ip reputation. Note, the eth0 and tunx needed to be # different subnets. On the same, I saw some neighbor packet go # unanswered, tcpdump showed packets getting lost. I tried splitting the # /64, it didn't work but I didn't investigate closely. # not starting as i intend to replace it m vpn-server-setup -s -i tunmail -n mail -d -6 2600:3c00:e002:3800::/64 2600:3c00:e002:3800::1/64 sudo tee /etc/openvpn/client-config-mail/mailclient <<'EOF' ifconfig-push 10.8.0.4 255.255.255.0 ifconfig-ipv6-push 2600:3c00:e002:3800::4/64 EOF sudo tee /etc/openvpn/client-config-mail/expertpath <<'EOF' ifconfig-push 10.8.0.5 255.255.255.0 ifconfig-ipv6-push 2600:3c00:e002:3800::5/64 EOF sudo dd of=/etc/systemd/system/vpn-mail-forward.service <<'EOF' [Unit] Description=Turns on iptables mail nat BindsTo=openvpn-server@mail.service [Service] Type=oneshot RemainAfterExit=yes ExecStart=/a/bin/distro-setup/vpn-mail-forward tunmail start ExecStop=/a/bin/distro-setup/vpn-mail-forward tunmail stop [Install] RequiredBy=openvpn-server@mail.service EOF ser daemon-reload # needed for li's local mail delivery. tu /etc/hosts <<<"10.8.0.4 mx.iankelling.org" # wgmail handles this. 
#sgo vpn-mail-forward.service # old: #sgo openvpn-server@mail sgo wg-quick@wgmail # setup let's encrypt cert m web-conf apache2 mail.iankelling.org # TODO, i expanded the above cert manually to mx.iankelling.org, this should be captured # in the automation here. We use mail.iankelling.org as our ehlo name when sending mail # but our mx record is mx.iankelling.org. Initially I was just using mail.iankelling.org, # but the problem is I want multiple ips to be able to identify as mail.iankelling.org, # but a subset to be mx.iankelling.org. Afaik, there is no problem with having # our mail cert be for mail.iankelling.org, and have people connect to mx.ian..., # but it doesn't make logical sense to do this. sudo rm -fv /etc/apache2/sites-enabled/mail.iankelling.org{,-redir}.conf ser reload apache2 a2enmod -q proxy proxy_http domain=cal.iankelling.org web-conf -f 10.8.0.4:5232 - apache2 $domain <<'EOF' # https://radicale.org/2.1.html #https://httpd.apache.org/docs/2.4/mod/mod_authn_core.html#authtype # https://stackoverflow.com/questions/5011102/apache-reverse-proxy-with-basic-authentication Options +FollowSymLinks +Multiviews +Indexes AllowOverride None AuthType basic AuthName "Authentication Required" # setup one time, with root:www-data, 640 AuthUserFile "/etc/caldav-htpasswd" Require valid-user RequestHeader set X-Script-Name /radicale/ RequestHeader set X-Remote-User expr=%{REMOTE_USER} ProxyPass "http://10.8.0.4:5232/" retry=0 ProxyPassReverse "http://10.8.0.4:5232/" EOF # nginx version of above would be: # auth_basic "Not currently available"; # auth_basic_user_file /etc/nginx/caldav/htpasswd; ###### begin znc setup ##### pi znc # https://wiki.znc.in/FAQ seems to imply that znc doesn\'t need restart after cert change. # to get into the web interface, # then use non-main browser or else it doebsn't allow it based on ocsp stapling from my main site. 
# https://iankelling.org:12533/ # login as ian, pass is in pass store, the part after : # znc config generated by doing # znc --makeconf # selected port is also used in erc config # comma separated channel list worked. # while figuring things out, running znc -D for debug in foreground. # to exit and save config: # /msg *status shutdown # configed auth on freenode by following # https://wiki.znc.in/Sasl: # /query *sasl # RequireAuth yes # Mechanism PLAIN # Set MyNickservName pa$$w0rd # created the system service after, and had to do # mv /home/iank/.znc/* /var/lib/znc # sed -i 's,/home/iank/.znc/,/var/lib/znc,' /var/lib/znc/config/znc.conf # and made a copy of the config files into /p/c # /msg *status LoadMod --type=global log -sanitize # todo: in config file AllowWeb = true should be false. better security if that is off unless we need it. # /msg *status LoadMod --type=network perform # /query *perform add PRIVMSG ChanServ :invite #fsf-office # /msg *perform add JOIN #fsf-office # /msg *status LoadMod NickServ # # i set Buffer = 500 # also ran /znc LoadMod clearbufferonmsg # it would be nice if erc supported erc query buffers by doing # /msg *status clearbuffer /dev/null; then sudo useradd --create-home -d /var/lib/znc --system --shell /sbin/nologin --comment "Account to run ZNC daemon" --user-group znc fi sudo chmod 700 /var/lib/znc sudo chown -R znc:znc /var/lib/znc # Avoid restarting if possible, reconnecting to irc is annoying. # The unit file was made active with conflink. # Note, avoid using ser here because we wrap sudo to prints the command first. if [[ $(systemctl is-active znc) != active ]]; then sgo znc fi ###### stop znc setup ##### end ;; esac ###### end website setup ########### end section including li/lj ############### #### desktop stuff ### system76 things ### case $HOSTNAME in sy|bo) # note, i stored the initial popos packages at /a/bin/data/popos-pkgs if [[ ! 
-e /etc/apt/sources.list.d/system76.list ]]; then # https://blog.zackad.dev/en/2017/08/17/add-ppa-simple-way.html sd /etc/apt/sources.list.d/system76.list </dev/null; then sudo -u gdm dbus-launch gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-type 'nothing' fi ;;& focal) # lightdm startup fails without this. on etiona, we got unity-greeter installed automatically, # it would probably work on nabia too, but google just lead me to this and it works fine. pi lightdm-gtk-greeter # ok, this is really weird # normally this file has sd /etc/X11/Xsession.d/99x11-common_start <<'EOF' # this is very odd. # this file normally just has: exec $STARTUP # but then ~/.xsession-errors # says $STARTUP is not found, which appears to be the real # error, but normal bash doesnt operate that way. # https://bugs.launchpad.net/ubuntu/+source/im-config/+bug/1879352 eval exec $STARTUP EOF # also, gdm wayland was default, had to do # dpkg-reconfigure gdm3 # and choose lightdm. # ;; esac if [[ $codename_compat == flidas ]]; then # doesnt exist in newer pi gnupg-doc fi case $distro in trisquel) case $codename in *) # firefox would work here too, apt would select abrowser. # and the update-alternatives thing i think is becuz firefox is # generally available before abrowser and this helps fix things # up when we have both. pi abrowser s update-alternatives --set x-www-browser /usr/bin/abrowser ;; esac ;; ubuntu) pi firefox ;; debian) pi firefox-esr ;; esac # newer distros just use adb for package name it seems case $codename_compat in bionic|stretch) pi android-tools-adbd ;; esac case $codename_compat in focal) pi libext2fs-dev libzstd-dev cd /a/opt/btrfs-progs ;; esac case $codename_compat in bionic) pi python-vte ;; *) pi reportbug-gtk ;; esac # not packaged in newer distros it seems case $codename_compat in bionic|buster|stretch) pi dtrx ;; esac # TODO: some of the X programs can be removed from pall when using wayland # depends gcc is a way to install suggests. 
this is apparently the only # way to install suggests even if the main package is already # installed. reinstall doesn't work, uninstalling can cause removing # dependent packages. pi ${pall[@]} $(apt-cache search ruby[.0-9]+-doc| awk '{print $1}') $($src/distro-pkgs) # commented, not worth the hassle i think. #seru enable psd #seru start psd # old filename sudo rm -fv /etc/systemd/resolved.conf.d/iank.conf # website is dead june 14 2019. back in october, but meh sudo rm -fv /etc/apt/sources.list.d/iridium-browser.list # case $distro in # debian) # pi chromium ;; # trisquel|ubuntu) # wget -qO - https://downloads.iridiumbrowser.de/ubuntu/iridium-release-sign-01.pub|sudo apt-key add - # t=$(mktemp) # cat >$t </dev/null; then # note, see bashrc for more documentation. pi rss2email sd /etc/systemd/system/rss2email.service <<'EOF' [Unit] Description=rss2email After=multi-user.target [Service] User=iank Type=oneshot # about 24 hours of failures # it copies over its files without respecting symlinks, so # we pass options to use different location. ExecStart=/usr/local/bin/sysd-mail-once -288 rss2email r2e -d /p/c/rss2email.json -c /p/c/rss2email.cfg run EOF sd /etc/systemd/system/rss2email.timer <<'EOF' [Unit] Description=rss2email [Timer] # for initial run. required. OnActiveSec=30 # for subsequent runs. 
OnUnitInactiveSec=300 [Install] WantedBy=timers.target EOF sudo systemctl daemon-reload fi ######### begin irc periodic backup ############# if [[ $HOSTNAME == frodo ]]; then sd /etc/systemd/system/ircbackup.service <<'EOF' [Unit] Description=irc li backup After=multi-user.target [Service] User=iank Type=oneshot ExecStart=/usr/local/bin/sysd-mail-once irc-backup rsync -rlptDhSAX root@iankelling.org:/var/lib/znc/moddata/log/iank/freenode/ /k/irclogs EOF sd /etc/systemd/system/ircbackup.timer <<'EOF' [Unit] Description=irc li backup hourly [Timer] OnCalendar=hourly [Install] WantedBy=timers.target EOF sudo systemctl daemon-reload sgo ircbackup.timer fi ######### end irc periodic backup ############# pi-nostart openvpn # pi-nostart does not disable ser disable openvpn if [[ -e /p/c/gen-fsf-vpn ]]; then /p/c/gen-fsf-vpn fi if [[ -e /p/c/machine_specific/$HOSTNAME/filesystem/etc/openvpn/client/hole.crt ]]; then sgo openvpn-client@hole fi if [[ $HOSTNAME == frodo ]]; then vpn-mk-client-cert -b frodo -n hole iankelling.org fi ############# begin syncthing setup ########### case $HOSTNAME in kd|frodo) f=/usr/share/keyrings/syncthing-archive-keyring.gpg if [[ ! -e $f ]]; then s curl -s -o $f https://syncthing.net/release-key.gpg fi s="deb [signed-by=/usr/share/keyrings/syncthing-archive-keyring.gpg] https://apt.syncthing.net/ syncthing stable" if [[ $(cat /etc/apt/sources.list.d/syncthing.list) != "$s" ]]; then echo "$s" | sd /etc/apt/sources.list.d/syncthing.list p update fi pi syncthing ;;& frodo) m lnf -T /w/syncthing /home/iank/.config/syncthing ser daemon-reload # syncthing likely not properly packaged sgo syncthing@iank # runs as iank ;; kd) # 1003 just happens to be what was on my system if ! getent passwd ziva; then s groupadd -g 1003 ziva # syncthing state / config / db are all in ~/.config/syncthing s useradd -g 1003 -u 1003 -d /d/ziva-home -c ziva -s /bin/bash ziva fi sgo syncthing@ziva ;; esac # user for short term use dropping of privileges if ! 
getent group zu &>/dev/null; then s groupadd -g 1023 zu fi if ! getent passwd zu &>/dev/null; then s useradd -g 1023 -u 1023 -c zu -s /bin/bash zu fi # these things persist in ~/.config/syncthing, which I save in # /w/syncthing (not in /p, because syncthing should continue to # run on home server even when using laptop as primary device) # open http://localhost:8384/ # change listen address from default to tcp://:22001, # this is because we do port forward so it doesn\'t have to use # some external server, but the syncthing is broken for port forward, # you get a message, something "like connected to myself, this should not happen" # when connecting to other local devices, so I bump the port up by 1, # based on # https://forum.syncthing.net/t/connected-to-myself-should-not-happen/1763/19. # Without this, it was being stuck syncing at 0%. # Set gui username and password. # # install syncthing via f-droid, # folder setting, turn off send only. # on phone, add device, click bar code icon # on dekstop, top right, actions, device id # after adding, notification will appear on desktop to confirm # # syncing folder. from phone to desktop: select desktop in the # folder on phone\'s sync options, notification will appear in # desktop\'s web ui within a minute. For the reverse, the # notification will appear in android\'s notifications, you have to # swipe down and tap it to add the folder. It won\'t appear in the # syncthing ui, which would be intuitive, but don\'t wait for it # there. The notification may not work, instead open the web gui # from in the app, there should be a notification within there. # # On phone, set settings to run syncthing all the time, and # show no notification. # # Folder versioning would make sense if I didn\'t already use btrfs # for backups. I would choose staggered, or trash can for more space. 
# # if needed to install on a remote comp: # ssh -L 8384:localhost:8384 -N frodo # open http://localhost:8384/ # # Note, the other thing i did was port forward port 22000, # per https://docs.syncthing.net/users/firewall.html ############# end syncthing setup ########### ####### begin misc packages ########### # template case $codename in flidas) : ;; esac sudo update-alternatives --set pinentry /usr/bin/pinentry-gtk-2 # sakura config is owned by ian m reset-sakura m reset-konsole m sudo -u user2 -i reset-konsole # user2 xscreensaver we don't want to reset m reset-xscreensaver # this would install from cabal for newer / consistent version across os, but it screws up xmonad, so disabled for now. # this is also in primary-setup # pi libxss-dev # dependency based on build failure # cabal update # cabal install --upgrade-dependencies --force-reinstalls arbtt # also, i assume syncing this between machines somehow messed up the data. if mountpoint /p &>/dev/null; then case $codename in etiona|nabia) pi arbtt # same as seru enable arbtt, but works over ssh when systemctl --user causes error: # Failed to connect to bus: No such file or directory lnf -T /a/bin/ds/subdir_files/.config/systemd/user/arbtt.service /home/iank/.config/systemd/user/default.target.wants/arbtt.service # allow failure seru start arbtt ||: ;; esac fi m primary-setup if [[ ! 
-e ~/.linphonerc && -e /p/.linphonerc-initial ]]; then m cp /p/.linphonerc-initial ~/.linphonerc fi # linphone in t10 wont do dns with systemd-resolved or something s teeu /etc/hosts <<'EOF' 74.94.156.215 watson.fsf.org EOF ### begin spd install if mountpoint /p &>/dev/null; then pi libswitch-perl libdigest-md5-file-perl libgnupg-interface-perl t=$(mktemp) m wget -O $t http://mirror.fsf.org/fsfsys-trisquel/fsfsys-trisquel/pool/main/s/spd-perl/spd-perl_0.2-1_amd64.deb sudo dpkg -i $t m rm $t # this guesses at the appropriate directory, adjust if needed perldir=(/usr/lib/x86_64-linux-gnu/perl/5.*) m sudo ln -sf ../../../perl/5.18.2/SPD/ ${perldir[0]} # newer distro had gpg2 as default, older one, flidas, need to make it that way gpgpath=$(which gpg2) ||: if [[ $gpgpath ]]; then sudo mkdir -p /usr/local/spdhackfix s lnf -T $gpgpath /usr/local/spdhackfix/gpg fi fi ### end spd install # nagstamon setting which were set through the ui # monitor url https://klaxon.fsf.org/cgi-bin # monitor url cgi https://klaxon.fsf.org/cgi-bin # in filters tab: # all unknown services # all warning services # acknowledged hosts & services # hosts & services down for maintenence # services on down hosts # services on hosts in maintenece # services on unreachable osts # hosts in soft state # services in soft state # in display tab: fullscreen # these translate to these settings I think # filter_acknowledged_hosts_services = True # filter_all_unknown_services = True # filter_all_warning_services = True # filter_hosts_in_soft_state = True # filter_hosts_services_maintenance = True # filter_services_in_soft_state = True # filter_services_on_down_hosts = True # filter_services_on_hosts_in_maintenance = True # filter_services_on_unreachable_hosts = True # notify_if_up = False # statusbar_floating = False # fullscreen = True # but i'm just going to rely on the webpage plus sms for now. 
# it asks if it should make users in it's group capture packets without root, # which is arguably more secure than running wireshark as root. default is no, # which is what i prefer, since I plan to use tcpdump to input to wireshark. s DEBIAN_FRONTEND=noninteractive pi wireshark-gtk # /run and /dev/shm are listed as required for pulseaudio. All 4 in the group # listed in the default config as suggested. # /run/usr/1000 i noticed was missing for pulseaudio # /run/user/0 just seemed like a not bad idea, given the above tu /etc/schroot/desktop/fstab <<'EOF' /p /p none rw,bind 0 0 /a /a none rw,bind 0 0 /run /run none rw,bind 0 0 /run/lock /run/lock none rw,bind 0 0 /dev/shm /dev/shm none rw,bind 0 0 /run/shm /run/shm none rw,bind 0 0 /run/user/1000 /run/user/1000 none rw,bind 0 0 /run/user/1001 /run/user/1001 none rw,bind 0 0 /run/user/0 /run/user/0 none rw,bind 0 0 EOF # todo: consider if this should use the new sysd-prom-fail sd /etc/systemd/system/schrootupdate.service <<'EOF' [Unit] Description=schrootupdate After=multi-user.target [Service] Type=oneshot ExecStart=/usr/local/bin/sysd-mail-once schrootupdate /a/bin/distro-setup/schrootupdate EOF sd /etc/systemd/system/schrootupdate.timer <<'EOF' [Unit] Description=schrootupdate [Timer] OnCalendar=*-*-* 04:20:00 America/New_York [Install] WantedBy=timers.target EOF ser daemon-reload sgo schrootupdate.timer # for my roommate case $distro in trisquel) m mkschroot -s /a/bin/fai/fai/config/files/etc/apt/sources.list.d/bullseye.list/BULLSEYE_FREE \ debian bullseye firefox-esr pulseaudio chromium anki case $(debian-codename) in etiona|nabia) # we have a lot of t8 stuff, useful to have m mkschroot -s /a/bin/fai/fai/config/files/etc/apt/sources.list.d/flidas.list/FLIDAS \ trisquel flidas tu /nocow/schroot/flidas/etc/sudoers < /dev/null; then sudo groupadd -g 450 debian-transmission sudo adduser --quiet \ --gid 450 \ --uid 450 \ --system \ --no-create-home \ --disabled-password \ --home /var/lib/transmission-daemon \ 
debian-transmission fi # We want group writable stuff from transmission. # However, after setting this, I learn that transmission sets it's # own umask based on it's settings file. Well, no harm leaving this # so it's set right from the beginning. sudo chfn debian-transmission -o umask=0002 # note i had to do this, which is persistent: # cd /i/k # s chgrp debian-transmission torrents partial-torrents # syslog says things like # 'Failed to set receive buffer: requested 4194304, got 425984' # google suggets giving it even more than that tu /etc/sysctl.conf<<'EOF' net.core.rmem_max = 67108864 net.core.wmem_max = 16777216 EOF sudo sysctl -p # some reason it doesn\'t seem to start automatically anyways pi-nostart transmission-daemon # be extra sure its not started ser disable transmission-daemon ser stop transmission-daemon # the folder was moved here after an install around 02/2017. # it contains runtime data, # plus a simple symlink to the config file which it\'s # not worth separating out. # between comps, the uid can change f=$tdir/transmission-daemon for d in $tdir/partial-torrents $tdir/torrents $f; do if [[ ! -d $d ]]; then mkdir $d fi sudo chown -R debian-transmission:user2 $d done s lnf -T $f /var/lib/transmission-daemon/.config/transmission-daemon s lnf -T /etc/transmission-daemon/settings.json $f/settings.json sudo chown -R debian-transmission:debian-transmission $f sudo chown -R debian-transmission:debian-transmission /var/lib/transmission-daemon # # config file documented here, and it\'s the same config # for daemon vs client, so it\'s documented in the gui. # https://trac.transmissionbt.com/wiki/EditConfigFiles#Options # # I originaly setup rpc-whitelist, but after using # routing to a network namespace, it doesn\'t see the # real source address, so it\'s disabled. # # Changed the cache-size to 256 mb, reduces disk use. # It is a read & write cache. if ! systemctl is-active transmission-daemon-nn &>/dev/null && \ ! 
systemctl is-active transmission-daemon; then tmp=$(mktemp) command sudo ruby <$tmp require 'json' p = '/etc/transmission-daemon/settings.json' s = { 'rpc-whitelist-enabled' => false, 'rpc-authentication-required' => false, 'incomplete-dir' => '$tdir/partial-torrents', 'incomplete-dir-enabled' => true, 'download-dir' => '$tdir/torrents', "speed-limit-up" => 800, "speed-limit-up-enabled" => true, "peer-port" => 61486, "cache-size-mb" => 256, "ratio-limit" => 5.0, "ratio-limit-enabled" => false, } puts(JSON.pretty_generate(JSON.parse(File.read(p)).merge(s))) EOF cat $tmp | sudo dd of=/etc/transmission-daemon/settings.json fi ####### end transmission case $HOSTNAME in kd) # to persist upload/dl metadata. initially, moved all the stuff # in /var/lib/transmission-daemon to /d/tor s usermod --home /d/tor debian-transmission sgo transmission-daemon-nn ;; esac ######### begin transmission client setup ###### if [[ -e /p/transmission-rpc-pass ]]; then # arch had a default config, # debian had nothing until you start it. # With a little trial an error, here is a minimal config # taken from the generated one, plus changes that the # settings ui does, without a bunch of ui crap settings. # # only settings I set were # hostname # auto-connect # password # the password is randomly generated on first run, i copied it out # so it could be used by other hosts. sudo ruby <<'EOF' require 'json' p = '/etc/transmission-daemon/settings.json' s = JSON.parse(File.read(p)) s["rpc-password"] = File.read("/p/transmission-rpc-pass").chomp # default is 0022 (18 in decimal) s["umask"] = 2 File.write p, JSON.pretty_generate(s) EOF rpc_pass=$(

/dev/null) || continue if [[ ! $uid -ge 1000 ]]; then continue fi d=$f/.config/transmission-remote-gtk sudo -u $u mkdir -p $d # i tried setting hostname to transmission.b8.nz, so i could dynamically change where # this connects to, but it said some 421 denied error when I did that. Then it # froze X when i ran it under strace. Whatever. sudo -u $u dd of=$d/config.json </dev/null; then mkdir -p ~/.timetrap/ ln -sf /a/opt/timetrap_formatters ~/.timetrap/formatters if ! type -p t &>/dev/null; then pi ruby-dev libsqlite3-dev gem install --user timetrap fi fi ### end timetrap setup # leftover cleanup for t9 pu resolvconf case $codename in # needed for debootstrap scripts for fai since fai requires debian flidas) pi dnsmasq pi-nostart network-manager # i hate networkmanager. noo, of course disabling it doesnt work # unless you add a hack. found this by doing # systemd-analyze dot > x.dot # less x.dot # /networkman soff NetworkManager soff NetworkManager-wait-online.service # make networkmanager use resolvconf instead of its own dnsmasq which # conflicts with the normal dnsmasq package. f=/etc/NetworkManager/NetworkManager.conf m=$(md5sum $f) sudo sed -ri '/ *\[main\]/,/^ *\[[^]]+\]/{/^\s*dns[[:space:]=]/d}' $f sudo sed -ri '/ *\[main\]/a dns=default' $f if [[ $m != $(md5sum $f) ]]; then srestart NetworkManager fi # networkmanager has this nasty behavior on flidas: if the machine # crashes with dnsmasq running, on subsequent boot, it adds an entry to # resolvconf for 127.0.0.1 in some stupid attempt to restore # nameservers. # This can be manually fixed by stoping dnsmasq, # then based on whats in /run/dnsmasq/, i see we can run # s resolvconf -d NetworkManager # oh ya, and stoping NetworkManager leaves this crap behind without cleaning it up. ser disable NetworkManager ;; esac # I have no use for avahi, # had to run this twice when doing manually, i dunno why soff avahi-daemon ||: sleep 1 soff avahi-daemon # make my /etc/fonts/conf.d/ get used. 
# I have a new sans-serif font there because the default one # displays l and I as the same char, grrrrr. sudo fc-cache pi desktop-file-utils m /a/bin/distro-setup/mymimes # stop autopoping windows when i plug in an android phone. # dbus-launch makes this work within an ssh connection, otherwise you get this message, # with still 0 exit code. # dconf-WARNING **: failed to commit changes to dconf: Cannot autolaunch D-Bus without X11 $DISPLAY m dbus-launch gsettings set org.gnome.desktop.media-handling automount-open false # on grub upgrade, we get prompts unless we do this devs=() for dev in $(s btrfs fil show /boot | sed -nr 's#.*path\s+(\S+)$#\1#p'); do devs+=("$(devbyid $dev),") done devs[-1]=${devs[-1]%,} # jonied by commas sudo debconf-set-selections < AuthType Basic AuthName "basic_auth" # created with # htpasswd -c prometheus-htpasswd USERNAME AuthUserFile "/etc/prometheus-htpasswd" Require valid-user EOF web-conf -p 9094 -f 9093 - apache2 i.b8.nz <<'EOF' AuthType Basic AuthName "basic_auth" # created with # htpasswd -c prometheus-htpasswd USERNAME AuthUserFile "/etc/prometheus-htpasswd" Require valid-user EOF # by default, the alertmanager web ui is not enabled other than a page # that suggests to use the amtool cli. that tool is good, but you cant # silence things nearly as fast. if [[ ! -e /usr/share/prometheus/alertmanager/ui/index.html ]]; then sudo chroot /nocow/schroot/bullseye prometheus-alertmanager sudo chroot /nocow/schroot/bullseye /usr/share/prometheus/alertmanager/generate-ui.sh sudo rsync -avih /nocow/schroot/bullseye/usr/share/prometheus/alertmanager/ui/ /usr/share/prometheus/alertmanager/ui ser restart prometheus-alertmanager fi for ser in prometheus-node-exporter prometheus-alertmanager prometheus; do sysd-prom-fail-install $ser done ;; *) pi prometheus-node-exporter ;; esac case $HOSTNAME in # frodo needs upgrade first. 
frodo) : ;; # todo, for limiting node exporter http, # either use iptables or, in # /etc/default/prometheus-node-exporter # listen on the wireguard interface *) wgip=$(command sudo sed -rn 's,^ *Address *= *([^/]+).*,\1,p' /etc/wireguard/wghole.conf) # old filename. remove once all hosts are updated. s rm -fv /etc/apache2/sites-enabled/${HOSTNAME}wg.b8.nz.conf web-conf -i -a $wgip -p 9101 -f 9100 - apache2 ${HOSTNAME}wg.b8.nz <<'EOF' AuthType Basic AuthName "basic_auth" # created with # htpasswd -c prometheus-export-htpasswd USERNAME AuthUserFile "/etc/prometheus-export-htpasswd" Require valid-user EOF # For work, i think we will just use the firewall for hosts in the main data center, and # vpn for hosts outside it. # TODO: figure out how to detect the ping failure and try again. # Binding to the wg interface, it might go down, so always restart, and wait for it on boot. s mkdir /etc/systemd/system/apache2.service.d sd /etc/systemd/system/apache2.service.d/restart.conf </dev/null; then # cd $(mktemp -d) # wget https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh # # added some stuff to envonment.sh for profile based on # # manual instructions # # wget https://sv.gnu.org/people/viewgpg.php?user_id=15145 -qO - | gpg --import - # # echo is to get past prompt # yes | sudo -E HOME=$HOME bash guix-install.sh || [[ $? == 141 ]] # guix install glibc-utf8-locales # guix package --install guile # fi lnf -T /a/opt ~/src pi tor m /a/bin/buildscripts/tor-browser # one root command needed to install s ln -sf /a/opt/tor-browser_en-US/Browser/start-tor-browser /usr/local/bin # nfs server pi-nostart nfs-kernel-server # todo, this is old, probably needs removing if [[ $HOSTNAME == tp ]]; then sd /etc/wireguard/wg0.conf <