-#!/bin/bash -l
-# Copyright (C) 2016 Ian Kelling
+#!/bin/bash
+# Copyright (C) 2019 Ian Kelling
+# SPDX-License-Identifier: AGPL-3.0-or-later
+if [[ -s ~/.bashrc ]];then . ~/.bashrc;fi
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+### setup
+source /a/bin/errhandle/err
+src="$(readlink -f -- "${BASH_SOURCE[0]}")"; src=${src%/*} # directory of this file
-# http://www.apache.org/licenses/LICENSE-2.0
+if [[ $EUID == 0 ]]; then
+ echo "$0: error: run as regular user" >&2
+ exit 1
+fi
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Cleanup hook invoked by the err handler (sourced from /a/bin/errhandle/err)
+# when the script fails: record a nonzero status in ~/.local/distro-end so a
+# later run (or other tooling) can tell the last run did not finish.
+# NOTE(review): assumes the err framework calls errcatch-cleanup — confirm.
+errcatch-cleanup() {
+ echo 1 >~/.local/distro-end
+}
-### setup
-source /a/bin/errhandle/err
-src="${BASH_SOURCE%/*}"
+# shellcheck source=./pkgs
source $src/pkgs
-set -x
exec &> >(sudo tee -a /var/log/distro-end)
echo "$0: $(date): starting now)"
# see example of usage to understand.
IFS= read -r -d '' y ||:
end_msg_var+="$y"
}
-spa() { # simple package add
- simple_packages+=($@)
+# Normal-exit path: print the accumulated end_msg_var, mark a successful run
+# (0) in ~/.local/distro-end, then reboot if any earlier step set
+# pending_reboot=true, otherwise log completion and exit 0.
+end() {
+ e "$end_msg_var"
+ echo 0 >~/.local/distro-end
+ if $pending_reboot; then
+ echo "$0: pending reboot and then finished. doing it now."
+ sudo reboot now
+ else
+ echo "$0: $(date): ending now)"
+ fi
+ exit 0
+}
+pre="${0##*/}:"
+# sudo wrapper: print the command (prefixed with the script name) before
+# running it, and export the caller's working directory as SUDOD.
+# Use a literal printf format — putting $pre in the format string (SC2059)
+# would misbehave if the script name ever contained % or backslashes.
+sudo() {
+ printf '%s %s\n' "$pre" "$*"
+ SUDOD="$PWD" command sudo "$@";
+}
+# sd FILE: write stdin to FILE as root (sudo dd of=FILE). dd's transfer
+# statistics on stderr are discarded so heredoc config writes stay quiet.
+sd() {
+ sudo dd of="$1" 2>/dev/null
+}
+# m: print a command (script-name prefixed) then run it.
+# e: print a message only. err: timestamped error line to stderr.
+# Literal printf formats avoid SC2059 ($pre expanding as a format string).
+m() { printf '%s %s\n' "$pre" "$*"; "$@"; }
+e() { printf '%s %s\n' "$pre" "$*"; }
+err() { echo "[$(date +'%Y-%m-%d %H:%M:%S%z')]: $0: $*" >&2; }
distro=$(distro-name)
+codename=$(debian-codename)
+codename_compat=$(debian-codename-compat)
pending_reboot=false
sed="sed --follow-symlinks"
-# template
-case $distro in
-esac
+## template:
+# case $distro in
+# esac
#### initial packages
pup
pi aptitude
fi
-########### begin section including li ################
-pi ${p3[@]} $($src/distro-pkgs)
-
-conflink
-
-case $distro in
- arch) sgo cronie ;;
-esac
-
-case $distro in
- arch) sgo atd ;;
-esac
+# avoid prompts
+sudo debconf-set-selections <<EOF
+popularity-contest popularity-contest/participate boolean true
+EOF
+########### begin section including linode ################
+pi ${p2[@]}
-case $distro in
- arch) sgo ntpd ;;
-esac
+conflink
# no equivalent in other distros:
-case $distro in
- debian|trisquel|ubuntu)
- if ! dpkg -s apt-file &>/dev/null; then
- # this condition is just a speed optimization
- pi apt-file
- s apt-file update
- fi
- ;;
-esac
+if isdeb && pcheck apt-file; then
+ # this condition is just a speed optimization
+ pi apt-file
+ sudo apt-file update
+fi
+
# disable motd junk.
case $distro in
# i'd rather disable the service than comment the init file
# this says disabling the service, it will still get restarted
# but this script doesn't do anything on restart, so it should be fine
- s dd of=/var/run/motd.dynamic if=/dev/null
- # stretch doesn't have initscripts pkg installed by default
- if [[ $(debian-codename) == jessie ]]; then
- s update-rc.d motd disable
- fi
+ sudo truncate -s0 /var/run/motd.dynamic
;;
trisquel|ubuntu)
# this isn't a complete solution. It still shows me when updates are available,
# but it's no big deal.
- s t /etc/update-motd.d/10-help-text /etc/update-motd.d/00-header
+ sudo rm -fv /etc/update-motd.d/10-help-text /etc/update-motd.d/00-header
;;
esac
-# automatic updates
-# reference:
-# https://debian-handbook.info/browse/stable/sect.regular-upgrades.html
-# /etc/cron.daily/apt calls unattended-upgrades
-# /usr/share/doc/unattended-upgrades# cat README.md
-# /etc/apt/apt.conf.d/50unattended-upgrades
-if isdebian; then
- setup-debian-auto-update
-fi
-### begin docker install ####
-if isdeb; then
- # https://store.docker.com/editions/community/docker-ce-server-debian?tab=description
- pi software-properties-common apt-transport-https
- curl -fsSL https://download.docker.com/linux/$(distro-name-compat)/gpg | sudo apt-key add -
- url=https://download.docker.com/linux/$(distro-name-compat)
- l="deb [arch=amd64] $url $(debian-codename-compat) stable"
+### begin certbot install ###
+if [[ $distro == debian ]]; then
+ # note, need python-certbot-nginx for nginx, but it depends on nginx,
+ # and I'm not installing nginx by default right now.
+ pi certbot python-certbot-apache
+elif [[ $codename_compat == xenial ]]; then
+ # not packaged in xenial or flidas
+ pi software-properties-common
+ l="deb http://ppa.launchpad.net/certbot/certbot/ubuntu xenial main"
if ! grep -xFq "$l" /etc/apt/sources.list{,.d/*.list}; then
- sudo add-apt-repository "$l"
- p update
+ sudo add-apt-repository -y ppa:certbot/certbot ||:
+ m p update
fi
- # docker eats up a fair amount of cpu when doing nothing, so don't enable it unless
- # we really need it.
- pi-nostart docker-ce
- # and docker is even more crap, it ignores that it shouldnt start
- ser stop docker
- ser disable docker
- case $HOSTNAME in
- li|lj) sgo docker ;;
- esac
- # other distros unknown
+ pi python-certbot-apache
+else
+ err "distro unknown for certbot"
fi
-### end docker install ####
-
-
-
-### begin certbot install ###
-case $distro in
- debian)
- # note, need python-certbot-nginx for nginx, but it depends on nginx,
- # and I'm not installing nginx by default right now.
- # note python-certbot-apache is in suggests, but so is a doc package that brought in xorg
- if [[ $(debian-codename) == jessie ]]; then
- pi -t jessie-backports certbot python-certbot-apache
- else
- pi certbot python-certbot-apache
- fi
- ;;
- trisquel|ubuntu)
- # not packaged in xenial or flidas
- pi software-properties-common
- l="deb http://ppa.launchpad.net/certbot/certbot/ubuntu xenial main"
- if ! grep -xFq "$l" /etc/apt/sources.list{,.d/*.list}; then
- s add-apt-repository -y ppa:certbot/certbot ||:
- p update
- fi
- pi python-certbot-apache
- ;;
- # todo: other distros unknown
-esac
# make a version of the certbot timer that emails me.
x=/systemd/system/certbot
-$sed -r -f - /lib$x.timer <<'EOF' |s dd of=/etc${x}mail.timer
+$sed -r -f - /lib$x.timer <<'EOF' |sd /etc${x}mail.timer
s,^Description.*,\0 mail version,
EOF
-$sed -r -f - /lib$x.service <<'EOF' |s dd of=/etc${x}mail.service
+$sed -r -f - /lib$x.service <<'EOF' |sd /etc${x}mail.service
s,(ExecStart=)(/usr/bin/certbot),\1/a/bin/log-quiet/sysd-mail-once certbotmail \2 --renew-hook /a/bin/distro-setup/certbot-renew-hook,
EOF
ser daemon-reload
# ;;
# esac
-pi ${p1[@]}
-##### begin automatic upgrades ####
-# this makes it so we upgrade everything
-debconf-set-selections <<'EOF'
-unattended-upgrades unattended-upgrades/origins_pattern string "codename=${distro_codename}";
+## not actually using prometheus just yet
+# # office is not exposed to internet yet
+# if [[ $HOSTNAME != kw ]]; then
+# ## prometheus node exporter setup
+# web-conf -f 9100 -p 9101 apache2 $(hostname -f) <<'EOF'
+# #https://httpd.apache.org/docs/2.4/mod/mod_authn_core.html#authtype
+# # https://stackoverflow.com/questions/5011102/apache-reverse-proxy-with-basic-authentication
+# <Location />
+# AllowOverride None
+# AuthType basic
+# AuthName "Authentication Required"
+# # setup one time, with root:www-data, 640
+# AuthUserFile "/etc/prometheus-htpasswd"
+# Require valid-user
+# </Location>
+# EOF
+# fi
+
+
+######### begin flidas pinned packages ######
+case $(debian-codename) in
+ # needed for debootstrap scripts for fai since fai requires debian
+ flidas)
+ curl http://archive.ubuntu.com/ubuntu/project/ubuntu-archive-keyring.gpg | s apt-key add -
+ sd /etc/apt/preferences.d/flidas-xenial <<EOF
+Package: *
+Pin: release a=xenial
+Pin-Priority: -100
+
+Package: *
+Pin: release a=xenial-updates
+Pin-Priority: -100
+
+Package: *
+Pin: release a=xenial-security
+Pin-Priority: -100
+EOF
+ sd /etc/apt/sources.list.d/xenial.list 2>/dev/null <<EOF
+deb http://us.archive.ubuntu.com/ubuntu/ xenial main
+deb http://us.archive.ubuntu.com/ubuntu/ xenial-updates main
+deb http://us.archive.ubuntu.com/ubuntu/ xenial-security main
EOF
-dpkg-reconfigure -u -fnoninteractive unattended-upgrades
-# Setup daily reboots, so all unattended upgrades go into affect
-# unattended upgrades happen at 6 am + rand(60 min).
-echo '20 7 * * * root /usr/local/bin/zelous-unattended-reboot' >/etc/cron.d/unattended-upgrade-reboot
-##### end automatic upgrades ####
+ if ! apt-key list | grep /C0B21F32 &>/dev/null; then
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
+ sd /etc/apt/preferences.d/flidas-bionic <<EOF
+Package: *
+Pin: release a=bionic
+Pin-Priority: -100
+Package: *
+Pin: release a=bionic-updates
+Pin-Priority: -100
-## prometheus node exporter setup
-web-conf -f 9100 -p 9101 apache2 $(hostname -f) <<'EOF'
-#https://httpd.apache.org/docs/2.4/mod/mod_authn_core.html#authtype
-# https://stackoverflow.com/questions/5011102/apache-reverse-proxy-with-basic-authentication
-<Location />
- AllowOverride None
- AuthType basic
- AuthName "Authentication Required"
- # setup one time, with root:www-data, 640
- AuthUserFile "/etc/prometheus-htpasswd"
- Require valid-user
-</Location>
+Package: *
+Pin: release a=bionic-security
+Pin-Priority: -100
+EOF
+ fi
+
+ # better to run btrfs-progs which matches our kernel version
+ # (note, renamed from btrfs-tools)
+ sd /etc/apt/preferences.d/btrfs-progs <<EOF
+Package: btrfs-progs libzstd1
+Pin: release a=bionic
+Pin-Priority: 1005
+
+Package: btrfs-progs libzstd1
+Pin: release a=bionic-updates
+Pin-Priority: 1005
+
+Package: btrfs-progs libzstd1
+Pin: release a=bionic-security
+Pin-Priority: 1005
EOF
-# website setup
+ t=$(mktemp)
+ cat >$t <<EOF
+deb http://us.archive.ubuntu.com/ubuntu/ bionic main
+deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates main
+deb http://us.archive.ubuntu.com/ubuntu/ bionic-security main
+EOF
+ f=/etc/apt/sources.list.d/bionic.list
+ if ! diff -q $t $f; then
+ sudo cp $t $f
+ sudo chmod 644 $f
+ m p update
+ fi
+
+ # no special reason, but its better for btrfs-progs to
+ # be closer to our kernel version
+ pi btrfs-progs
+
+ if [[ ! -e /usr/share/debootstrap/scripts/xenial ]]; then
+ t=$(mktemp -d)
+ cd $t
+ m aptitude download debootstrap/xenial
+ m ex ./*
+ sudo cp ./usr/share/debootstrap/scripts/* /usr/share/debootstrap/scripts
+ fi
+
+ sd /etc/apt/preferences.d/flidas-etiona <<EOF
+Package: *
+Pin: release a=etiona
+Pin-Priority: -100
+
+Package: *
+Pin: release a=etiona-updates
+Pin-Priority: -100
+
+Package: *
+Pin: release a=etiona-security
+Pin-Priority: -100
+
+Package: *
+Pin: release a=etiona-backports
+Pin-Priority: -100
+EOF
+
+ # Install the etiona apt sources list only when its content changed,
+ # then refresh package indexes. Mirrors the bionic.list block above.
+ t=$(mktemp)
+ cat >$t <<EOF
+deb http://mirror.fsf.org/trisquel/ etiona main
+deb http://mirror.fsf.org/trisquel/ etiona-updates main
+deb http://archive.trisquel.info/trisquel/ etiona-security main
+deb http://mirror.fsf.org/trisquel/ etiona-backports main
+EOF
+ f=/etc/apt/sources.list.d/etiona.list
+ if ! diff -q $t $f; then
+ sudo cp $t $f
+ sudo chmod 644 $f
+ m p update
+ fi
+
+ sd /etc/apt/preferences.d/debian-goodies <<EOF
+Package: debian-goodies
+Pin: release n=etiona
+Pin-Priority: 1005
+EOF
+
+
+ sd /etc/apt/preferences.d/flidas-buster <<EOF
+Package: *
+Pin: release n=buster
+Pin-Priority: -100
+
+Package: *
+Pin: release n=buster-updates
+Pin-Priority: -100
+EOF
+
+ # dont use buster because it causes dist-upgrade to think its downgrading
+ # packages while really just reinstalling the same version.
+ f=/etc/apt/apt.conf.d/01iank
+ sudo rm -fv $f
+ # # stupid buster uses some key algorithm not supported by flidas gpg that apt uses.
+ # sd /etc/apt/apt.conf.d/01iank <<'EOF'
+ # Acquire::AllowInsecureRepositories "true";
+ # EOF
+
+ f=/etc/apt/sources.list.d/buster.list
+ sudo rm -fv $f
+ # t=$(mktemp)
+ # cat >$t <<EOF
+ # deb http://http.us.debian.org/debian buster main
+ # deb-src http://http.us.debian.org/debian buster main
+
+ # deb http://security.debian.org/ buster/updates main
+ # deb-src http://security.debian.org/ buster/updates main
+
+ # deb http://http.us.debian.org/debian buster-updates main
+ # deb-src http://http.us.debian.org/debian buster-updates main
+ # EOF
+ # if ! diff -q $t $f; then
+ # s cp $t $f
+ # s chmod 644 $f
+ # p update
+ # fi
+
+ # newer version needed for false positive in checkrestart.
+ # I did buster at first, but other problem above with having
+ # buster repos. not sure if the false positive exists in etiona.
+ pi debian-goodies
+
+ sd /etc/apt/preferences.d/shellcheck <<EOF
+Package: shellcheck
+Pin: release a=etiona
+Pin-Priority: 1005
+
+Package: shellcheck
+Pin: release a=etiona-updates
+Pin-Priority: 1005
+
+Package: shellcheck
+Pin: release a=etiona-security
+Pin-Priority: 1005
+EOF
+
+ sd /etc/apt/preferences.d/bash <<EOF
+Package: bash
+Pin: release a=etiona
+Pin-Priority: 1005
+
+Package: bash
+Pin: release a=etiona-updates
+Pin-Priority: 1005
+
+Package: bash
+Pin: release a=etiona-security
+Pin-Priority: 1005
+EOF
+
+
+ ;;
+ *)
+ if isdeb; then
+ pi debian-goodies shellcheck
+ fi
+ ;;
+esac
+######### end flidas pinned packages ######
+
+##### begin automatic upgrades (after checkrestart has been installed) ####
+sd /etc/apt/apt.conf.d/10periodic <<'EOF'
+# this file was mostly just comments.
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Download-Upgradeable-Packages "1";
+APT::Periodic::AutocleanInterval "7";
+APT::Periodic::Unattended-Upgrade "1";
+EOF
+
+sd /etc/apt/apt.conf.d/50unattended-upgrades <<EOF
+# fyi: default file has comments about available options,
+# you may want to read that, do pkx unattended-upgrades
+Unattended-Upgrade::Mail "root";
+Unattended-Upgrade::MailOnlyOnError "true";
+Unattended-Upgrade::Remove-Unused-Dependencies "true";
+Unattended-Upgrade::Origins-Pattern {
+ # default is just security updates. this list found from reading
+ # match_whitelist_string() in $(which unattended-upgrades)
+ "o=*,l=*,a=*,c=*,site=*,n=*";
+};
+EOF
+
+# old names, too verbose
+sudo rm -f /etc/cron.d/unattended-upgrade-reboot /usr/local/bin/zelous-unattended-reboot
+
+sd /etc/cron.d/myupgrade <<'EOF'
+# Setup reboots when running outdated stuff, unattended upgrades happen
+# at 6 am + rand(60 min).
+
+# default is /bin/sh
+SHELL=/bin/bash
+# default is /usr/bin:/bin
+PATH=/usr/bin:/bin:/usr/local/bin
+20 7 * * * iank myupgrade |& log-once -1 myupgrade
+0 * * * * root mycheckrestart |& log-once -1 mycheckrestart
+EOF
+##### end automatic upgrades ####
+
+
+
+###### begin website setup
case $HOSTNAME in
- lj|li)
+ li|l2)
+ pi bind9
+ f=/var/lib/bind/db.b8.nz
+ if [[ ! -e $f ]]; then
+ ser stop bind9
+ sudo rm -fv $f.jnl
+ sudo install -m 644 -o bind -g bind /p/c/machine_specific/linode/bind-initial/db.b8.nz $f
+ ser restart bind9
+ fi
+ ;;&
+ l2)
+ # setup let's encrypt cert
+ m web-conf apache2 l2.b8.nz
+ sudo rm -fv /etc/apache2/sites-enabled/l2.b8.nz{,-redir}.conf
+ ser reload apache2
+ s lnf -T /etc/letsencrypt/live/l2.b8.nz/fullchain.pem /etc/exim4/exim.crt
+ if [[ ! -L /etc/exim4/exim.key ]]; then
+ s lnf -T /etc/letsencrypt/live/l2.b8.nz/privkey.pem /etc/exim4/exim.key
+ mail-setup
+ fi
+ end
+ ;;
+ li)
+
case $HOSTNAME in
- lj) domain=iank.bid; exit 0 ;;
- li) domain=iankelling.org ;;
+ li)
+ m /a/h/setup.sh iankelling.org
+ ;;
+ *)
+ # allow symlinks on other hosts so i can host files in arbitrary paths
+ m /a/h/setup.sh -s
+ ;;
esac
- /a/h/setup.sh $domain
- /a/h/build.rb
-
- sudo -E /a/bin/mediawiki-setup/mw-setup-script
+ m /a/h/build.rb
+ # start mumble only when I'm going to use it, since I don't use it much
pi-nostart mumble-server
- s $sed -ri "s/^ *(serverpassword=).*/\1$(< /a/bin/bash_unpublished/mumble_pass)/" /etc/mumble-server.ini
+ sudo $sed -ri "s/^ *(serverpassword=).*/\1$(< /a/bin/bash_unpublished/mumble_pass)/" /etc/mumble-server.ini
# do certificate to avoid warning about unsigned cert,
# which is overkill for my use, but hey, I'm cool, I know
# how to do this.
- web-conf apache2 mumble.iankelling.org
- s rm -f /etc/apache2/sites-enabled/mumble.iankelling.org
- sudo -i <<'EOF'
+ m web-conf apache2 mumble.iankelling.org
+ sudo rm -fv /etc/apache2/sites-enabled/mumble.iankelling.org
+ s <<'EOF'
export RENEWED_LINEAGE=/etc/letsencrypt/live/mumble.iankelling.org
/a/bin/distro-setup/certbot-renew-hook
EOF
- sgo mumble-server
- vpn-server-setup -rd
- s tee /etc/openvpn/client-config/mail <<'EOF'
-ifconfig-push 10.8.0.4 255.255.255.0
-EOF
+ # requested from linode via a support ticket.
+ # https://www.linode.com/docs/networking/an-overview-of-ipv6-on-linode/
+ # ipv6 stuff pieced together
+ # via slightly wrong information from
+ # https://github.com/angristan/openvpn-install/blob/master/openvpn-install.sh
+ # https://community.openvpn.net/openvpn/wiki/IPv6
+ # and man openvpn
- # it\'s strange. docker seems to make the default for forward
- # be drop, but then I set it to accept and it\'s stuck that way,
- # I dun know why. But, let\'s make sure we can forward anyways.
- s DEBIAN_FRONTEND=noninteractive pi iptables-persistent
- rm /etc/iptables/rules.v6
- s tee /etc/iptables/rules.v4 <<'EOF'
-*filter
--A FORWARD -i tun+ -o eth0 -j ACCEPT
--A FORWARD -i eth0 -o tun+ -j ACCEPT
-COMMIT
+ m vpn-server-setup -rd 2600:3c00:e000:280::1/64 2600:3c00::f03c:91ff:feb4:0bf3
+ sudo tee /etc/openvpn/client-config/mail <<'EOF'
+ifconfig-push 10.8.0.4 255.255.255.0
+ifconfig-ipv6-push 2600:3c00:e000:280::2/64
EOF
+ if [[ -e /lib/systemd/system/openvpn-server@.service ]]; then
+ vpn_service=openvpn-server@server
+ else
+ vpn_service=openvpn@server
+ fi
sudo dd of=/etc/systemd/system/vpnmail.service <<EOF
[Unit]
ExecStop=/a/bin/distro-setup/vpn-mail-forward stop
[Install]
-WantedBy=openvpn.service
+WantedBy=$vpn_service.service
EOF
ser daemon-reload
- ser enable vpnmail.service
+ sgo vpnmail.service
# needed for li's local mail delivery.
tu /etc/hosts <<<"10.8.0.4 mail.iankelling.org"
- if [[ -e /lib/systemd/system/openvpn-server@.service ]]; then
- vpn_service=openvpn-server@server
- else
- vpn_service=openvpn@server
- fi
sgo $vpn_service
# setup let's encrypt cert
- web-conf apache2 mail.iankelling.org
- s rm /etc/apache2/sites-enabled/mail.iankelling.org{,-redir}.conf
+ m web-conf apache2 mail.iankelling.org
+ sudo rm -fv /etc/apache2/sites-enabled/mail.iankelling.org{,-redir}.conf
ser reload apache2
domain=cal.iankelling.org
web-conf -f 10.8.0.4:5232 - apache2 $domain <<'EOF'
#https://httpd.apache.org/docs/2.4/mod/mod_authn_core.html#authtype
# https://stackoverflow.com/questions/5011102/apache-reverse-proxy-with-basic-authentication
- <Location />
- Options +FollowSymLinks +Multiviews +Indexes
- AllowOverride None
- AuthType basic
- AuthName "Authentication Required"
- # setup one time, with root:www-data, 640
- AuthUserFile "/etc/caldav-htpasswd"
- Require valid-user
- </Location>
+<Location />
+ Options +FollowSymLinks +Multiviews +Indexes
+ AllowOverride None
+ AuthType basic
+ AuthName "Authentication Required"
+ # setup one time, with root:www-data, 640
+ AuthUserFile "/etc/caldav-htpasswd"
+ Require valid-user
+</Location>
EOF
# nginx version of above would be:
# auth_basic "Not currently available";
# auth_basic_user_file /etc/nginx/caldav/htpasswd;
- ########## begin pump.io setup ##########
- # once pump adds a logrotation script, turn off nologger,
- # and add
- # "logfile": "/var/log/pumpio/pumpio.log",
- #
- s dd of=/etc/pump.io.json <<'EOF'
-{
- "secret": "SECRET_REPLACE_ME",
- "driver": "mongodb",
- "params": { "dbname": "pumpio" },
- "noweb": false,
- "site": "pump.iankelling.org",
- "owner": "Ian Kelling",
- "ownerURL": "https://iankelling.org/",
- "port": 8001,
- "urlPort": 443,
- "hostname": "pump.iankelling.org",
- "nologger": true,
- "datadir": "/home/pumpio/pumpdata",
- "enableUploads": true,
- "debugClient": false,
- "disableRegistration": true,
- "noCDN": true,
- "key": "/home/pumpio/privkey.pem",
- "cert": "/home/pumpio/fullchain.pem",
- "address": "localhost",
- "sockjs": false
-}
-EOF
- s sed -i "s#SECRET_REPLACE_ME#$(cat /p/c/machine_specific/li/pump-secret)#" /etc/pump.io.json
-
- # stretch node is too old
- # https://nodejs.org/en/download/package-manager/
- curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
- pi nodejs graphicsmagick mongodb
- cd /home/iank
- if [[ -e pump.io ]]; then
- cd pump.io
- git pull
- else
- git clone https://github.com/pump-io/pump.io.git
- cd pump.io
- fi
- # note: these 2 commands seem
- # note: doing this or the npm install pump.io as root had problems.
- npm install
- npm run build
- # normally, next command would be
- # s npm install -g odb
- # but it\'s this until a bug in pump gets fixed
- # https://github.com/pump-io/pump.io/issues/1287
- s npm install -g databank-mongodb@0.19.2
- if ! getent passwd pumpio &>/dev/null; then
- s useradd -Um -s /bin/false pumpio
- fi
- sudo -u pumpio mkdir -p /home/pumpio/pumpdata
- # for testing browser when only listening to localhost,
- # in the pump.io.json, set hostname localhost, urlPort 5233
- #ssh -L 5233:localhost:5233 li
-
- s mkdir -p /var/log/pumpio/
- s chown pumpio:pumpio /var/log/pumpio/
-
- web-conf - apache2 pump.iankelling.org <<'EOF'
-# currently a bug in pump that we cant terminate ssl
- SSLProxyEngine On
- ProxyPreserveHost On
- ProxyPass / https://127.0.0.1:8001/
- ProxyPassReverse / https://127.0.0.1:8001/
- # i have sockjs disabled per people suggesting that
- # it won\'t work with apache right now.
- # not sure if it would work with this,
- # but afaik, this is pointless atm.
- <Location /main/realtime/sockjs/>
- ProxyPass wss://127.0.0.1:8001/main/realtime/sockjs/
- ProxyPassReverse wss://127.0.0.1:8001/main/realtime/sockjs/
- </Location>
-EOF
+ ###### begin znc setup #####
+ pi znc
- sudo -i <<'EOF'
-export RENEWED_LINEAGE=/etc/letsencrypt/live/pump.iankelling.org
+ # https://wiki.znc.in/FAQ seems to imply that znc doesn\'t need restart after cert change.
+ # to get into the web interface,
+ # then use non-main browser or else it doesn't allow it based on ocsp stapling from my main site.
+ # https://iankelling.org:12533/
+ s <<'EOF'
+export RENEWED_LINEAGE=/etc/letsencrypt/live/iankelling.org
/a/bin/distro-setup/certbot-renew-hook
EOF
- s dd of=/etc/systemd/system/pump.service <<'EOF'
-[Unit]
-Description=pump.io
-After=syslog.target network.target mongodb.service
-Requires=mongodb.service
-
-[Service]
-Type=simple
-User=pumpio
-Group=pumpio
-ExecStart=/home/iank/pump.io/bin/pump
-Environment=NODE_ENV=production
-# failed to find databank-mongodb without this.
-# I just looked at my environment variables took a guess.
-Environment=NODE_PATH=/usr/lib/nodejs:/usr/lib/node_modules:/usr/share/javascript
-
-[Install]
-WantedBy=multi-user.target
-EOF
- ser daemon-reload
- sgo pump
- ########## end pump.io setup ############
-
-
- ############# begin setup mastodon ##############
-
- # main doc is Docker-Guide.md in docs repo
-
- # I'd like to try gnu social just cuz of gnu, but it's not being
- # well maintained, for example, simple pull requests
- # languishing:
- # https://git.gnu.io/gnu/gnu-social/merge_requests/143
- # and I submitted my own bugs, basic docs are broken
- # https://git.gnu.io/gnu/gnu-social/issues/269
-
- # note, docker required, but we installed it earlier
-
- # i subscrubed to https://github.com/docker/compose/releases.atom
- # to see release notes.
- # i had some problems upgrading. blew things away with
- # docker-compose down
- # docker rmi $(docker images -q)
- # s reboot now
- # when running docker-compose run, kernel stack traces are printed to the journal.
- # things seem to succeed, google says nothing, so ignoring them.
- curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` | s dd of=/usr/local/bin/docker-compose
- s chmod +x /usr/local/bin/docker-compose
-
-
- cd ~
- s rm -rf mastodon
- i clone https://github.com/tootsuite/mastodon
- cd mastodon
- # subbed to atom feed to deal with updates
- git checkout $(git tag | grep -v rc | tail -n1)
-
- # per instructions, uncomment redis/postgres persistence in docker-compose.yml
- sed -i 's/^#//' docker-compose.yml
-
- cat >.env.production <<'EOF'
-REDIS_HOST=redis
-REDIS_PORT=6379
-DB_HOST=db
-DB_USER=postgres
-DB_NAME=postgres
-DB_PASS=
-DB_PORT=5432
-
-LOCAL_DOMAIN=mast.iankelling.org
-LOCAL_HTTPS=true
-
-SINGLE_USER_MODE=true
-
-SMTP_SERVER=mail.iankelling.org
-SMTP_PORT=25
-SMTP_LOGIN=li
-SMTP_FROM_ADDRESS=notifications@mast.iankelling.org
-SMTP_DOMAIN=mast.iankelling.org
-SMTP_DELIVERY_METHOD=smtp
-EOF
-
- for key in PAPERCLIP_SECRET SECRET_KEY_BASE OTP_SECRET; do
- # 1 minute 7 seconds to run this docker command
- # to generate a secret, and it has ^M chars at the end. wtf. really dumb
- printf "%s=%s\n" $key "$(docker-compose run --rm web rake secret|dos2unix|tail -n1)" >>.env.production
- done
- found=false
- while read -r domain port pass; do
- if [[ $domain == mail.iankelling.org ]]; then
- found=true
- # remove the username part
- pass="${pass#*:}"
- printf "SMTP_PASSWORD=%s\n" "$pass" >>.env.production
- break
- fi
- done < <(s cat /etc/mailpass)
- if ! $found; then
- echo "$0: error, failed to find mailpass domain for mastadon"
- exit 1
- fi
-
- # docker compose makes an interface named like br-8f3e208558f2. we need mail to
- # get routed to us.
- if ! s /sbin/iptables -t nat -C PREROUTING -i br-+ -p tcp -m tcp --dport 25 -j DNAT --to-destination 10.8.0.4:25; then
- s /sbin/iptables -t nat -A PREROUTING -i br-+ -p tcp -m tcp --dport 25 -j DNAT --to-destination 10.8.0.4:25
- fi
-
- docker-compose run --rm web rake mastodon:webpush:generate_vapid_key | grep -E '^VAPID_PUBLIC_KEY=|^VAPID_PRIVATE_KEY=' >> .env.production
- logq docker-compose run --rm web rake db:migrate
- docker-compose run --rm web rails assets:precompile
-
- # avatar failed to upload, did
- # docker logs mastodon_web_1
- # google lead me to this
- s chown -R 991:991 public/system
-
- # docker daemon takes care of starting on boot.
- docker-compose up -d
-
- s a2enmod proxy_wstunnel headers
- web-conf -f 3000 - apache2 mast.iankelling.org <<'EOF'
- ProxyPreserveHost On
- RequestHeader set X-Forwarded-Proto "https"
- ProxyPass /500.html !
- ProxyPass /oops.png !
- ProxyPass /api/v1/streaming/ ws://localhost:4000/
- ProxyPassReverse /api/v1/streaming/ ws://localhost:4000/
- ErrorDocument 500 /500.html
- ErrorDocument 501 /500.html
- ErrorDocument 502 /500.html
- ErrorDocument 503 /500.html
- ErrorDocument 504 /500.html
-EOF
-
-
- ############### !!!!!!!!!!!!!!!!!
- ############### manual steps:
-
- # only following a few people atm, so not bothering to figure out backups
- # when mastodon has not documented it at all.
- #
- # fsf@status.fsf.org
- # cwebber@toot.cat
- # dbd@status.fsf.org
- # johns@status.fsf.org
-
- # sign in page is at https://mast.iankelling.org/auth/sign_in
- # register as iank, then
- # https://github.com/tootsuite/documentation/blob/master/Running-Mastodon/Administration-guide.md
- # docker-compose run --rm web bundle exec rails mastodon:make_admin USERNAME=iank
-
- ############# end setup mastodon ##############
-
- # we use nsupdate to update the ip of home
- pi bind9
-
- pi znc
# znc config generated by doing
# znc --makeconf
# selected port is also used in erc config
# sed -i 's,/home/iank/.znc/,/var/lib/znc,' /var/lib/znc/config/znc.conf
# and made a copy of the config files into /p/c
# /msg *status LoadMod --type=global log -sanitize
- # to get into the web interface,
- # cat /etc/letsencrypt/live/iankelling.org/{privkey,cert,chain}.pem > /var/lib/znc/znc.pem
- # then use non-main browser or else it doesn't allow it based on ocsp stapling from my main site.
- # https://iankelling.org:12533/
- # i'm going to figure out how to automate this when it expires. i know i can hook a script into the renewal. https://wiki.znc.in/FAQ seems to imply that znc doesn\'t need restart.
# todo: in config file AllowWeb = true should be false. better security if that is off unless we need it.
# /msg *status LoadMod --type=network perform
# /msg *perform add PRIVMSG ChanServ :invite #fsf-office
# on killing the,
# an example seems to be here: https://github.com/zenspider/elisp/blob/master/rwd-irc.el
# if that was the case i could remove the module clearbufferonmsg
- # alo would be nice if erc supported
+ # also would be nice if erc supported
# https://wiki.znc.in/self-message
# https://wiki.znc.in/Query_buffers \
#
- s useradd --create-home -d /var/lib/znc --system --shell /sbin/nologin --comment "Account to run ZNC daemon" --user-group znc || [[ $? == 9 ]] # 9 if it exists already
- chmod 700 /var/lib/znc
- s chown -R znc:znc /var/lib/znc
- s dd of=/etc/systemd/system/znc.service 2>/dev/null <<'EOF'
-[Unit]
-Description=ZNC, an advanced IRC bouncer
-After=network-online.target
+ if ! getent passwd znc > /dev/null; then
+ sudo useradd --create-home -d /var/lib/znc --system --shell /sbin/nologin --comment "Account to run ZNC daemon" --user-group znc
+ fi
+ sudo chmod 700 /var/lib/znc
+ sudo chown -R znc:znc /var/lib/znc
+ # Avoid restarting if possible, reconnecting to irc is annoying.
+ # The unit file was made active with conflink.
+ # Note, avoid using ser here because our sudo wrapper prints the command first.
+ if [[ $(systemctl is-active znc) != active ]]; then
+ sgo znc
+ fi
+ ###### stop znc setup #####
-[Service]
-ExecStart=/usr/bin/znc -f --datadir=/var/lib/znc
-User=znc
+ end
+ ;;
+esac
+###### end website setup
-[Install]
-WantedBy=multi-user.target
-EOF
- ser daemon-reload
- sgo znc
+########### end section including li/lj ###############
- echo "$0: $(date): ending now)"
- exit 0
+#### desktop stuff
+case $codename_compat in
+ xenial)
+ # mate-indicator-applet and beyond are msc things I noticed diffing a
+ # standard install with mine.
+ pi xorg lightdm mate-desktop-environment mate-desktop-environment-extras mate-indicator-applet anacron
+ ;;
+ stretch)
+ pi task-mate-desktop
+ ;;
+ buster)
+ # mate doesnt have wayland support yet
+ pi task-gnome-desktop
;;
esac
-########### end section including li/lj ###############
+
+
+
+# TODO: some of the X programs can be removed from pall when using wayland
# depends gcc is a way to install suggests. this is apparently the only
# way to install suggests even if the main package is already
# installed. reinstall doesn't work, uninstalling can cause removing
# dependent packages.
-pi ${pall[@]} $(apt-cache search ruby[.0-9]+-doc| awk '{print $1}') $(apt-cache depends gcc|grep -i suggests:| awk '{print $2}')
+pi ${pall[@]} $(apt-cache search ruby[.0-9]+-doc| awk '{print $1}') $(apt-cache depends gcc|grep -i suggests:| awk '{print $2}') $($src/distro-pkgs)
-if ! type pip; then
- x=$(mktemp)
- wget -O$x https://bootstrap.pypa.io/get-pip.py
- python3 $x --user
-fi
sgo fsf-vpn-dns-cleanup
-# website is dead june 14 2019
-s rm -f /etc/apt/sources.list.d/iridium-browser.list
+# website is dead june 14 2019. back in october, but meh
+sudo rm -fv /etc/apt/sources.list.d/iridium-browser.list
# case $distro in
# debian)
# pi chromium ;;
# done
# key already exists, so this won't generate one, just the configs.
-vpn-server-setup -rds
-s tee -a /etc/openvpn/server/server.conf <<'EOF'
+m vpn-server-setup -rds
+sudo tee -a /etc/openvpn/server/server.conf <<'EOF'
push "dhcp-option DNS 10.0.0.1"
push "route 10.0.0.0 255.255.0.0"
client-connect /a/bin/distro-setup/vpn-client-connect
EOF
-s sed -i --follow-symlinks 's/10.8./10.9./g;s/^\s*port\s.*/port 1196/' /etc/openvpn/server/server.conf
+sudo sed -i --follow-symlinks 's/10.8./10.9./g;s/^\s*port\s.*/port 1196/' /etc/openvpn/server/server.conf
if [[ $HOSTNAME == tp ]]; then
if [[ -e /lib/systemd/system/openvpn-server@.service ]]; then
##### rss2email
# note, see bashrc for more documentation.
pi rss2email
-s dd of=/etc/systemd/system/rss2email.service <<'EOF'
+sd /etc/systemd/system/rss2email.service <<'EOF'
[Unit]
Description=rss2email
After=multi-user.target
# we pass options to use different location.
ExecStart=/a/bin/log-quiet/sysd-mail-once -288 rss2email r2e -d /p/c/rss2email.json -c /p/c/rss2email.cfg run
EOF
-s dd of=/etc/systemd/system/rss2email.timer <<'EOF'
+sd /etc/systemd/system/rss2email.timer <<'EOF'
[Unit]
Description=rss2email
[Install]
WantedBy=timers.target
EOF
-s systemctl daemon-reload
+sudo systemctl daemon-reload
######### begin pump.io periodic backup #############
if [[ $HOSTNAME == frodo ]]; then
- s dd of=/etc/systemd/system/pumpbackup.service <<'EOF'
+ sd /etc/systemd/system/pumpbackup.service <<'EOF'
[Unit]
Description=pump li backup
After=multi-user.target
Type=oneshot
ExecStart=/a/bin/log-quiet/sysd-mail-once pump-backup /a/bin/distro-setup/pump-backup
EOF
- s dd of=/etc/systemd/system/pumpbackup.timer <<'EOF'
+ sd /etc/systemd/system/pumpbackup.timer <<'EOF'
[Unit]
Description=pump li backup hourly
[Install]
WantedBy=timers.target
EOF
- s systemctl daemon-reload
+ ser daemon-reload
sgo pumpbackup.timer
fi
######### end pump.io periodic backup #############
######### begin irc periodic backup #############
if [[ $HOSTNAME == frodo ]]; then
- s dd of=/etc/systemd/system/ircbackup.service <<'EOF'
+ sd /etc/systemd/system/ircbackup.service <<'EOF'
[Unit]
Description=irc li backup
After=multi-user.target
[Service]
User=iank
Type=oneshot
-ExecStart=/a/bin/log-quiet/sysd-mail-once irc-backup rsync -rlptDhSAX --delete root@iankelling.org:/var/lib/znc/moddata/log/iank/freenode/ /k/irclogs
+ExecStart=/a/bin/log-quiet/sysd-mail-once irc-backup rsync -rlptDhSAX root@iankelling.org:/var/lib/znc/moddata/log/iank/freenode/ /k/irclogs
EOF
- s dd of=/etc/systemd/system/ircbackup.timer <<'EOF'
+ sd /etc/systemd/system/ircbackup.timer <<'EOF'
[Unit]
Description=irc li backup hourly
[Install]
WantedBy=timers.target
EOF
- s systemctl daemon-reload
+ sudo systemctl daemon-reload
sgo ircbackup.timer
fi
######### end irc periodic backup #############
-# https://github.com/jlebon/textern
-cd /a/opt/textern
-make native-install USER=1
-
case $distro in
debian|trisquel|ubuntu)
# suggests resolvconf package. installing it here is redundant, but make sure anyways.
*) pi openvpn;;
esac
-/a/bin/distro-setup/radicale-setup
-
-## android studio setup
-# this contains the setting for android sdk to point to
-# /a/opt/androidsdk, which is asked upon first run
-lnf /a/opt/.AndroidStudio2.2 ~
-# android site says it needs a bunch of packages for ubuntu,
-# but I googled for debian, and someone says you just need lib32stdc++6 plus the
-# jdk
-# https://pid7007blog.blogspot.com/2015/07/installing-android-studio-in-debian-8.html
-# see w.org for more android studio details
-spa lib32stdc++6 default-jdk
+m /a/bin/distro-setup/radicale-setup
############# begin syncthing setup ###########
# and just run it in one place instead of the normal having a
# separate config. I lean toward using the same config, since btrfs
# syncs between comps.
- case $distro in
- arch) pi syncthing ;;
- trisquel|ubuntu|debian)
- # testing has relatively up to date packages
- if ! isdebian-testing; then
- # based on error when doing apt-get update:
- # E: The method driver /usr/lib/apt/methods/https could not be found.
- pi apt-transport-https
- # google led me here:
- # https://apt.syncthing.net/
- curl -s https://syncthing.net/release-key.txt | sudo apt-key add -
- s="deb http://apt.syncthing.net/ syncthing release"
- if [[ $(cat /etc/apt/sources.list.d/syncthing.list) != $s ]]; then
- echo "$s" | s dd of=/etc/apt/sources.list.d/syncthing.list
- p update
- fi
- fi
- pi syncthing
- ;;
- esac
- lnf -T /w/syncthing /home/iank/.config/syncthing
+ # testing has relatively up to date packages
+ if ! isdebian-testing; then
+ # based on error when doing apt-get update:
+ # E: The method driver /usr/lib/apt/methods/https could not be found.
+ pi apt-transport-https
+ # google led me here:
+ # https://apt.syncthing.net/
+ curl -s https://syncthing.net/release-key.txt | sudo apt-key add -
+ s="deb http://apt.syncthing.net/ syncthing release"
+ if [[ $(cat /etc/apt/sources.list.d/syncthing.list) != "$s" ]]; then
+ echo "$s" | sd /etc/apt/sources.list.d/syncthing.list
+ p update
+ fi
+ fi
+ pi syncthing
+ m lnf -T /w/syncthing /home/iank/.config/syncthing
ser daemon-reload # syncthing likely not properly packaged
sgo syncthing@iank # runs as iank
####### begin misc packages ###########
+case $codename in
+ flidas)
+
+ ;;
+esac
+
+
# sakura config is owned by ian
-reset-sakura
-reset-konsole
-sudo -u traci -i reset-konsole
-# traci xscreensaver we don't want to reset
-reset-xscreensaver
+m reset-sakura
+m reset-konsole
+m sudo -u user2 -i reset-konsole
+# user2 xscreensaver we don't want to reset
+m reset-xscreensaver
# this would install from cabal for newer / consistent version across os, but it screws up xmonad, so disabled for now.
# also, i assume syncing this between machines somehow messed thin
#lnf -T /m/arbtt-capture.log ~/.arbtt/capture.log
-primary-setup
+m primary-setup
if [[ ! -e ~/.linphonerc && -e /p/.linphonerc-initial ]]; then
- cp /p/.linphonerc-initial ~/.linphonerc
+ m cp /p/.linphonerc-initial ~/.linphonerc
fi
### begin spd install
pi libswitch-perl libdigest-md5-file-perl libgnupg-interface-perl
t=$(mktemp)
-wget -O $t http://mirror.fsf.org/fsfsys-trisquel/fsfsys-trisquel/pool/main/s/spd-perl/spd-perl_0.2-1_amd64.deb
-s dpkg -i $t
-rm $t
+m wget -O $t http://mirror.fsf.org/fsfsys-trisquel/fsfsys-trisquel/pool/main/s/spd-perl/spd-perl_0.2-1_amd64.deb
+sudo dpkg -i $t
+m rm $t
# this guesses at the appropriate directory, adjust if needed
-x=(/usr/lib/x86_64-linux-gnu/perl/5.*)
-sudo ln -sf ../../../perl/5.18.2/SPD/ $x
+perldir=(/usr/lib/x86_64-linux-gnu/perl/5.*)
+m sudo ln -sf ../../../perl/5.18.2/SPD/ ${perldir[0]}
# newer distro had gpg2 as default, older one, flidas, need to make it that way
-x=$(which gpg2)
+gpgpath=$(which gpg2)
-if [[ $x ]]; then
+if [[ $gpgpath ]]; then
- s mkdir -p /usr/local/spdhackfix
- s lnf -T $x /usr/local/spdhackfix/gpg
+ sudo mkdir -p /usr/local/spdhackfix
+ s lnf -T $gpgpath /usr/local/spdhackfix/gpg
fi
### end spd install
-if [[ $HOSTNAME == kw ]]; then
- cat <<'EOF'
-NOTE: after this finishes, i did
-s nmtui-connect
-# remove br from auto:
-s vim /etc/network/interfaces
-EOF
-fi
-
# nagstamon setting which were set through the ui
# in filters tab:
# all unknown sources
# but i'm just going to rely on the webpage plus sms for now.
-case $distro in
- debian|trisquel|ubuntu)
- # it asks if it should make users in it's group capture packets without root,
- # which is arguably more secure than running wireshark as root. default is no,
- # which is what i prefer, since I plan to use tcpdump to input to wireshark.
- s DEBIAN_FRONTEND=noninteractive pi wireshark-gtk
- ;;
- # others unknown
-esac
-
-case $(debian-codename) in
- # needed for debootstrap scripts for fai since fai requires debian
- flidas)
- curl http://archive.ubuntu.com/ubuntu/project/ubuntu-archive-keyring.gpg | s apt-key add -
- s dd of=/etc/apt/preferences.d/flidas-xenial <<EOF
-Package: *
-Pin: release a=xenial
-Pin-Priority: -100
-
-Package: *
-Pin: release a=xenial-updates
-Pin-Priority: -100
-
-Package: *
-Pin: release a=xenial-security
-Pin-Priority: -100
-EOF
- s dd of=/etc/apt/sources.list.d/xenial.list 2>/dev/null <<EOF
-deb http://us.archive.ubuntu.com/ubuntu/ xenial main
-deb http://us.archive.ubuntu.com/ubuntu/ xenial-updates main
-deb http://us.archive.ubuntu.com/ubuntu/ xenial-security main
-EOF
-
- s apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
- s dd of=/etc/apt/preferences.d/flidas-bionic <<EOF
-Package: *
-Pin: release a=bionic
-Pin-Priority: -100
-
-Package: *
-Pin: release a=bionic-updates
-Pin-Priority: -100
-
-Package: *
-Pin: release a=bionic-security
-Pin-Priority: -100
-EOF
-
- # better to run btrfs-progs which matches our kernel version
- # (note, renamed from btrfs-tools)
- s dd of=/etc/apt/preferences.d/btrfs-progs <<EOF
-Package: btrfs-progs libzstd1
-Pin: release a=bionic
-Pin-Priority: 1005
-
-Package: btrfs-progs libzstd1
-Pin: release a=bionic-updates
-Pin-Priority: 1005
-
-Package: btrfs-progs libzstd1
-Pin: release a=bionic-security
-Pin-Priority: 1005
-EOF
-
-
- t=$(mktemp)
- cat >$t <<EOF
-deb http://us.archive.ubuntu.com/ubuntu/ bionic main
-deb http://us.archive.ubuntu.com/ubuntu/ bionic-updates main
-deb http://us.archive.ubuntu.com/ubuntu/ bionic-security main
-EOF
- f=/etc/apt/sources.list.d/bionic.list
- if ! diff -q $t $f; then
- s cp $t $f
- s chmod 644 $f
- p update
- fi
-
- # no special reason, but its better for btrfs-progs to
- # be closer to our kernel version
- pi btrfs-progs
-
- t=$(mktemp -d)
- cd $t
- aptitude download debootstrap/xenial
- ex *
- ex data.tar.gz
- s cp ./usr/share/debootstrap/scripts/* /usr/share/debootstrap/scripts
-
- ;;
-esac
+# it asks if it should make users in its group capture packets without root,
+# which is arguably more secure than running wireshark as root. default is no,
+# which is what i prefer, since I plan to use tcpdump to input to wireshark.
+s DEBIAN_FRONTEND=noninteractive pi wireshark-gtk
# /run and /dev/shm are listed as required for pulseaudio. All 4 in the group
# listed in the default config as suggested.
fi
apps=($@)
d=/nocow/schroot/$n
- s dd of=/etc/schroot/chroot.d/$n.conf <<EOF
+ sd /etc/schroot/chroot.d/$n.conf <<EOF
[$n]
description=$n
type=directory
directory=$d
profile=desktop
preserve-environment=true
-users=$USER,traci
+users=$USER,user2
EOF
if [[ -e $d/bin ]]; then
- s chroot $d apt-get update
- s chroot $d apt-get -y dist-upgrade --purge --auto-remove
- cd; s schroot -c $n -- apt-get install --allow-unauthenticated -y ${apps[@]}
+ sudo chroot $d apt-get update
+ sudo chroot $d apt-get -y dist-upgrade --purge --auto-remove
+ cd; sudo schroot -c $n -- apt-get install --allow-unauthenticated -y ${apps[@]}
else
- s mkdir -p $d
+ sudo mkdir -p $d
- s debootstrap $n $d $repo
- cd; s schroot -c $n -- apt-get install --allow-unauthenticated -y ${apps[@]}
+ sudo debootstrap $n $d $repo
+ cd; sudo schroot -c $n -- apt-get install --allow-unauthenticated -y ${apps[@]}
fi
- s cp -P {,$d}/etc/localtime
+ sudo cp -P {,$d}/etc/localtime
}
-s dd of=/etc/systemd/system/schrootupdate.service <<'EOF'
+sd /etc/systemd/system/schrootupdate.service <<'EOF'
[Unit]
Description=schrootupdate
After=multi-user.target
Type=oneshot
ExecStart=/a/bin/log-quiet/sysd-mail-once schrootupdate /a/bin/distro-setup/schrootupdate
EOF
-s dd of=/etc/systemd/system/schrootupdate.timer <<'EOF'
+sd /etc/systemd/system/schrootupdate.timer <<'EOF'
[Unit]
Description=schrootupdate
[Install]
WantedBy=timers.target
EOF
-s systemctl daemon-reload
+ser daemon-reload
sgo schrootupdate.timer
-
# for my roommate
case $distro in
trisquel)
- mkschroot debian stretch firefox-esr pulseaudio chromium
+ m mkschroot debian stretch firefox-esr pulseaudio chromium
+ ;;
+ debian)
+ pi chromium
;;
esac
-s mkdir -p /nocow/user
-s chown $USER:$USER /nocow/user
-case $distro in
- trisquel|ubuntu)
- pi anki
+sudo mkdir -p /nocow/user
+sudo chown $USER:$USER /nocow/user
+pi anki
+
+
+####### begin transmission
+
+case $HOSTNAME in
+ frodo)
+ tdir=/i/k
+ ;;
+ *)
+ tdir=/nocow/user
;;
- # others unknown
esac
-
# adapted from /var/lib/dpkg/info/transmission-daemon.postinst
# 450 seems likely to be unused. we need to specify one or else
# it won't be stable across installs.
if ! getent passwd debian-transmission > /dev/null; then
- s groupadd -g 450 debian-transmission
- case $distro in
- arch)
- s useradd \
- --system \
- --create-home \
- --gid 450 \
- --uid 450 \
- --home-dir /var/lib/transmission-daemon \
- --shell /bin/false \
- debian-transmission
- ;;
- *)
- s adduser --quiet \
- --gid 450 \
- --uid 450 \
- --system \
- --no-create-home \
- --disabled-password \
- --home /var/lib/transmission-daemon \
- debian-transmission
- ;;
- esac
+ sudo groupadd -g 450 debian-transmission
+ sudo adduser --quiet \
+ --gid 450 \
+ --uid 450 \
+ --system \
+ --no-create-home \
+ --disabled-password \
+ --home /var/lib/transmission-daemon \
+ debian-transmission
fi
# We want group writable stuff from transmission.
# However, after setting this, I learn that transmission sets it's
# own umask based on it's settings file. Well, no harm leaving this
# so it's set right from the beginning.
-s chfn debian-transmission -o umask=0002
+sudo chfn debian-transmission -o umask=0002
-case $distro in
- debian|trisquel|ubuntu)
- # note i had to do this, which is persistent:
- # cd /i/k
- # s chgrp debian-transmission torrents partial-torrents
-
- # syslog says things like
- # 'Failed to set receive buffer: requested 4194304, got 425984'
- # google suggets giving it even more than that
- tu /etc/sysctl.conf<<'EOF'
+# note i had to do this, which is persistent:
+# cd /i/k
+# s chgrp debian-transmission torrents partial-torrents
+
+# syslog says things like
+# 'Failed to set receive buffer: requested 4194304, got 425984'
+# google suggests giving it even more than that
+tu /etc/sysctl.conf<<'EOF'
net.core.rmem_max = 67108864
net.core.wmem_max = 16777216
EOF
- s sysctl -p
-
- # some reason it doesn\'t seem to start automatically anyways
- pi-nostart transmission-daemon
- # be extra sure its not started
- ser disable transmission-daemon
- ser stop transmission-daemon
-
- # the folder was moved here after an install around 02/2017.
- # it contains runtime data,
- # plus a simple symlink to the config file which it\'s
- # not worth separating out.
- # between comps, the uid can change
- f=/i/transmission-daemon
- s lnf -T $f /var/lib/transmission-daemon/.config/transmission-daemon
- if [[ -e $f ]]; then
- s chown -R debian-transmission:debian-transmission $f
- fi
- for f in /i/k/partial-torrents /i/k/torrents; do
- if [[ -e $f ]]; then
- s chown -R debian-transmission:traci $f
- fi
- done
- s chown -R debian-transmission:debian-transmission /var/lib/transmission-daemon
- #
- # config file documented here, and it\'s the same config
- # for daemon vs client, so it\'s documented in the gui.
- # https://trac.transmissionbt.com/wiki/EditConfigFiles#Options
- #
- # I originaly setup rpc-whitelist, but after using
- # routing to a network namespace, it doesn\'t see the
- # real source address, so it\'s disabled.
- #
- # Changed the cache-size to 256 mb, reduces disk use.
- # It is a read & write cache.
- #
- s ruby <<'EOF'
+sudo sysctl -p
+
+# some reason it doesn\'t seem to start automatically anyways
+pi-nostart transmission-daemon
+# be extra sure its not started
+ser disable transmission-daemon
+ser stop transmission-daemon
+
+# the folder was moved here after an install around 02/2017.
+# it contains runtime data,
+# plus a simple symlink to the config file which it\'s
+# not worth separating out.
+# between comps, the uid can change
+f=$tdir/transmission-daemon
+mkdir -p $f
+s lnf -T $f /var/lib/transmission-daemon/.config/transmission-daemon
+s lnf -T /etc/transmission-daemon/settings.json $f/settings.json
+sudo chown -R debian-transmission:debian-transmission $f
+for f in $tdir/partial-torrents $tdir/torrents; do
+ if [[ -e $f ]]; then
+ sudo chown -R debian-transmission:user2 $f
+ fi
+done
+sudo chown -R debian-transmission:debian-transmission /var/lib/transmission-daemon
+#
+# config file documented here, and it\'s the same config
+# for daemon vs client, so it\'s documented in the gui.
+# https://trac.transmissionbt.com/wiki/EditConfigFiles#Options
+#
+# I originally set up rpc-whitelist, but after using
+# routing to a network namespace, it doesn\'t see the
+# real source address, so it\'s disabled.
+#
+# Changed the cache-size to 256 mb, reduces disk use.
+# It is a read & write cache.
+sudo ruby <<EOF
require 'json'
p = '/etc/transmission-daemon/settings.json'
File.write(p, JSON.pretty_generate(JSON.parse(File.read(p)).merge({
'rpc-whitelist-enabled' => false,
'rpc-authentication-required' => false,
-'incomplete-dir' => '/i/k/partial-torrents',
+'incomplete-dir' => '$tdir/partial-torrents',
'incomplete-dir-enabled' => true,
-'download-dir' => '/i/k/torrents',
+'download-dir' => '$tdir/torrents',
"speed-limit-up" => 800,
"speed-limit-up-enabled" => true,
"peer-port" => 61486,
"ratio-limit-enabled" => true,
})) + "\n")
EOF
-
- ;;
- # todo: others unknown
-esac
+####### end transmission
vpn_ser=openvpn
fi
-s dd of=/etc/systemd/system/transmission-daemon-nn.service <<EOF
+sd /etc/systemd/system/transmission-daemon-nn.service <<EOF
[Unit]
Description=Transmission BitTorrent Daemon netns
After=network.target
# the password is randomly generated on first run, i copied it out
# so it could be used by other hosts.
- s ruby <<'EOF'
+ sudo ruby <<'EOF'
require 'json'
p = '/etc/transmission-daemon/settings.json'
s = JSON.parse(File.read(p))
rpc_pass=$(</p/transmission-rpc-pass)
for f in /home/*; do
u=${f##*/}
- if [[ ! $(id -u $u) -ge 1000 ]]; then
+ uid=$(id -u $u 2>/dev/null) || continue
+ if [[ ! $uid -ge 1000 ]]; then
continue
fi
d=$f/.config/transmission-remote-gtk
- s -u $u mkdir -p $d
- s -u $u dd of=$d/config.json <<EOF
+ sudo -u $u mkdir -p $d
+ sudo -u $u dd of=$d/config.json <<EOF
{
"profiles" : [
- {
+ {
"profile-name" : "Default",
"hostname" : "transmission.b8.nz",
+
"rpc-url-path" : "/transmission/rpc",
"username" : "",
"password" : "$rpc_pass",
"update-interval" : 3,
"min-update-interval" : 3,
"session-update-interval" : 60,
- "exec-commands" : [
- ],
- "destinations" : [
- ]
- }
+ "exec-commands" : [],
+ "destinations" : []
+ },
+ {
+ "profile-name" : "local",
+ "hostname" : "10.173.0.2",
+
+ "username" : "",
+ "password" : "$rpc_pass",
+ "auto-connect" : true,
+ "ssl" : false,
+ "timeout" : 40,
+ "retries" : 3,
+ "update-active-only" : false,
+ "activeonly-fullsync-enabled" : false,
+ "activeonly-fullsync-every" : 2,
+ "update-interval" : 3,
+ "min-update-interval" : 3,
+ "session-update-interval" : 60,
+ "exec-commands" : [],
+ "destinations" : []
+ }
],
"profile-id" : 0,
"add-options-dialog" : false
fi
######### end transmission client setup ######
-# dunno why it\'s there, but get rid of it
-case $HOSTNAME in
- li|lj) s rm -rf /home/linode ;;
-esac
-
### printer setup
-case $distro in
- arch)
- pi cups ghostscript gsfonts # from arch wiki cups page
- pi hplip # from google
- s gpasswd -a $USER sys # from arch wiki
- sgo org.cups.cupsd.service
- ;;
- debian|trisquel|ubuntu)
- pi cups
- s gpasswd -a $USER lpadmin # based on ubuntu wiki
- spa hplip
- ;;
- # other distros unknown
-esac
+pi cups hplip
+sudo gpasswd -a $USER lpadmin # based on ubuntu wiki
# goto http://127.0.0.1:631
# administration tab, add new printer button.
# In debian, I could use hte recommended driver,
# allow user to run vms, from debian handbook
-for x in iank traci; do s usermod -a -G libvirt,kvm $x; done
+for x in iank user2; do s usermod -a -G libvirt,kvm $x; done
# bridge networking as user fails. google lead here, but it doesn\'t work:
# oh well, I give up.
# http://wiki.qemu.org/Features-Done/HelperNetworking
# s mkdir /etc/qemu
# f=/etc/qemu/bridge.conf
-# s dd of=$f <<'EOF'
+# sd $f <<'EOF'
# allow br0
# EOF
# #s chown root:qemu $f # debian has somethig like qemu-libvirt. equivalent?
# general known for debian/ubuntu, not for fedora
-case $distro in
- debian|trisquel|ubuntu)
- if ! type -p go &>/dev/null; then
- cd $(mktemp -d)
- # just the latest stable at the time of writing
- # TODO, maybe put this all into a build script,
- # and do some automatic updates
- wget -q https://dl.google.com/go/go1.12.4.linux-amd64.tar.gz
- s tar -C /usr/local -xzf go1.12.4.linux-amd64.tar.gz
- rm -f *
- fi
- go get -u mvdan.cc/fdroidcl
- # a bit of googling, and added settings to bashrc
- ;;
-esac
-
-case $distro in
- arch)
- pi virt-install
- # otherwise we get error about accessing kvm module.
- # seems like there might be a better way, but google was a bit vague.
- s $sed -ri '/^ *user *=/d' /etc/libvirt/qemu.conf
- echo 'user = "root"' | s tee -a /etc/libvirt/qemu.conf
- # guessing this is not needed
- #sgo virtlogd.service
-
- # iank: disabed as im not using libvirt usually
- # # https://bbs.archlinux.org/viewtopic.php?id=206206
- # # # this should prolly go in the wiki
- # sgo virtlogd.socket
- # sgo libvirtd
- ;;
- debian|trisquel|ubuntu)
- pi-nostart virtinst virt-manager
- ;;
-
-esac
-
-
-case $distro in
- fedora)
- cd $(mktemp -d)
- wget ftp://ftp.gnu.org/pub/gnu/global/global-6.5.7.tar.gz
- ex global*
- cd global-6.5.7
- # based on https://github.com/leoliu/ggtags
- ./configure --with-exuberant-ctags=/usr/bin/ctags
- make
- s make install
- pip install pygments
- ;;
- *)
- pi global
- ;;&
- arch)
- pi python2-pygments
- ;;
- debian|trisquel|ubuntu)
- pi python-pygments
- ;;
-esac
-
-
+m /a/bin/buildscripts/go
+m /a/bin/buildscripts/rust
+m /a/bin/buildscripts/misc
+pi-nostart virtinst virt-manager
-# note this failed running at the beginning of this file,
-# because no systemd user instance was running.
-# Doing systemd --user resulted in
-# Trying to run as user instance, but $XDG_RUNTIME_DIR is not set
-if isdebian-testing; then
- # as of 7/2016, has no unstable deps, and is not in testing anymore.
- pi synergy/unstable
-else
- pi synergy
-fi
-
-# case $distro in
-# # ubuntu unknown. probably the same as debian, just check if the
-# # init scripts come with the package.
-# debian)
-# # copied from arch, but moved to etc
-# s dd of=/etc/systemd/user/synergys.service <<'EOF'
-# [Unit]
-# Description=Synergy Server Daemon
-# After=network.target
-
-# [Service]
-# User=%i
-# ExecStart=/usr/bin/synergys --no-daemon --config /etc/synergy.conf
-# Restart=on-failure
-
-# [Install]
-# WantedBy=multi-user.target
-# EOF
-# s dd of=/etc/systemd/user/synergys.socket <<'EOF'
-# [Unit]
-# Conflicts=synergys@.service
-
-# [Socket]
-# ListenStream=24800
-# Accept=false
-
-# [Install]
-# WantedBy=sockets.target
-# EOF
-# # had this fail with 'Failed to connect to bus: No such file or directory'
-# # then when I tried it manually, it worked fine...
-# if ! systemctl --user daemon-reload; then
-# sleep 2
-# echo retrying systemd user daemon reload
-# systemctl --user daemon-reload
-# fi
-# ;;&
-# *)
-# # taken from arch wiki.
-# s dd of=/etc/systemd/system/synergyc@.service <<'EOF'
-# [Unit]
-# Description=Synergy Client
-# After=network.target
-
-# [Service]
-# User=%i
-# ExecStart=/usr/bin/synergyc --no-daemon frodo
-# Restart=on-failure
-# # per man systemd.unit, StartLimitInterval, by default we
-# # restart more than 5 times in 10 seconds.
-# # And this param defaults too 200 miliseconds.
-# RestartSec=3s
-
-# [Install]
-# WantedBy=multi-user.target
-# EOF
-# s systemctl daemon-reload
-# case $HOSTNAME in
-# x2|x3|tp)
-# ser enable synergyc@iank
-# ser start synergyc@iank ||: # X might not be running yet
-# ;;
-# frodo)
-# systemctl --user start synergys ||:
-# systemctl --user enable synergys
-# ;;
-# esac
-# ;;
-# esac
-
-pi --no-install-recommends kdeconnect-plasma
+pi --no-install-recommends kdeconnect
### kdeconnect for gnome. started in /a/bin/distro-setup/desktop-20-autostart.sh
### but gnome + xmonad not working in flidas, so i disabled it
-pi libgtk-3-dev python3-requests-oauthlib valac cmake python-nautilus libappindicator3-dev
-cd /a/opt/indicator-kdeconnect
-mkdir -p build
-cd build
-cmake .. -DCMAKE_INSTALL_PREFIX=/usr
-make
-sudo make install
-# we can start it manually with /usr/lib/x86_64-linux-gnu/libexec/kdeconnectd
-# it seems, according to
-# /etc/xdg/autostart/kdeconnectd.desktop
-# I'm not seeing the icon, but the clipboard replication is working
+# pi libgtk-3-dev python3-requests-oauthlib valac cmake python-nautilus libappindicator3-dev
+# cd /a/opt/indicator-kdeconnect
+# mkdir -p build
+# cd build
+# cmake .. -DCMAKE_INSTALL_PREFIX=/usr
+# make
+# sudo make install
+# # we can start it manually with /usr/lib/x86_64-linux-gnu/libexec/kdeconnectd
+# # it seems, according to
+# # /etc/xdg/autostart/kdeconnectd.desktop
+# # I'm not seeing the icon, but the clipboard replication is working
### model 01 arduino support ###
# set arduino var in bashrc,
# have system config file setup too.
sudo adduser $USER dialout
-case $distro in
- arch)
- sudo usermod -a -G uucp $USER
- ;;
-esac
# this is for the mail command too. update-alternatives is kind of misleading
# since at least it's main commands pretend mail does not exist.
# bsd's mail got pulled in on some dumb dependency, i dunno how.
-s update-alternatives --set mailx /usr/bin/mail.mailutils
+sudo update-alternatives --set mailx /usr/bin/mail.mailutils
######### end misc packages #########
# -o on, turn on 4 hour period non-performance degrading testing.
# short test daily 2-3am, extended tests Saturdays between 3-4am:
sched="-s (S/../.././02|L/../../6/03)"
-s sed -i --follow-symlinks "s#^[[:space:]]*DEVICESCAN.*#\
+sudo sed -i --follow-symlinks "s#^[[:space:]]*DEVICESCAN.*#\
DEVICESCAN -a -o on -S on -n standby,q $sched \
-m ian@iankelling.org -M exec /usr/local/bin/smart-notify#" /etc/smartd.conf
# conflicts with the normal dnsmasq package.
f=/etc/NetworkManager/NetworkManager.conf
m=$(md5sum $f)
-s sed -ri '/ *\[main\]/,/^ *\[[^]]+\]/{/^\s*dns[[:space:]=]/d}' $f
+sudo sed -ri '/ *\[main\]/,/^ *\[[^]]+\]/{/^\s*dns[[:space:]=]/d}' $f
+sudo sed -ri '/ *\[main\]/a dns=default' $f
if [[ $m != $(md5sum $f) ]]; then
srestart NetworkManager
fi
# make my /etc/fonts/conf.d/ get used.
# I have a new sans-serif font there because the default one
# displays l and I as the same char, grrrrr.
-s fc-cache
+sudo fc-cache
-/a/bin/distro-setup/mymimes
+m /a/bin/distro-setup/mymimes
sgo dynamicipupdate
# dbus-launch makes this work within an ssh connection, otherwise you get this message,
# with still 0 exit code.
# dconf-WARNING **: failed to commit changes to dconf: Cannot autolaunch D-Bus without X11 $DISPLAY
-dbus-launch gsettings set org.gnome.desktop.media-handling automount-open false
+m dbus-launch gsettings set org.gnome.desktop.media-handling automount-open false
# on grub upgrade, we get prompts unless we do this
devs=()
-for dev in $(s btrfs fi show /boot | sed -nr 's#.*path\s+(\S+)$#\1#p'); do
- devs+=($(devbyid $dev),)
+for dev in $(s btrfs fil show /boot | sed -nr 's#.*path\s+(\S+)$#\1#p'); do
+ devs+=("$(devbyid $dev),")
done
devs[-1]=${devs[-1]%,} # jonied by commas
-s debconf-set-selections <<EOF
+sudo debconf-set-selections <<EOF
grub-pc grub-pc/install_devices multiselect ${devs[*]}
EOF
sgo btrfsmaint.timer
sgo btrfsmaintstop.timer
+# aren't autoupdating this, but I do check on it somewhat regularly.
+m cd /a/opt/btrbk
+sudo make install
-# the wiki backup script from ofswiki.org uses generic paths
-s lnf /p/c/machine_specific/li/mw_vars /root
-s lnf /k/backup/wiki_backup /root
-
-s cedit /etc/goaccess.conf <<'EOF' || [[ $? == 1 ]]
-# all things found from looking around the default config
-# copied existing NCSA Combined Log Format with Virtual Host, plus %L
-log-format %^:%^ %h %^[%d:%t %^] "%r" %s %b "%R" "%u" %D
-time-format %H:%M:%S
-date-format %d/%b/%Y
-log-file /var/log/apache2/access.log
-color-scheme 2
-
-# tip: copy access.log files to a stretch host directory, then run
-# jessie's goaccess is too old for some options, and it\'s
-# not easily installed from a testing.
-# goaccess --ignore-crawlers -f <(cat *) -a -o html > x.html
-EOF
-
-
-case $distro in
- trisquel|ubuntu|debian)
- # unison-gtk second, i want it to be default, not sure if that works
- # with spa. note, I used to install from testing repo when using stable,
- # but it shouldn't be needed since I wrote a script to handle mismatching
- # compilers.
- spa unison unison-gtk
- ;;
- arch)
- spa unison gtk2
- ;;
-esac
-
-case $distro in
- arch)
- # default is alsa, doesn\'t work with with pianobar
- s dd of=/etc/libao.conf <<'EOF'
-default_driver=pulse
-EOF
- ;;
-esac
-
-# note, for jessie, it depends on a higher version of btrfs-tools.
-#
-# # disabled due to my patch being in btrbk
-# case $distro in
-# arch|debian|trisquel|ubuntu) pi btrbk ;;
-# # others unknown
-# esac
-cd /a/opt/btrbk
-s make install
-
-# ian: temporarily disabled while hosts are in flux.
-# if [[ $HOSTNAME == tp ]]; then
-# # backup/sync manually on others hosts for now.
-# sgo btrbk.timer
-# # note: to see when it was last run,
-# # ser list-timers
-# fi
+sgo btrbk.timer
+# note: to see when it was last run,
+# ser list-timers
end_msg <<'EOF'
because the default edge scroll doesn\'t work. Originally found this in debian.
EOF
-case $distro in
- debian)
- # remove dep that came in with desktop
- pu transmission-gtk
- ;;
-esac
+# Remove dep that came in with desktop to fix associations.
+m pu transmission-gtk
+sudo gpasswd -a iank adm #needed for reading logs
-case $distro in
- debian|trisquel|ubuntu) s gpasswd -a iank adm ;; #needed for reading logs
-esac
+m /a/bin/buildscripts/pithosfly
-/a/bin/buildscripts/pithosfly
-#### tor
-case $distro in
- # based on
- # https://www.torproject.org/docs/rpms.html.en
- # https://www.torproject.org/docs/debian.html.en
- # todo: figure out if the running service needs to be restarted upon updates
- # todo on fedora: setup non-dev packages
- fedora)
- s dd of=/etc/yum.repos.d/torproject.repo <<'EOF'
-[tor]
-name=Tor experimental repo
-enabled=1
-baseurl=http://deb.torproject.org/torproject.org/rpm/tor-testing/fc/20/$basearch/
-gpgcheck=1
-gpgkey=http://deb.torproject.org/torproject.org/rpm/RPM-GPG-KEY-torproject.org.asc
-
-[tor-source]
-name=Tor experimental source repo
-enabled=1
-autorefresh=0
-baseurl=http://deb.torproject.org/torproject.org/rpm/tor-testing/fc/20/SRPMS
-gpgcheck=1
-gpgkey=http://deb.torproject.org/torproject.org/rpm/RPM-GPG-KEY-torproject.org.asc
-EOF
+# # Based on guix manual instructions, also added code to profile.
+# # disabled since i'm not using it now.
+# pi nscd
+# if ! type -p guix >/dev/null; then
+# cd $(mktemp -d)
+# wget https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh
+# # added some stuff to envonment.sh for profile based on
+# # manual instructions
+# # wget https://sv.gnu.org/people/viewgpg.php?user_id=15145 -qO - | gpg --import -
+# # echo is to get past prompt
+# yes | sudo -E HOME=$HOME bash guix-install.sh || [[ $? == 141 ]]
+# guix install glibc-utf8-locales
+# guix package --install guile
+# fi
- # to be secure, take a look at the fingerprint reported from the following install, and see if it matches from the link above:
- # 3B9E EEB9 7B1E 827B CF0A 0D96 8AF5 653C 5AC0 01F1
- sgo tor
- /a/bin/buildscripts/tor-browser
- ;;
- ubuntu)
- tu /etc/apt/sources.list "deb http://deb.torproject.org/torproject.org $(debian-codename) main"
- gpg --keyserver keys.gnupg.net --recv 886DDD89
- gpg --export A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89 | sudo apt-key add -
- p update
- pi deb.torproject.org-keyring
- pi tor
- /a/bin/buildscripts/tor-browser
- ;;
- debian)
- pi tor
- /a/bin/buildscripts/tor-browser
- ;;
- arch)
- pi tor tor-browser-en
- sgo tor
- ;;
- # ubuntu unknown
-esac
-# nfs server
-case $distro in
- fedora)
- end_msg <<'EOF'
-fedora todo: disable the firewall or find a way to automate it.
-there's an unused section in t.org for tramikssion firewall setup
-
-fedora manual config for nfs:
-s firewall-config
-change to permanent configuration
-check the box for nfs
-was hard to figure this out, not sure if this is all needed, but
-unblock these too
-mountd: udp/tcp 20048
-portmapper, in firewall-config its called rpc-bind: udp/tcp 111
-troubleshooting, unblock things in rpcinfo -p
-make sure to reload the firewall to load the persistent configuration
+pi tor
+m /a/bin/buildscripts/tor-browser
-EOF
- pi nfs-utils
- sgo nfs-server
- ;;
- debian|trisquel|ubuntu)
- pi nfs-server
- ;;
- arch)
- pi nfs-utils || pending_reboot=true
- sgo rpcbind
- # this failed until I rebooted
- sgo nfs-server
- ;;
-esac
+# nfs server
+pi-nostart nfs-kernel-server
# networkmanager has this nasty behavior on flidas: if the machine
# crashes with dnsmasq running, on subsequent boot, it adds an entry to
# then based on whats in /run/dnsmasq/, i see we can run
# s resolvconf -d NetworkManager
# oh ya, and stoping NetworkManager leaves this crap behind without cleaning it up.
-ser stop NetworkManager
ser disable NetworkManager
tu /etc/exports <<'EOF'
/k 10.0.0.0/24(rw,fsid=0,nohide,no_root_squash,async,no_subtree_check,insecure)
EOF
- s exportfs -rav
+ sudo exportfs -rav
fi
-e "$end_msg_var"
######### begin stuff belonging at the end ##########
-
-if $pending_reboot; then
- echo "$0: pending reboot and then finished. doing it now."
- s reboot now
-else
- echo "$0: $(date): ending now)"
-fi
-exit 0
+end