#+CAPTION: [[file:public/vars.yml][=public/vars.yml=]]
#+BEGIN_SRC conf :tangle public/vars.yml
-front_addr: 192.168.15.5
+front_addr: 192.168.15.3
#+END_SRC
The example address is a private network address because the example
- name: Update hostname.
become: yes
command: hostname -F /etc/hostname
- when: domain_name != ansible_hostname
+ when: domain_name != ansible_fqdn
tags: actualizer
#+END_SRC
src: /home/{{ item }}/Public/HTML
state: link
force: yes
+ follow: false
loop: "{{ usernames }}"
when: members[item].status == 'current'
tags: accounts
PostUp = resolvectl domain %i small.private
[Peer]
-EndPoint = 192.168.15.5:39608
+EndPoint = 192.168.15.3:39608
PublicKey = S+6HaTnOwwhWgUGXjSBcPAvifKw+j8BDTRfq534gNW4=
AllowedIPs = 10.177.87.1
AllowedIPs = 192.168.56.0/24
admin: root
www-data: root
monkey: root
+ root: {{ ansible_user }}
path: /etc/aliases
marker: "# {mark} INSTITUTE MANAGED BLOCK"
notify: New aliases.
# Front
[Peer]
-EndPoint = 192.168.15.5:39608
+EndPoint = 192.168.15.3:39608
PublicKey = S+6HaTnOwwhWgUGXjSBcPAvifKw+j8BDTRfq534gNW4=
AllowedIPs = 10.177.87.1
AllowedIPs = 10.177.87.0/24
#+CAPTION: [[file:private/vars.yml][=private/vars.yml=]]
#+BEGIN_SRC conf :tangle private/vars.yml
-nextcloud_dbpass: ippAgmaygyob
+nextcloud_dbpass: ippAgmaygyobwyt5
#+END_SRC
When the ~mysql_db~ Ansible module supports ~check_implicit_admin~,
flush privileges;
#+END_SRC
+# Similar SQL is used in [[* Restore Nextcloud]] and [[* Install Nextcloud]].
+# Those SQL snippets and the above should be kept in sync!
+
Finally, a symbolic link positions =/Nextcloud/nextcloud/= at
=/var/www/nextcloud/= as expected by the Apache2 configuration above.
Nextcloud itself should always believe that =/var/www/nextcloud/= is
make it so.
#+BEGIN_SRC sh
-sudo chown -R www-data.www-data /Nextcloud/nextcloud/
+sudo chown -R www-data:www-data /Nextcloud/nextcloud/
#+END_SRC
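The link itself is a single command; a sketch, assuming nothing
already occupies =/var/www/nextcloud=:
#+BEGIN_SRC sh
sudo ln -s /Nextcloud/nextcloud /var/www/nextcloud
#+END_SRC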
The database is restored with the following commands, which assume the
sudo -u www-data php occ maintenance:data-fingerprint
#+END_SRC
+# Similar SQL is used in [[* Configure Nextcloud]] and [[* Install Nextcloud]].
+# Those SQL snippets and the above should be kept in sync!
+
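A minimal sketch of such a restore, assuming a dump produced earlier
with ~mysqldump~ (the file name here is hypothetical):
#+BEGIN_SRC sh
# Load the (hypothetical) dump into the nextcloud database, then run
# the data-fingerprint command shown above.
sudo mysql nextcloud < nextcloud-dbbackup.bak
#+END_SRC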
Finally the administrator surfs to ~http://core/nextcloud/~,
authenticates, and addresses any warnings on the Administration >
Overview web page.
Installing Nextcloud in the newly created =/Nextcloud/= starts with
downloading and verifying a recent release tarball. The following
-example command lines unpacked Nextcloud 23 in =nextcloud/= in
+example command lines unpacked Nextcloud 31 in =nextcloud/= in
=/Nextcloud/= and set the ownerships and permissions of the new
directories and files.
#+BEGIN_SRC sh
cd /Nextcloud/
-tar xzf ~/Downloads/nextcloud-23.0.0.tar.bz2
-sudo chown -R www-data.www-data nextcloud
+tar xjf ~/Downloads/nextcloud-31.0.2.tar.bz2
+sudo chown -R www-data:www-data nextcloud
sudo find nextcloud -type d -exec chmod 750 {} \;
sudo find nextcloud -type f -exec chmod 640 {} \;
#+END_SRC
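The preceding download and verification might look like the following
sketch. The URLs follow Nextcloud's usual release layout and are
assumptions, not taken from this document; check them (and the
published checksum) against the actual release.
#+BEGIN_SRC sh
cd ~/Downloads/
wget https://download.nextcloud.com/server/releases/nextcloud-31.0.2.tar.bz2
wget https://download.nextcloud.com/server/releases/nextcloud-31.0.2.tar.bz2.sha256
sha256sum -c nextcloud-31.0.2.tar.bz2.sha256
#+END_SRC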
-According to the latest installation instructions in version 24's
-administration guide, after unpacking and setting file permissions,
-the following ~occ~ command takes care of everything. This command
-currently expects Nextcloud's database and user to exist. The
-following SQL commands create the database and user (entered at the
-SQL prompt of the ~sudo mysql~ command). The shell command then runs
-~occ~.
+According to the latest installation instructions in the Admin Manual
+for version 31 (section "Installation and server configuration",
+subsection "Installing from command line", [[https://docs.nextcloud.com/server/stable/admin_manual/installation/command_line_installation.html][here]]), after unpacking and
+setting file permissions, the following ~occ~ command takes care of
+everything. This command currently expects Nextcloud's database and
+user to exist. The following SQL commands create the database and
+user (entered at the SQL prompt of the ~sudo mysql~ command). The
+shell command then runs ~occ~.
#+BEGIN_SRC sql
create database nextcloud
flush privileges;
#+END_SRC
+# Similar SQL is used in [[* Configure Nextcloud]] and [[* Restore Nextcloud]].
+# Those SQL snippets and the above should be kept in sync!
+
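For reference, the database and user creation can be collapsed into
one shell step. This is only a sketch using the example names and
password from this document; the SQL in the block above (including
its elided lines) is the authoritative version.
#+BEGIN_SRC sh
sudo mysql <<'SQL'
create database if not exists nextcloud
    character set utf8mb4 collate utf8mb4_general_ci;
create user if not exists 'nextclouduser'@'localhost'
    identified by 'ippAgmaygyobwyt5';
grant all privileges on nextcloud.* to 'nextclouduser'@'localhost';
flush privileges;
SQL
#+END_SRC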
#+BEGIN_SRC sh
cd /var/www/nextcloud/
-sudo -u www-data php occ maintenance:install \
- --data-dir=/var/www/nextcloud/data \
- --database=mysql --database-name=nextcloud \
- --database-user=nextclouduser \
- --database-pass=ippAgmaygyobwyt5 \
- --admin-user=sysadm --admin-pass=PASSWORD
+sudo -u www-data php occ maintenance:install \
+--database='mysql' --database-name='nextcloud' \
+--database-user='nextclouduser' --database-pass='ippAgmaygyobwyt5' \
+--admin-user='sysadm' --admin-pass='fubar'
#+END_SRC
The =nextcloud/config/config.php= is created by the above command, but
[DHCPServerStaticLease]
MACAddress={{ wild.MAC }}
-Address={{ wild.num |ansible.utils.ipaddr('address') }}
+Address={{ wild_net_cidr |ansible.utils.ipaddr(wild.num) }}
{% endfor %}
#+END_SRC
#+NAME: ufw-forward-nat
#+CAPTION: ~ufw-forward-nat~
#+BEGIN_SRC conf
--A ufw-user-forward -i lan -o isp -j ACCEPT
--A ufw-user-forward -i wild -o isp -j ACCEPT
+-A ufw-before-forward -i lan -o isp -j ACCEPT
+-A ufw-before-forward -i wild -o isp -j ACCEPT
#+END_SRC
-If "the standard ~iptables-restore~ syntax" as it is described in the
-~ufw-framework~ manual page, allows continuation lines, please let us
-know!
-
Forwarding rules are also needed to route packets from the campus VPN
(the ~wg0~ WireGuard™ tunnel device) to the institute's LAN and back.
The public VPN on Front will also be included since its packets arrive
#+NAME: ufw-forward-private
#+CAPTION: ~ufw-forward-private~
#+BEGIN_SRC conf
--A ufw-user-forward -i lan -o wg0 -j ACCEPT
--A ufw-user-forward -i wg0 -o lan -j ACCEPT
--A ufw-user-forward -i wg0 -o wg0 -j ACCEPT
+-A ufw-before-forward -i lan -o wg0 -j ACCEPT
+-A ufw-before-forward -i wg0 -o lan -j ACCEPT
+-A ufw-before-forward -i wg0 -o wg0 -j ACCEPT
#+END_SRC
The third rule above may seem curious; it is. It short circuits
** Configure UFW
The following tasks install the Uncomplicated Firewall (UFW), set its
-policy in =/etc/default/ufw=, install the NAT rules in
-=/etc/ufw/before.rules=, and the Forward rules in
-=/etc/ufw/user.rules= (where the ~ufw-user-forward~ chain
-is... mentioned?).
-
-When Gate is configured by ~./abbey config gate~ as in the example
-bootstrap, enabling the firewall should not be a problem. But when
-configuring a new gate with ~./abbey config new-gate~, enabling the
-firewall could break Ansible's current and future ssh sessions. For
-this reason, Ansible /does not/ enable the firewall.
-
-The administrator must login and execute the following command after
-Gate is configured or new gate is "in position" (connected to old
-Gate's ~wild~ and ~isp~ networks).
-
-: sudo ufw enable
+policy in =/etc/default/ufw=, install the institute's rules in
+=/etc/ufw/before.rules=, and enable the firewall.
#+CAPTION: [[file:roles_t/gate/tasks/main.yml][=roles_t/gate/tasks/main.yml=]]
#+BEGIN_SRC conf :tangle roles_t/gate/tasks/main.yml :noweb no-export
- { line: "DEFAULT_FORWARD_POLICY=\"DROP\"",
regexp: "^DEFAULT_FORWARD_POLICY=" }
-- name: Configure UFW NAT rules.
+- name: Configure UFW rules.
become: yes
blockinfile:
block: |
:POSTROUTING ACCEPT [0:0]
<<ufw-nat>>
COMMIT
- dest: /etc/ufw/before.rules
- insertafter: EOF
- prepend_newline: yes
-
-- name: Configure UFW FORWARD rules.
- become: yes
- blockinfile:
- block: |
*filter
<<ufw-forward-nat>>
<<ufw-forward-private>>
COMMIT
- dest: /etc/ufw/user.rules
+ dest: /etc/ufw/before.rules
insertafter: EOF
prepend_newline: yes
+
+- name: Enable UFW.
+ become: yes
+ ufw: state=enabled
+ tags: actualizer
#+END_SRC
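After the play runs and the firewall is enabled, the rule placement
can be spot-checked on Gate. This is just a sanity check, not part of
the configuration:
#+BEGIN_SRC sh
sudo ufw status verbose
sudo iptables -L ufw-before-forward -n -v    # forwarding rules
sudo iptables -t nat -L POSTROUTING -n -v    # NAT (masquerade) rule
#+END_SRC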
** Configure Campus WireGuard™ Subnet
mysystem "mkdir --mode=700 Secret/root.gnupg";
mysystem ("gpg --homedir Secret/root.gnupg",
- " --batch --quick-generate-key --passphrase ''",
- " root\@core.$pvt");
+ "--batch --quick-generate-key --passphrase ''",
+ "root\@core.$pvt");
mysystem ("gpg --homedir Secret/root.gnupg",
- " --export --armor --output Secret/root-pub.pem",
- " root\@core.$pvt");
+ "--export --armor --output Secret/root-pub.pem",
+ "root\@core.$pvt");
chmod 0440, "root-pub.pem";
mysystem ("gpg --homedir Secret/root.gnupg",
- " --export-secret-key --armor --output Secret/root-sec.pem",
- " root\@core.$pvt");
+ "--export-secret-key --armor --output Secret/root-sec.pem",
+ "root\@core.$pvt");
chmod 0400, "root-sec.pem";
mysystem "mkdir Secret/ssh_admin";
chmod 0700, "Secret/ssh_admin";
- mysystem ("ssh-keygen -q -t rsa"
- ." -C A\\ Small\\ Institute\\ Administrator",
- " -N '' -f Secret/ssh_admin/id_rsa");
+ mysystem ("ssh-keygen -q -t rsa",
+ "-C A\\ Small\\ Institute\\ Administrator",
+ "-N '' -f Secret/ssh_admin/id_rsa");
mysystem "mkdir Secret/ssh_monkey";
chmod 0700, "Secret/ssh_monkey";
mysystem "echo 'HashKnownHosts no' >Secret/ssh_monkey/config";
mysystem ("ssh-keygen -q -t rsa -C monkey\@core",
- " -N '' -f Secret/ssh_monkey/id_rsa");
+ "-N '' -f Secret/ssh_monkey/id_rsa");
mysystem "mkdir Secret/ssh_front";
chmod 0700, "Secret/ssh_front";
#+CAPTION: [[file:private/members-empty.yml][=private/members-empty.yml=]]
#+BEGIN_SRC conf :tangle private/members-empty.yml :tangle-mode u=rw,g=,o=
---
-members:
+members: {}
usernames: []
clients: []
#+END_SRC
print $O "- $user\n";
}
} else {
- print $O "members:\n";
+ print $O "members: {}\n";
print $O "usernames: []\n";
}
if (@{$yaml->{"clients"}}) {
my $core = `mkpasswd -m sha-512 "$epass"`; chomp $core;
my $vault = strip_vault `ansible-vault encrypt_string "$epass"`;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- " playbooks/nextcloud-new.yml",
- " -e user=$user", " -e pass=\"$epass\"");
+ "playbooks/nextcloud-new.yml",
+ "-e user=$user", "-e pass=\"$epass\"",
+ ">/dev/null");
$members->{$user} = { "status" => "current",
"password_front" => $front,
"password_core" => $core,
"password_fetchmail" => $vault };
write_members_yaml $yaml;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- " -t accounts -l core,front playbooks/site.yml");
+ "-t accounts -l core,front playbooks/site.yml",
+ ">/dev/null");
exit;
}
mysystem ("ansible-playbook -e \@Secret/become.yml",
"playbooks/nextcloud-pass.yml",
- "-e user=$user", "-e \"pass=$epass\"");
+ "-e user=$user", "-e \"pass=$epass\"",
+ ">/dev/null");
write_members_yaml $mem_yaml;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- "-t accounts playbooks/site.yml");
+ "-t accounts playbooks/site.yml",
+ ">/dev/null");
my $O = new IO::File;
open ($O, "| sendmail $user\@$domain_priv")
or die "Could not pipe to sendmail: $!\n";
die "$user: does not exist\n" if ! defined $member;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- "playbooks/nextcloud-old.yml -e user=$user");
+ "playbooks/nextcloud-old.yml -e user=$user",
+ ">/dev/null");
$member->{"status"} = "former";
write_members_yaml $yaml;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- "-t accounts playbooks/site.yml");
+ "-t accounts playbooks/site.yml",
+ ">/dev/null");
exit;
}
#+END_SRC
} else {
die "usage: $0 client [debian|android|campus]\n";
}
- my $yaml;
- $yaml = read_members_yaml;
+ my $yaml = read_members_yaml;
my $members = $yaml->{"members"};
my $member = $members->{$user};
die "$user: does not exist\n"
The networks used in the test:
-- ~premises~ :: A NAT Network, simulating the cloud provider's and
+- ~public~ :: A NAT Network, simulating the cloud provider's and
campus ISP's networks. This is the only network with DHCP and DNS
services provided by the hypervisor. It is not the default NAT
network because ~gate~ and ~front~ need to communicate.
connect the host to ~front~.
In this simulation the IP address for ~front~ is not a public address
-but a private address on the NAT network ~premises~. Thus ~front~ is
+but a private address on the NAT network ~public~. Thus ~front~ is
not accessible by the host, by Ansible on the administrator's
notebook. To work around this restriction, ~front~ gets a second
network interface connected to the ~vboxnet2~ network. The address of
following ~VBoxManage~ commands.
#+BEGIN_SRC sh
-VBoxManage natnetwork add --netname premises \
+VBoxManage natnetwork add --netname public \
--network 192.168.15.0/24 \
--enable --dhcp on --ipv6 off
-VBoxManage natnetwork start --netname premises
+VBoxManage natnetwork start --netname public
VBoxManage hostonlyif create # vboxnet0
VBoxManage hostonlyif ipconfig vboxnet0 --ip=192.168.56.10
VBoxManage hostonlyif create # vboxnet1
VBoxManage hostonlyif ipconfig vboxnet2 --ip=192.168.58.1
#+END_SRC
-Note that only the NAT network ~premises~ should have a DHCP server
-enabled.
+Note that only the NAT network ~public~ should have a DHCP server
+enabled, and it should not lease ~192.168.15.3~. So far this works:
+the VirtualBox DHCP service seems to start assigning addresses at
+~192.168.15.5~. The DHCP service is not simply disabled because Gate
+expects to get an address from its (simulated) ISP.
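The hypervisor's DHCP servers, and the ranges they lease, can be
listed with ~VBoxManage~, a quick way to confirm that only the
~public~ network has one:
#+BEGIN_SRC sh
VBoxManage list dhcpservers
VBoxManage natnetwork list
#+END_SRC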
Note also that actual ISPs and clouds will provide Gate and Front with
public network addresses. In this simulation "they" provide addresses
<<test-auth>>
#+END_SRC
-Next, the "extra" network interface is configured with a static IP
-address.
+Next, the network interfaces are configured with static IP addresses.
+In actuality, Front gets no network configuration tweaks. The Debian
+12 default is to broadcast for a DHCP lease on the primary NIC. In
+the cloud this works: the provider responds with an offer, and the
+address offered must be the public, DNS-registered, hard-coded
+~front_addr~.
+
+For testing purposes, the preparation of ~front~ replaces the default
+=/etc/network/interfaces= with a new configuration that statically
+assigns ~front_addr~ to the primary NIC and a testing subnet address
+to the second NIC.
#+CAPTION: [[file:private/test-front-prep][=private/test-front-prep=]]
#+BEGIN_SRC sh :tangle private/test-front-prep :tangle-mode u=rwx,g=,o=
-cat <<EOF | sudo tee /etc/network/interfaces.d/enp0s8 >/dev/null
+( cd /etc/network/; \
+ [ -f interfaces~ ] || sudo mv interfaces interfaces~ )
+cat <<EOF | sudo tee /etc/network/interfaces >/dev/null
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+source /etc/network/interfaces.d/*
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto enp0s3
+iface enp0s3 inet static
+ address 192.168.15.3/24
+ gateway 192.168.15.1
+
+# Testing interface
auto enp0s8
iface enp0s8 inet static
address 192.168.58.3/24
down and moved to the simulated cloud (from the default NAT network).
The following ~VBoxManage~ commands effect the move, connecting the
-primary NIC to ~premises~ and a second NIC to the host-only network
+primary NIC to ~public~ and a second NIC to the host-only network
~vboxnet2~ (making it directly accessible to the administrator's
notebook as described in [[*The Test Networks][The Test Networks]]).
#+BEGIN_SRC sh
-VBoxManage modifyvm front --nic1 natnetwork --natnetwork1 premises
+VBoxManage modifyvm front --nic1 natnetwork --natnetwork1 public
VBoxManage modifyvm front --nic2 hostonly --hostonlyadapter2 vboxnet2
#+END_SRC
VBoxManage modifyvm gate --mac-address1=080027f31679
VBoxManage modifyvm gate --nic1 hostonly --hostonlyadapter1 vboxnet0
VBoxManage modifyvm gate --mac-address2=0800273d42e5
-VBoxManage modifyvm gate --nic2 natnetwork --natnetwork2 premises
+VBoxManage modifyvm gate --nic2 natnetwork --natnetwork2 public
VBoxManage modifyvm gate --mac-address3=0800274aded2
VBoxManage modifyvm gate --nic3 hostonly --hostonlyadapter3 vboxnet1
#+END_SRC
| device | network | simulating | MAC address variable |
|----------+------------+-----------------+----------------------|
| ~enp0s3~ | ~vboxnet0~ | campus Ethernet | ~gate_lan_mac~ |
-| ~enp0s8~ | ~premises~ | campus ISP | ~gate_isp_mac~ |
+| ~enp0s8~ | ~public~ | campus ISP | ~gate_isp_mac~ |
| ~enp0s9~ | ~vboxnet1~ | campus IoT | ~gate_wild_mac~ |
~gate~ is now prepared for configuration by Ansible.
executed. Note that this first run should exercise all of the
handlers, /and/ that subsequent runs probably /do not/.
+Assuming the ~./inst config~ command completed successfully, ~gate~
+is restarted before testing begins. Basic networking tests will fail
+unless the interfaces on ~gate~ are renamed, and nothing less than a
+restart will get ~systemd-udevd~ to apply the installed =.link=
+files.
+
+# Does this work???
+# udevadm control --reload
+# udevadm trigger -s net -c add -a address=08:00:27:f3:16:79
+# udevadm trigger -s net -c add -a address=08:00:27:4a:de:d2
+# udevadm trigger -s net -c add -a address=08:00:27:3d:42:e5
+
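One way to do the restart from the host uses the same ~VBoxManage~
interface as the rest of this chapter. This is just a sketch;
rebooting from a console login on ~gate~ works as well.
#+BEGIN_SRC sh
VBoxManage controlvm gate acpipowerbutton    # ask gate to shut down
# Wait for the VM to power off, then start it again.
VBoxManage startvm gate
#+END_SRC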
** Test Basics
At this point the test institute is just ~core~, ~gate~ and ~front~,
#+BEGIN_SRC sh
ping -c 1 8.8.4.4 # dns.google
-ping -c 1 192.168.15.5 # front_addr
+ping -c 1 192.168.15.3 # front_addr
#+END_SRC
~gate~ and thus ~core~ should be able to resolve internal and public
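For example (a sketch; ~dns.google~ and the institute's private
domain are the examples used elsewhere in this document):
#+BEGIN_SRC sh
# On gate or core: resolve a public name and an internal one.
getent hosts dns.google
getent hosts core.small.private
#+END_SRC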
** The Test Nextcloud
Further tests involve Nextcloud account management. Nextcloud is
-installed on ~core~ as described in [[*Configure Nextcloud][Configure Nextcloud]]. Once
+installed on ~core~ as described in [[*Install Nextcloud][Install Nextcloud]]. Once
=/Nextcloud/= is created, ~./inst config core~ will validate
or update its configuration files.
At this point, ~dick~ can move abroad, from the campus Wi-Fi
(host-only network ~vboxnet1~) to the broader Internet (the NAT
-network ~premises~). The following command makes the change. The
+network ~public~). The following command makes the change. The
machine does not need to be shut down.
#+BEGIN_SRC sh
-VBoxManage modifyvm dick --nic1 natnetwork --natnetwork1 premises
+VBoxManage modifyvm dick --nic1 natnetwork --natnetwork1 public
#+END_SRC
Then the campus VPN is disconnected and the public VPN connected.
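On ~dick~ the switch might look like the following sketch; the
configuration names ~campus~ and ~public~ are placeholders for
whatever the client's WireGuard™ configurations are actually called:
#+BEGIN_SRC sh
sudo wg-quick down campus    # disconnect the campus VPN
sudo wg-quick up public      # connect the public VPN
#+END_SRC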
- Find it in =/home/dick/Maildir/new/=.
- Re-configure Evolution on ~dick~. Edit the ~dick@small.example.org~
mail account (or create a new one?) so that the Receiving Email
- Server name is ~192.168.15.5~, not ~mail.small.private~. The
+ Server name is ~192.168.15.3~, not ~mail.small.private~. The
latter domain name will not work while the campus is disappeared.
In actual use (with Front, not ~front~), the institute domain name
could be used.
mysystem "mkdir --mode=700 Secret/root.gnupg";
mysystem ("gpg --homedir Secret/root.gnupg",
- " --batch --quick-generate-key --passphrase ''",
- " root\@core.$pvt");
+ "--batch --quick-generate-key --passphrase ''",
+ "root\@core.$pvt");
mysystem ("gpg --homedir Secret/root.gnupg",
- " --export --armor --output Secret/root-pub.pem",
- " root\@core.$pvt");
+ "--export --armor --output Secret/root-pub.pem",
+ "root\@core.$pvt");
chmod 0440, "root-pub.pem";
mysystem ("gpg --homedir Secret/root.gnupg",
- " --export-secret-key --armor --output Secret/root-sec.pem",
- " root\@core.$pvt");
+ "--export-secret-key --armor --output Secret/root-sec.pem",
+ "root\@core.$pvt");
chmod 0400, "root-sec.pem";
mysystem "mkdir Secret/ssh_admin";
chmod 0700, "Secret/ssh_admin";
- mysystem ("ssh-keygen -q -t rsa"
- ." -C A\\ Small\\ Institute\\ Administrator",
- " -N '' -f Secret/ssh_admin/id_rsa");
+ mysystem ("ssh-keygen -q -t rsa",
+ "-C A\\ Small\\ Institute\\ Administrator",
+ "-N '' -f Secret/ssh_admin/id_rsa");
mysystem "mkdir Secret/ssh_monkey";
chmod 0700, "Secret/ssh_monkey";
mysystem "echo 'HashKnownHosts no' >Secret/ssh_monkey/config";
mysystem ("ssh-keygen -q -t rsa -C monkey\@core",
- " -N '' -f Secret/ssh_monkey/id_rsa");
+ "-N '' -f Secret/ssh_monkey/id_rsa");
mysystem "mkdir Secret/ssh_front";
chmod 0700, "Secret/ssh_front";
print $O "- $user\n";
}
} else {
- print $O "members:\n";
+ print $O "members: {}\n";
print $O "usernames: []\n";
}
if (@{$yaml->{"clients"}}) {
my $core = `mkpasswd -m sha-512 "$epass"`; chomp $core;
my $vault = strip_vault `ansible-vault encrypt_string "$epass"`;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- " playbooks/nextcloud-new.yml",
- " -e user=$user", " -e pass=\"$epass\"");
+ "playbooks/nextcloud-new.yml",
+ "-e user=$user", "-e pass=\"$epass\"",
+ ">/dev/null");
$members->{$user} = { "status" => "current",
"password_front" => $front,
"password_core" => $core,
"password_fetchmail" => $vault };
write_members_yaml $yaml;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- " -t accounts -l core,front playbooks/site.yml");
+ "-t accounts -l core,front playbooks/site.yml",
+ ">/dev/null");
exit;
}
mysystem ("ansible-playbook -e \@Secret/become.yml",
"playbooks/nextcloud-pass.yml",
- "-e user=$user", "-e \"pass=$epass\"");
+ "-e user=$user", "-e \"pass=$epass\"",
+ ">/dev/null");
write_members_yaml $mem_yaml;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- "-t accounts playbooks/site.yml");
+ "-t accounts playbooks/site.yml",
+ ">/dev/null");
my $O = new IO::File;
open ($O, "| sendmail $user\@$domain_priv")
or die "Could not pipe to sendmail: $!\n";
die "$user: does not exist\n" if ! defined $member;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- "playbooks/nextcloud-old.yml -e user=$user");
+ "playbooks/nextcloud-old.yml -e user=$user",
+ ">/dev/null");
$member->{"status"} = "former";
write_members_yaml $yaml;
mysystem ("ansible-playbook -e \@Secret/become.yml",
- "-t accounts playbooks/site.yml");
+ "-t accounts playbooks/site.yml",
+ ">/dev/null");
exit;
}
} else {
die "usage: $0 client [debian|android|campus]\n";
}
- my $yaml;
- $yaml = read_members_yaml;
+ my $yaml = read_members_yaml;
my $members = $yaml->{"members"};
my $member = $members->{$user};
die "$user: does not exist\n"