GitLab Projects
bash_prompt.md
bash prompt
bash prompt without additional fonts installed
put that in your .bashrc file
# Powerline-style bash prompt using raw ANSI escapes (no extra fonts needed).
# All escapes are wrapped in \[ \] so bash excludes them from line-length math.
default="\[\e[49m\]"            # reset background to terminal default
bold="\[\033[1m\]"
no_bold="\[\033[22m\]"          # SGR 22 = normal intensity; 21 is double-underline on many terminals, not "bold off"
blink="\[\033[5m\]"
no_blink="\[\033[25m\]"
fg_white="\[\033[38;5;255m\]"
fg_orange="\[\033[38;5;214m\]"
fg_darkblue="\[\033[38;5;18m\]"
fg_grey="\[\033[38;5;245m\]"
bg_blue="\[\033[48;5;18m\]"
bg_grey="\[\033[48;5;245m\]"
bg_orange="\[\033[48;5;214m\]"
arrow=$'\xEE\x82\xB0'           # U+E0B0 powerline "right arrow" glyph

# Rebuilds PS1 before every prompt (wired up via PROMPT_COMMAND below).
# Shows: time - last exit status > user > host > cwd > (1-min load average).
prompt_command () {
  # 1-minute load average; the regex accepts '.' or ',' as decimal separator
  # so it works across locales.
  local LOAD
  LOAD=$(uptime | perl -ne '/load average: (\d+[\.,]\d+)/ && print $1')
  # PS1 is a shell variable read by bash itself; no export needed.
  PS1="$bg_blue$fg_white \t - $bold$blink$fg_orange\$? $no_blink$fg_darkblue$bg_grey$arrow$fg_white$bg_grey$no_bold \u $bg_blue$fg_grey$arrow$fg_white \h $fg_darkblue$bg_grey$arrow$fg_white \w $fg_grey$bg_orange$arrow$fg_darkblue$bold ($LOAD) $fg_orange$default$arrow\[$(tput sgr0)\] "
}
PROMPT_COMMAND=prompt_command
You can change the color codes as needed.
helm.md
helm
create chart
helm create myname
using environments
values.yaml
keepLogDays:
_default: 2
dev: 2
prod: 5
esHost:
_default: "elasticsearch.logging.svc"
dev: "elasticsearch.logging.svc"
prod: "elasticsearch.logging.svc"
curatorSchedule:
_default: "10 9 * * *"
dev: "10 9 * * *"
prod: "15 20 * * *"
curatorVersion:
_default: "praseodym/elasticsearch-curator:latest"
dev: "praseodym/elasticsearch-curator:latest"
prod: "praseodym/elasticsearch-curator:latest"
unit_count: {{ pluck .Values.global.env .Values.keepLogDays | first | default .Values.keepLogDays._default }}
{{- if eq .Values.global.env "prod" -}}
{{- end -}}
install or upgrade
helm3 upgrade --install -n logging --set "global.env=dev" logging .
get secrets and encode base64
_helpers.tpl
{{- define "imagePullSecret" }}
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.imageCredentials.registry (printf "%s:%s" .Values.imageCredentials.username .Values.imageCredentials.password | b64enc) | b64enc }}
{{- end }}
in another template call this template
apiVersion: v1
kind: Secret
metadata:
name: myregistrykey
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: {{ template "imagePullSecret" . }}
set variable if/else
// if global.loc == ams1 then es-master-1 else es-master-3
{{- $a := ternary "es-master-1" "es-master-3" (eq .Values.global.loc "ams1") }}
get variables per env
// list
elasticVersion:
_default: 1
prod: 2
dev: 3
// get first element of list elasticVersion
{{ pluck .Values.global.env .Values.elasticVersion | first | default .Values.elasticVersion._default }}
set variables
// list
fluentdLimits:
_default:
memory: "256Mi"
dev:
memory: "256Mi"
prod:
memory: "512Mi"
{{- $fluentdLimits := pluck .Values.global.env .Values.fluentdLimits | first }}
{{ get $fluentdLimits "memory" }}
k8s_federation.md
kubernetes Federation
# go to your gopath
cd /opt/go/src/github.com/
systemctl start docker
git clone https://github.com/kubernetes-sigs/federation-v2.git
cd federation-v2
./scripts/download-binaries.sh
make
export PATH=$PATH:/root/federation-v2/bin
# setup kubectl.conf with contexts of each cluster
# install federation to one of the clusters with helm [https://github.com/kubernetes-sigs/federation-v2/blob/master/charts/federation-v2/README.md]
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
name: tiller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tiller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: kube-system
EOF
helm init --service-account tiller --upgrade -c
install with helm
helm repo add kubefed-charts https://raw.githubusercontent.com/kubernetes-sigs/kubefed/master/charts
helm search kubefed
helm install kubefed-charts/kubefed --name kubefed --version=0.1.0-rc6 --namespace kube-federation-system
For each cluster running kubernetes, create a cluster entry in the federation. Make sure you set the kubeconfig context to the cluster which runs the federation control plane.
# kubefedctl join <name of cluster> --cluster-context <name of context in your kubeconfig> --host-cluster-context <name of context which holds the federation control plane>
kubefedctl join cluster1 --cluster-context ams1 --host-cluster-context ams1 --v=2
kubefedctl join cluster2 --cluster-context fra1 --host-cluster-context ams1 --v=2
kubectl -n kube-federation-system get kubefedclusters.core.kubefed.io
# make sure all clusters are ready
enable api groups on federation
# the core type will be always enabled
kubefedctl enable <your type>
kubectl --context=cluster1 api-resources
create namespace and federate it
kubectl create namespace federatednamespace
kubefedctl federate namespace federatednamespace
# federating a namespace without --contents only enables federation types on the namespace and does not distribute its contents to the other clusters
# federate deployment to all clusters
kubefedctl -n federatednamespace federate deployments.apps nginx
arch.md
boot livecd
loadkeys de-latin1
loadkeys /usr/share/kbd/keymaps/i386/qwertz/de-mobii.map.gz
gdisk /dev/sda
oder
fdisk /dev/sda
3 partitions
/boot => ext4
/ => cryptsetup
/swap => swap
dhcpcd enp4s0
connect wlan
wpa_passphrase SSID Passwort > /etc/wpa_supplicant/wpa_supplicant.conf
wpa_supplicant -i wlp0s1 -D wext -c /etc/wpa_supplicant/wpa_supplicant.conf -B
dhcpcd wlp0s1
disk encryption
cryptsetup --verbose --cipher aes-xts-plain64 --key-size 512 --hash sha512 --iter-time 5000 --use-random luksFormat /dev/sda2
cryptsetup open --type luks /dev/sda2 cryptroot
btrfs filesystem
mkfs.btrfs -L thorko /dev/mapper/cryptroot
mount /dev/mapper/cryptroot /mnt
btrfs subvolume create /mnt/opt
btrfs subvolume create /mnt/home
btrfs subvolume create /mnt/var
btrfs subvolume create /mnt/root
umount /mnt
mount -o subvol=root /dev/mapper/cryptroot /mnt
mkdir /mnt/{home,opt,var,boot}
mount -o subvol=home /dev/mapper/cryptroot /mnt/home
...
mount /dev/sda1 /mnt/boot
/etc/fstab
proc /proc proc nosuid,noexec,nodev 0 0
sysfs /sys sysfs nosuid,noexec,nodev 0 0
tmpfs /run tmpfs defaults 0 0
/dev/sda1 /boot ext3 rw,relatime,data=ordered 0 0
/dev/mapper/root / btrfs rw,relatime,ssd,space_cache,discard,autodefrag,compress=lzo,subvol=_active/rootvol 1 1
/dev/mapper/root /home btrfs rw,acl,relatime,ssd,space_cache,discard,autodefrag,compress=lzo,subvol=_active/home 0 0
/dev/mapper/root /var btrfs rw,relatime,ssd,space_cache,discard,autodefrag,compress=lzo,subvol=_active/var 0 0
/dev/mapper/root /opt btrfs rw,acl,relatime,ssd,space_cache,discard,autodefrag,compress=lzo,subvol=_active/opt 0 0
install system
pacstrap /mnt base base-devel wpa_supplicant
create fstab
genfstab -U /mnt >> /mnt/etc/fstab
arch-chroot /mnt
ln -sf /usr/share/zoneinfo/Region/City /etc/localtime
hwclock --systohc
locale-gen
echo LANG=en_US.UTF-8 > /etc/locale.conf
echo LC_COLLATE=C >> /etc/locale.conf
echo LANGUAGE=en_US >> /etc/locale.conf
echo KEYMAP=de-latin1 > /etc/vconsole.conf
echo FONT=lat9w-16 >> /etc/vconsole.conf
vim /etc/hostname
thorstek-xm1
vim /etc/hosts
127.0.0.1 localhost.localdomain localhost
::1 localhost.localdomain localhost
127.0.1.1 myhostname.localdomain myhostname
create crypttab
vim /etc/crypttab
# <target name> <source device> <key file> <options>
root /dev/sda2 none luks
initramfs
/etc/mkinitcpio.conf
HOOKS="... keyboard keymap block encrypt ... filesystems ..."
mkinitcpio -p linux
root password
passwd
bootloader
pacman -Sy grub vim sudo
vim /etc/default/grub
GRUB_CMDLINE_LINUX="cryptdevice=/dev/sda2:cryptroot"
GRUB_ENABLE_CRYPTODISK=y
grub-mkconfig -o /boot/grub/grub.cfg
check for insmod luks in grub.cfg
grub-install --target=i386-pc /dev/sda
set up network
ip link set eth0 up
dhcpcd
pacman -S xorg-server xorg-xinit
pacman -S xorg-drivers
pacman -S xf86-input-synaptics
vim /etc/resolv.conf
vim /etc/pacman.d/mirrorlist
useradd -m -g users -s /bin/bash thorstek
pacman -S acpid ntp dbus avahi cups cronie
systemctl enable acpid
systemctl enable ntpd
systemctl enable avahi-daemon
systemctl enable org.cups.cupsd.service
localectl set-x11-keymap de pc105 de_nodeadkeys
pacman -S ttf-dejavu
cp /etc/X11/xinit/xinitrc ~/.xinitrc
plasma
pacman -S plasma kde-l10n-de
pacman -S plasma-wayland-session
pacman -S kde-applications
pacman -S sddm
pacman -S sddm-kcm
systemctl enable sddm
pacman
remove package
pacman -R <package>
search
pacman -Ss <package>
pacman -Ss '^vim-'
upgrade
pacman -Syu
list installed
pacman -Q
search file in package
pacman -Fs <file>
rollback
Skip a package from upgrade. Warning: be careful when skipping packages, since partial upgrades are unsupported.
/etc/pacman.conf
IgnorePkg=linux
IgnoreGroup=gnome
plymouth
add splash to /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash loglevel=3"
GRUB_CMDLINE_LINUX="cryptdevice=/dev/sda2:cryptroot"
GRUB_ENABLE_CRYPTODISK=y
mkinitcpio.conf
MODULES=(i915)
HOOKS=(...udev plymouth .. keyboard keymap block plymouth-encrypt filesystems...)
install plymouth
set theme
plymouth-set-default-theme -l
plymouth-set-default-theme
splunk.md
search for postfix mail transactions
postfix/* | rex field=_raw "[^:]+:[^:]+:[^:]+: (?<MaybeUnique>\w+):" | transaction MaybeUnique | search "ejkk.de"
install
tar -xzvf <splunk.tar.gz> -C /opt
cd /opt/splunk/bin
./splunk start
enable at boot
cd /opt/splunk/bin
./splunk enable boot-start
setup new receiver
Settings->Forwarding and Receiving New Receiver port 8334
add monitor
./splunk add monitor /var/log/messages
xorg.md
set custom resolution on external monitors
cvt <width> <height> <refresh rate>
xrandr --newmode "1680x1050_60.00" 146.25 1680 1784 1960 2240 1050 1053 1059 1089 -hsync +vsync
xrandr --addmode VGA-0 1680x1050_60.00
xrandr --output VGA-0 --mode 1680x1050_60.00
sensu-go.md
prereq
kubectl run -i --tty busybox --image=busybox --restart=Never -- sh
get the source
in scripts/k8s/sensu-go
kubectl create namespace monitoring
deploy the kube-state-metrics
kubectl apply -f kubernetes/
install sensuctl
Start
install sensu-backend
kubectl create -f sensu-backend.yaml
port-forward to your machine
kubectl port-forward sensu-backend-.... 8080:8080 8081:8081 3000:3000
install sensuctl bash completion
source <(sensuctl completion bash)
install sensu agents and influxdb
kubectl -n monitoring create configmap influxdb-config --from-file influxdb.conf
kubectl create -f influxdb.sensu.yaml
setup sensu pipeline to store metrics in influxdb
sensuctl namespace create rzneo
sensuctl config set-namespace rzneo
sensuctl create --file influxdb-handler.yaml
sensuctl create --file influxdb.yaml
create asset to get kubernetes metrics
sensuctl create --file prometheus-collector.yaml
deploy grafana
kubectl -n monitoring create configmap grafana-provisioning-datasources --from-file=grafana-provisioning-datasources.yaml
kubectl -n monitoring create configmap grafana-provisioning-dashboards --from-file=grafana-provisioning-dashboards.yaml
kubectl apply -f grafana.sensu.yaml
deploy sensu-agent daemonset
kubectl apply -f sensu-agent-daemonset.yaml
sensuctl create --file kube-state-prometheus.yaml
add vm monitoring
create checks
sensuctl asset create sensu-ruby-runtime --url "https://assets.bonsai.sensu.io/03d08cdfc649500b7e8cd1708bb9bb93d91fea9e/sensu-ruby-runtime_0.0.8_ruby-2.4.4_centos_linux_amd64.tar.gz" --sha512 "7b254d305af512cc524a20a117c601bcfae0d51d6221bbfc60d8ade180cc1908081258a6eecfc9b196b932e774083537efe748c1534c83d294873dd3511e97a3"
sensuctl asset create sensu-plugins-cpu-checks --url "https://assets.bonsai.sensu.io/68546e739d96fd695655b77b35b5aabfbabeb056/sensu-plugins-cpu-checks_4.0.0_centos_linux_amd64.tar.gz" --sha512 "518e7c17cf670393045bff4af318e1d35955bfde166e9ceec2b469109252f79043ed133241c4dc96501b6636a1ec5e008ea9ce055d1609865635d4f004d7187b"
sensuctl check create metrics-cpu --command 'metrics-cpu-pcnt-usage.rb' --interval 60 --subscriptions system --runtime-assets sensu-plugins-cpu-checks,sensu-ruby-runtime
memory
sensuctl asset create sensu-plugins-memory-checks --url "https://assets.bonsai.sensu.io/c5391d4ae186484226732344b35cf95c0b07b8ec/sensu-plugins-memory-checks_4.0.0_centos_linux_amd64.tar.gz" --sha512 "ea297a85aa3612da7f78d948f9784443fffac511040c5130a2dcde7191a0004044c2ef881e665520cbc64431955ab19920d84de6b5fed85c63da7091c4b93bf0"
sensuctl check create metrics-memory --command 'metrics-memory.rb' --interval 60 --subscriptions system --runtime-assets sensu-plugins-memory-checks,sensu-ruby-runtime
set check output metric handler
sensuctl check set-output-metric-handlers metrics-cpu influxdb
sensuctl check set-output-metric-format metrics-cpu graphite_plaintext
sensuctl check set-output-metric-handlers metrics-memory influxdb
sensuctl check set-output-metric-format metrics-memory graphite_plaintext
demo app monitoring
create user with admin
sensuctl namespace create odd
sensuctl config set-namespace odd
sensuctl role create dev --verb get,list,create,update,delete --resource \* --namespace odd
sensuctl role-binding create dev --role dev --group dev
sensuctl user create odd --password odd12345 --namespace odd -g dev
change user
sensuctl configure -n --password 'odd12345' --username odd --url http://localhost:8080 --format tabular --namespace odd
create demo app
kubectl create namespace odd
kubectl apply -f dummy.sensu.yaml
create asset
sensuctl create --file check-plugins.yaml
create check
sensuctl create --file dummy-app-healthz.yaml
docker.md
docker doesn't start
rm -rf /var/lib/docker/*
list images
docker images
list images in registry
curl -k https://myregistry:5000/v2/_catalog | jq
list tags
curl -k https://myregistry:5000/v2/base/alpine/tags/list | jq
export DOCKER_HOST=tcp://x10486.rz2012.adm.denic.de:2375
docker build - < Jenkins_Docker
build image from Dockerfile
docker build --tag=test -f ./Dockerfile .
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-pgdata-prod
labels:
app: postgresql
env: default
spec:
capacity:
storage: 2G
accessModes:
- ReadWriteOnce
nfs:
server: kube-master-01.internal.ng.freedom-id.de
path: "/mnt/nfs-ng-fra1-part2/postgres/production/pg-master"
list all containers
docker ps -a
run new container
docker run <image>
start existing container
docker start <containerid>
map host network (even localhost) to container
docker run --net="host" <image>
docker exec -it <container> /bin/sh
docker run --entrypoint "/usr/bin/top" -it <image>
docker delete image
docker rm $(docker ps -a -q)
docker rmi <image>
remove stopped containers
docker container ls -a --filter status=exited --filter status=created
# remove all stopped containers
docker container prune
remove docker volumes
docker volume ls
docker volume rm 4e678419bf18adddc3c7a4b23457512af8913af888ba7243dec4b6db64293163
expose port
--expose=22
docker exec -ti <container_name> bash
docker own registry
# copy your cert to /etc/docker/certs.d/<docker registry name>
mkdir -p /etc/docker/certs.d/docker-registry.cloud.denic.de
cp docker-registry.crt /etc/docker/certs.d/docker-registry.cloud.denic.de/
push image
docker commit <container> <image>
docker tag <image> <registry image>
docker push <registry image>
docker commit eb46aefe135c ipnanny/ipnanny:1.0
docker tag ipnanny/ipnanny:1.0 dcr.adm.denic.de/ipnanny/ipnanny:1.0
docker push dcr.adm.denic.de/ipnanny/ipnanny:1.0
show mounted path
docker inspect -f "{{ .Mounts }}" <container>
delete registry
curl -i -k -X GET https://kube-registry.freedom-id.de/v2/tools/thorstek-postfix/manifests/latest
https://forums.docker.com/t/delete-repository-from-v2-private-registry/16767
docker Xorg
xhost +
docker run -ti --memory 2gb --rm -e DISPLAY=unix:0 -v /tmp/.X11-unix:/tmp/.X11-unix -v=/dev/dri:/dev/dri:rw myimage mycmd
tools
- ctop monitors the containers on the hosting server
- photon linux distro for hosting containers. It comes with security flags enabled and docker already included
docker registry cleanup
https://github.com/fraunhoferfokus/deckschrubber
enable ipv6
- make sure you can ping from your host
ping -6 heise.de
- get your ipv6 network
ip a
- set forwarding
sysctl net.ipv6.conf.default.forwarding=1 sysctl net.ipv6.conf.all.forwarding=1 sysctl net.ipv6.conf.wlan0.proxy_ndp=1
- create new docker network
docker network create --ipv6 --driver bridge --subnet="2a02:568:122:1:9eb6:d0ff:fefc::/80" --opt "com.docker.network.bridge.name"="br-v6slave" v6slave
- create docker container and get ipv6 address
docker run -it --network=v6slave alpine /bin/sh # auf dem Host ip -6 neigh add proxy 2a02:568:122:1:9eb6::2 dev wlan0