Friday, January 31, 2025
Synology: nginx(-proxy-manager) and rar2fs with docker

#!/bin/bash

# NGINX
mkdir -p /volume1/unpacked/rar2fs
docker run --name nginx -v /volume1/unpacked/rar2fs:/usr/share/nginx/html:ro -p 5088:80 -d nginx:stable

# NGINX-PROXY-MANAGER
mkdir -p /volume1/docker/nginx-proxy-manager/data
mkdir -p /volume1/docker/nginx-proxy-manager/letsencrypt
docker run -d \
  --name=nginx-proxy-manager \
  -p 5080:80 \
  -p 5443:443 \
  -p 5081:81 \
  -v /volume1/docker/nginx-proxy-manager/data:/data:rw \
  -v /volume1/docker/nginx-proxy-manager/letsencrypt:/etc/letsencrypt:rw \
  jc21/nginx-proxy-manager:latest

# RAR2FS
docker stop nginx
docker stop nginx-proxy-manager
docker stop rar2fs
docker rm rar2fs
mount --make-shared /volume1/
docker run \
  -d \
  --init \
  --privileged \
  --name rar2fs \
  --cap-add MKNOD \
  --cap-add SYS_ADMIN \
  --device /dev/fuse \
  --network none \
  -v /volume1/stuff:/source \
  -v /volume1/unpacked/rar2fs:/destination:rshared \
  zimme/rar2fs
docker start nginx-proxy-manager
docker start nginx
Sunday, February 18, 2024
docker rar2fs on synology
If you do not want rar2fs to modify your files when opening them, make sure /source is mounted read-only (ro):
docker run \
  -d \
  --init \
  --name rar2fs \
  --cap-add MKNOD \
  --cap-add SYS_ADMIN \
  --device /dev/fuse \
  --network none \
  --security-opt apparmor:unconfined \
  -v /volume1/rarfiles:/source:ro \
  -v /volume1/unrarred:/destination:rshared \
  zimme/rar2fs
Tuesday, January 30, 2024
configure nginx
I want to use Certbot with Let’s Encrypt, but I don’t want my webserver to hand over the certificate to everyone knocking at my front door at port 443.
Here’s how: I presume you have nginx and certbot installed.
Generate a self-signed certificate:
mkdir /etc/nginx/ssl/
sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/nginx/ssl/nginx.key -out /etc/nginx/ssl/nginx.crt
Now use this certificate for the default listener. Also respond with HTTP 444 (nginx closes the connection without sending a response).
server {
    server_name _;
    listen 80 default_server;
    listen 443 ssl default_server;
    # sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/nginx/ssl/nginx.key -out /etc/nginx/ssl/nginx.crt
    ssl_certificate     /etc/nginx/ssl/nginx.crt;
    ssl_certificate_key /etc/nginx/ssl/nginx.key;
    return 444; # no response
}
After that, all you have to do is create a file in /etc/nginx/sites-enabled/, e.g. blog.mydomain.com:
server {
    listen 443 ssl;
    server_name blog.mydomain.com;
    root /var/www/blog.mydomain.com;
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
}
Reload nginx.
Then run Certbot and follow the steps:
certbot --nginx --staple-ocsp -d blog.mydomain.com
Connecting over SSL without the proper hostname will now be presented with the self-signed certificate, and the connection is closed without a response.
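A quick way to verify from a client machine (a sketch assuming openssl and curl are installed; replace YOUR_SERVER_IP with your real address):

# Default listener: should present the self-signed certificate
openssl s_client -connect YOUR_SERVER_IP:443 </dev/null 2>/dev/null | grep subject

# With proper SNI: should present the Let's Encrypt certificate
openssl s_client -connect YOUR_SERVER_IP:443 -servername blog.mydomain.com </dev/null 2>/dev/null | grep subject

# The 444 shows up as "Empty reply from server"
curl -vk https://YOUR_SERVER_IP/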
Wednesday, December 13, 2023
PL2303 usb-to-serial
For some reason this PL2303 won’t even work on Linux and thus requires Windows.
Even then, you need this fix anno 2023.
Wednesday, February 10, 2021
Azure/O365/Teams authentication and monitoring bash curl scripts
Authorize for Teams.
Replace YOUR_TENANT_ID, YOUR_EMAIL and YOUR_PASSWORD.
Use one of these client_ids, depending on your use case:
1fec8e78-bce4-4aaf-ab1b-5451cc387264 (Teams mobile/desktop application)
5e3ce6c0-2b1f-4285-8d4b-75ee78787346 (Teams web application)
auth.sh:
#!/bin/bash
curl -s -X POST https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/token \
  -c cookies.txt \
  -o auth.blob \
  -F grant_type=password \
  -F resource=https://teams.microsoft.com/ \
  -F client_id=1fec8e78-bce4-4aaf-ab1b-5451cc387264 \
  -F username=YOUR_EMAIL \
  -F password=YOUR_PASSWORD
This saves your bearer token, among other fields, to auth.blob as a JSON object.
Because the bearer token is only valid for a certain period of time, you’ll need to refresh it. Here’s how. You’ll need jq installed to parse the JSON object.
refresh.sh:
#!/bin/bash
REFRESHTOKEN=`cat auth.blob | jq ".refresh_token" | sed 's/"//g'`
curl -s -X POST https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/token \
  -c cookies.txt \
  -o auth.blob \
  -F grant_type=refresh_token \
  -F resource=https://teams.microsoft.com/ \
  -F client_id=1fec8e78-bce4-4aaf-ab1b-5451cc387264 \
  -F refresh_token=$REFRESHTOKEN
In your script you can keep repeating actions, but to keep your token active, use the following piece of code:
if [ -f "auth.blob" ]; then
  EXPIRES=`cat auth.blob | jq ".expires_on" | sed 's/"//g'`
  NOW=`date +%s`
  TTL=`expr $EXPIRES - $NOW`
  if [ $TTL -lt 60 ]; then
    echo "time for a refresh!"
    ./refresh.sh
  fi
else
  echo "no previous auth present!"
  ./auth.sh
  EXPIRES=`cat auth.blob | jq ".expires_on" | sed 's/"//g'`
  NOW=`date +%s`
  TTL=`expr $EXPIRES - $NOW`
fi
Now you can do the cool stuff like query your calendar or whatever:
#!/bin/bash
BEARER=`cat auth.blob | jq ".access_token" | sed 's/"//g'`
curl -s --write-out "%{http_code}|%{time_total}\n" -o bla.txt "https://teams.microsoft.com/api/mt/emea/beta/me/calendarEvents?StartDate=2021-02-07T23:00:00.000Z&EndDate=2021-02-14T23:00:00.000Z" \
  -H "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Teams/1.3.00.30866 Chrome/80.0.3987.165 Electron/8.5.1 Safari/537.36" \
  -H "authorization: Bearer $BEARER"
Or verify your local timezone:
#!/bin/bash
BEARER=`cat auth.blob | jq ".access_token" | sed 's/"//g'`
date "+%Y.%m.%e %T %N"
curl -v 'https://teams.microsoft.com/api/mt/part/emea-03/beta/me/calendarEvents/timeZoneSettingsWithOffset?timezone=Europe%2FAmsterdam' \
  -H "authorization: Bearer $BEARER" \
  -H 'authority: teams.microsoft.com'
echo ""
date "+%Y.%m.%e %T %N"
Sunday, December 6, 2020
The Linux Desktop project
Since my work laptop is too restricted, I’m trying to set up Ubuntu on a USB stick and boot from there.
Actually, it has proven to be a very smooth experience so far. I’m impressed by the overall speed and battery performance.
A couple of things I must not forget.
WORK IN PROGRESS
Get some essentials:
sudo apt install curl ffmpeg keepassxc
Latest Google Chrome Browser: link
Latest Citrix Workspace (Receiver): link
Latest Citrix RTME (HDX for Skype): link
After installing the ICA client:
sudo ln -s /usr/share/ca-certificates/mozilla/* /opt/Citrix/ICAClient/keystore/cacerts
cd /opt/Citrix/ICAClient/keystore/cacerts
sudo wget https://www.quovadisglobal.com/wp-content/files/media/quovadis_quovadisrootca2.pem
sudo /opt/Citrix/ICAClient/util/ctx_rehash

(For Sectigo, go to https://support.sectigo.com/articles/Knowledge/Sectigo-Intermediate-Certificates, download the RSA OV bundle and do the same.)
Modify /opt/Citrix/ICAClient/config/wfclient.template before making the first connection (“~/Library/Application Support/Citrix Receiver/Config” on macOS, by the way):
MSLocaleNumber=0x00000413
KeyboardLayout=US-International
Also: modify /opt/Citrix/ICAClient/config/All_Regions.ini
MouseSendsControlV=False
If you use Wayland and experience problems with special key combos like Alt-Tab:
gsettings set org.gnome.mutter.wayland xwayland-grab-access-rules "['Wfica']"
gsettings set org.gnome.mutter.wayland xwayland-allow-grabs true
For other apps: if you don’t know which value to use, run xprop WM_CLASS and click the window.
Lastly:
sudo apt-get install --reinstall libcanberra-gtk-module
/opt/Citrix/ICAClient/util/configmgr   (for mapping local drives)
Install Microsoft Teams:
curl https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
echo "deb [arch=amd64] https://packages.microsoft.com/repos/ms-teams stable main" | sudo tee /etc/apt/sources.list.d/teams.list
sudo apt update
sudo apt install teams
Connecting to Exchange Web Services (for calendar sync):
sudo apt install evolution-ews
Google Drive support, e.g. for KeePass:
sudo add-apt-repository ppa:alessandro-strada/ppa
sudo apt-get update
sudo apt-get install google-drive-ocamlfuse

# edit ~/.gdfuse/default/config and set mv_keep_target=true

mkdir ~/Documents/GoogleDrive
google-drive-ocamlfuse ~/Documents/GoogleDrive
Startup file for the Google Drive mount and an offline backup of the KeePass databases:
#!/bin/bash
google-drive-ocamlfuse ~/Documents/GoogleDrive
if [ ! -d ~/BACKUP/keepass/ ]; then
  mkdir -p ~/BACKUP/keepass/
fi
if [ -d ~/Documents/GoogleDrive/keepass/ ]; then
  cp -f ~/Documents/GoogleDrive/keepass/*.kdbx ~/BACKUP/keepass/
else
  echo Offline
fi
gedit JSON formatter:
Preferences - Plugins - enable External Tools
Preferences - Manage External Tools
"+", give a name e.g. "Format Json", shortcut key Ctrl+Alt+J, input=Current Document, output=Replace current document
code:
#! /usr/bin/env python
import json
import sys

j = json.load(sys.stdin)
print(json.dumps(j, sort_keys=True, indent=2))
Kodi:
sudo apt-get install software-properties-common
sudo add-apt-repository ppa:team-xbmc/ppa
sudo apt-get update
sudo apt-get install kodi
Youtube-dl:
sudo curl -L https://yt-dl.org/downloads/latest/youtube-dl -o /usr/local/bin/youtube-dl
sudo chmod a+rx /usr/local/bin/youtube-dl
sudo ln -s /usr/bin/python3 /usr/local/bin/python
Wednesday, June 24, 2020
iptables log specific connections
An example of how to allow certain known connections (e.g. UniFi access points) and log unknown connection attempts.
This is done by adding a chain called LOGDROP, appending packets that match the criteria (tcp/8080) to that chain, logging the packets, and dropping them.
iptables:
#!/bin/bash

AP01="192.168.0.1"
AP02="192.168.0.2"
AP03="192.168.0.3"

# Resetting ...
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -F
iptables -X

# Setting default policy on incoming traffic
iptables -P INPUT DROP     # DENY INCOMING CONNECTIONS
iptables -P FORWARD DROP   # THIS IS NOT A ROUTER

# allowed accesspoints
iptables -A INPUT -p tcp --dport 8080 -s $AP01 -j ACCEPT   # UNIFI - AP01
iptables -A INPUT -p udp --dport 3478 -s $AP01 -j ACCEPT
iptables -A INPUT -p tcp --dport 8080 -s $AP02 -j ACCEPT   # UNIFI - AP02
iptables -A INPUT -p udp --dport 3478 -s $AP02 -j ACCEPT
iptables -A INPUT -p tcp --dport 8080 -s $AP03 -j ACCEPT   # UNIFI - AP03
iptables -A INPUT -p udp --dport 3478 -s $AP03 -j ACCEPT

# log AP connections that aren't allowed
iptables -N LOGDROP
iptables -A INPUT -p tcp --dport 8080 -j LOGDROP
iptables -A LOGDROP -j LOG --log-prefix "IPTables-Dropped: " --log-level 7
iptables -A LOGDROP -j DROP

# Make persistent
iptables-save >/etc/iptables/rules.v4
Create a file in /etc/rsyslog.d/ called “30-unifi-accesspoints.conf” with the following content:
:msg,contains,"IPTables-Dropped: " /var/log/unifi_accesspoints.log
and restart rsyslog
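For example, on a systemd-based Ubuntu:

sudo systemctl restart rsyslog
# then watch for dropped connection attempts
sudo tail -f /var/log/unifi_accesspoints.log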
Wednesday, May 13, 2020
Mediainfo with rar support
Mediainfo is a very nice utility, but it works even better with rar support.
It took me a while to compile it successfully, so here are the steps. Easy once you know it :~
First, install the current version of the normal MediaInfo, plus other requirements we’ll need later.
sudo -s
apt install mediainfo libmediainfo-dev git build-essential
Then get the latest source code from the mediaarea.net website. Currently version 20.03.
mkdir /root/installers/ && cd /root/installers
wget https://mediaarea.net/download/binary/mediainfo/20.03/MediaInfo_CLI_20.03_GNU_FromSource.tar.gz
tar zxvf MediaInfo_CLI_20.03_GNU_FromSource.tar.gz
cd MediaInfo_CLI_GNU_FromSource
./CLI_Compile.sh
cd MediaInfo/Project/GNU/CLI && make install
Now we’re going to add the rar functionality. It depends on a modified version of libdvdread, also from lundman, that we need first.
cd /root/installers
wget http://lundman.net/ftp/dvdread/libdvdread-4.2.0.plus.tar.gz
tar zxvf libdvdread-4.2.0.plus.tar.gz
cd libdvdread-4.2.0.plus
./configure && make && make install
And now we’re going to build the mediainfo-rar version:
cd /root/installers
wget "http://www.lundman.net/ftp/mediainfo-rar/mediainfo-rar-1.4.0.tar.gz"
tar zxvf mediainfo-rar-1.4.0.tar.gz
cd mediainfo-rar-1.4.0
./configure && make && make install
Run it: mediainfo-rar.
If it complains about “error while loading shared libraries: libdvdread.so.4”, fix it with:
ln -s /usr/local/lib/libdvdread.so.4 /lib/x86_64-linux-gnu/libdvdread.so.4
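Alternatively, since make install put the library in /usr/local/lib, refreshing the dynamic linker cache may be enough (standard ldconfig, no symlink needed):

sudo ldconfig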
That’s all.
Backup links in case the sources ever disappear:
MediaInfo_CLI_20.03_GNU_FromSource.tar.gz
libdvdread-4.2.0.plus.tar.gz
mediainfo-rar-1.4.0.tar.gz
Monday, May 11, 2020
DSM6: Run services like inetd in Synology debian-chroot
Somehow systemd does not run in the debian-chroot, so if inetd works for you, here’s how:
SSH to your Synology:
sudo -s
chroot /volume1/@appstore/debian-chroot/var/chroottarget /bin/bash
apt install wget tcpd zip unzip openssl lftp openbsd-inetd
Install software of choice. Then:
service openbsd-inetd start
exit
Auto-start the inetd service with the debian-chroot:
sqlite3 /volume1/@appstore/debian-chroot/var/debian-chroot.db
INSERT INTO services VALUES ('0', 'INETD', '/etc/init.d/openbsd-inetd','ps -p $(cat /var/run/inetd.pid)');
.quit
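To verify the row was added, query the same database:

sqlite3 /volume1/@appstore/debian-chroot/var/debian-chroot.db "SELECT * FROM services;"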
DSM6: Create a Synology x64 debian chroot
1 Install the Synology “noarch” package
Go to the Package Center, then Settings
Trusted sources, “Synology Inc. and trusted publishers”
Package Sources, Add, “SynoCommunity” + “http://packages.synocommunity.com/”
Community, install Python (v2.x, not v3) and nano
Manual Install, debian-chroot_noarch-all_8.4-7.spk but DO NOT “Run after installation”
2 Fix the DSM Interface
SSH to your Synology
sudo -s
cd /volume1/@appstore/debian-chroot/env/bin
./pip install click
nano /var/packages/debian-chroot/target/app/debian-chroot.js
Then replace
"url": "3rdparty/debian-chroot/debian-chroot.cgi/direct/router", with "url": "/webman/3rdparty/debian-chroot/debian-chroot.cgi/direct/router", and: 'url': '3rdparty/debian-chroot/debian-chroot.cgi/direct/poller', with 'url': '/webman/3rdparty/debian-chroot/debian-chroot.cgi/direct/poller',
And alter the onClose function:
onClose: function () {
    this.doClose();
    this.mainPanel.onDeactivate();
    return true;
},
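If you prefer to script the two URL replacements, here is a minimal sed sketch (it assumes the paths above, backs up the original first, and only covers the URLs; the onClose change still has to be done by hand):

cd /var/packages/debian-chroot/target/app
cp debian-chroot.js debian-chroot.js.bak
sed -i "s|\"3rdparty/debian-chroot|\"/webman/3rdparty/debian-chroot|g; s|'3rdparty/debian-chroot|'/webman/3rdparty/debian-chroot|g" debian-chroot.js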
3 Replace the binaries with x64
Remove old binaries:
cd /volume1/@appstore/debian-chroot/var
rm -rf chroottarget
Put the x64 chroot.tar.gz in the current directory
tar zxvf chroot.tar.gz
echo "chroot" >/volume1/@appstore/debian-chroot/var/chroottarget/etc/hostname
cp /etc/resolv.conf /volume1/@appstore/debian-chroot/var/chroottarget/etc/resolv.conf
touch /usr/local/debian-chroot/var/installed
If you created a chroot for a different architecture than x64, use the following command. Otherwise skip this.
chroot /volume1/@appstore/debian-chroot/var/chroottarget /debootstrap/debootstrap --second-stage
The chroot is now installed. Start it:
/var/packages/debian-chroot/scripts/start-stop-status start
Enter the chroot:
chroot /volume1/@appstore/debian-chroot/var/chroottarget /bin/bash
Post-installation steps:
apt update && apt upgrade && apt autoremove
apt-get install locales
dpkg-reconfigure locales
  -> only "[*] en_US.UTF-8 UTF-8"
  -> system default: en_US.UTF-8
dpkg-reconfigure tzdata
  -> set correct timezone, e.g. Europe, Amsterdam
Optional
If you want extra mounts in your chroot, look in:
/var/packages/debian-chroot/scripts/start-stop-status
Example: add a Synology share called stuff to the chroot.

Add to the BOTTOM of all mount commands in the start_daemon section of the script:

grep -q "${CHROOTTARGET}/mnt/site " /proc/mounts || mount -o bind /volume1/stuff ${CHROOTTARGET}/mnt/site

Add to the TOP of all umount commands in the stop_daemon section of the script:

umount ${CHROOTTARGET}/mnt/site
Reboot your Synology.
Create debian x64 chroot files (for Synology debian-chroot)
On your currently installed Debian x64 installation:
sudo apt install debootstrap
sudo debootstrap stable chroottarget
sudo tar -cvzf chroot.tar.gz chroottarget
Save the chroot.tar.gz
The above creates a Debian chroot. Here’s how to make an Ubuntu one (jammy is currently the latest LTS):
debootstrap jammy chroottarget/ http://archive.ubuntu.com/ubuntu/
If you need to create a chroot for a different architecture, e.g. armhf, the second command would be:
sudo debootstrap --foreign --arch armhf stable chroottarget
Thursday, July 18, 2019
Save and re-install debian/ubuntu packages
Save the currently installed packages to a text file:
dpkg -l | grep ^ii | awk '{print $2}' > installed.txt
Re-install the packages from the text file:
sudo apt-get install $(cat installed.txt)
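An alternative sketch using dpkg’s own selection mechanism (standard dpkg/apt commands; this also restores packages marked on hold):

# save
dpkg --get-selections > selections.txt
# restore
sudo dpkg --set-selections < selections.txt
sudo apt-get dselect-upgrade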
Sunday, January 14, 2018
Ubiquiti Unifi Controller on Ubuntu LTS
Plenty of stuff you can find on the internet.
But for my own references:
Basic Ubuntu LTS installation.
If you’re on a public IP, first get your firewall in order. Then install Unifi.
Firewall
Make sure you’re root (sudo -s), then:
apt-get install netfilter-persistent
service netfilter-persistent start
invoke-rc.d netfilter-persistent save
mkdir /etc/iptables/
In this example:
1.2.3.4 = trusted machine that is allowed to connect to the Unifi controller, probably your own PC
4.5.6.7 = site 1 with APs and other Ubiquiti gear
6.7.8.9 = site 2 with APs and other Ubiquiti gear
Ports tcp/8080 and udp/3478 are all you need between your Ubiquiti equipment and your controller (see link).
Save the following to firewall.sh and execute it (replace the IPs with real ones):
#!/bin/bash

# Resetting ...
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -F

# Setting default policy on incoming traffic
iptables -P INPUT DROP     # DENY INCOMING CONNECTIONS
iptables -P FORWARD DROP   # THIS IS NOT A ROUTER

# Exceptions to default policy
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT   # FOR INITIATED CONNECTIONS FROM THIS HOST
iptables -A INPUT -i lo -j ACCEPT                                  # MUSTHAVE (e.g. for MongoDB bind to localhost)
iptables -A INPUT -p tcp --dport 22 -j ACCEPT                      # SSH
iptables -A INPUT -p icmp --icmp-type 8 -j ACCEPT                  # PING

# unifi
iptables -A INPUT -p tcp --dport 8443 -s 1.2.3.4 -j ACCEPT   # Connections from management host
iptables -A INPUT -p tcp --dport 8080 -s 4.5.6.7 -j ACCEPT   # UNIFI - INFORM - site1
iptables -A INPUT -p udp --dport 3478 -s 4.5.6.7 -j ACCEPT   # UNIFI - STUN - site1
iptables -A INPUT -p tcp --dport 8080 -s 6.7.8.9 -j ACCEPT   # UNIFI - INFORM - site2
iptables -A INPUT -p udp --dport 3478 -s 6.7.8.9 -j ACCEPT   # UNIFI - STUN - site2

# Make persistent
iptables-save >/etc/iptables/rules.v4
Install Unifi
Make sure you’re root (sudo -s), then:
echo 'deb http://www.ubnt.com/downloads/unifi/debian stable ubiquiti' | sudo tee /etc/apt/sources.list.d/100-ubnt-unifi.list
apt-key adv --keyserver keyserver.ubuntu.com --recv 06E85760C0A52C50
apt-get update
apt-get install unifi
.. last but not least, go to: https://ipaddress:8443/
Saturday, October 21, 2017
make iptables persistent
Recent versions of Ubuntu use a built-in firewall (ufw). Therefore iptables rules don’t persist after a reboot.
Here’s how:
# Start
sudo service netfilter-persistent start
# Add to startup
sudo invoke-rc.d netfilter-persistent save
Friday, September 1, 2017
irssi fish
$ apt-get install build-essential irssi-dev libglib2.0-dev libssl-dev cmake git
$ git clone https://github.com/falsovsky/FiSH-irssi.git
$ cd FiSH-irssi
$ cmake .
$ make
$ cd src
$ sudo cp libfish.so /usr/lib/i386-linux-gnu/irssi/modules/
or
$ sudo cp libfish.so /usr/lib/irssi/modules/
or
$ sudo cp libfish.so /usr/lib/x86_64-linux-gnu/irssi/modules/
Favorite settings:
/set mark_encrypted ·
/set mark_position 0
/save
Tuesday, August 29, 2017
Compile lftp from source
Get lftp source from http://lftp.yar.ru/get.html
Unpack.
./configure --without-gnutls --with-openssl=/usr/include/openssl/
make
Use the following settings:
set ssl:verify-certificate no
set ftp:ignore-pasv-address no
set ftp:prefer-epsv false
set ftp:passive-mode true
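These can go in ~/.lftprc so they apply to every session. For example (the host and user are hypothetical):

cat >> ~/.lftprc <<'EOF'
set ssl:verify-certificate no
set ftp:ignore-pasv-address no
set ftp:prefer-epsv false
set ftp:passive-mode true
EOF
lftp -u USERNAME ftp.example.com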
Monday, July 3, 2017
32bit on 64bit debian/ubuntu
dpkg --add-architecture i386
apt-get update
apt-get install libc6:i386 libc6-i386
Monday, November 7, 2016
Configure smokeping on Ubuntu 16
This is actually not Ubuntu 16 specific, but I need to write it down because I tend to forget this.
Comment out the sendmail line in “/etc/smokeping/config.d/pathnames”:
#sendmail = /usr/sbin/sendmail
Set the cgiurl line in “/etc/smokeping/config.d/General”:
cgiurl = http://YOURIPADDRESS/cgi-bin/smokeping.cgi
Add the stuff to “/etc/apache2/conf-available/serve-cgi-bin.conf” so it looks like:
<IfModule mod_alias.c>
    <IfModule mod_cgi.c>
        Define ENABLE_USR_LIB_CGI_BIN
    </IfModule>
    <IfModule mod_cgid.c>
        Define ENABLE_USR_LIB_CGI_BIN
    </IfModule>
    <IfDefine ENABLE_USR_LIB_CGI_BIN>
        ScriptAlias /cgi-bin/ /usr/lib/cgi-bin/
        <Directory "/usr/lib/cgi-bin">
            AllowOverride None
            Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch
            Require all granted
        </Directory>
        Alias /smokeping /usr/share/smokeping/www
        <Directory "/usr/share/smokeping/www">
            Options FollowSymLinks
        </Directory>
    </IfDefine>
</IfModule>
# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
Enable CGI:
sudo a2enmod cgi
Restart apache and smokeping:
sudo service apache2 restart
sudo service smokeping restart
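A quick sanity check that the CGI responds (replace YOURIPADDRESS; a 200 means the page renders):

curl -s -o /dev/null -w "%{http_code}\n" http://YOURIPADDRESS/cgi-bin/smokeping.cgi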
Wednesday, March 30, 2016
Hot migrate linear LVM to striped
Initial linear LVM
Create the Physical Volumes
root@lvmtest:~# pvcreate /dev/sdb /dev/sdc
  Physical volume "/dev/sdb" successfully created
  Physical volume "/dev/sdc" successfully created
Create the Volume Group
root@lvmtest:~# vgcreate MAIN /dev/sdb /dev/sdc
Create the Logical Volume
root@lvmtest:~# lvcreate -n LVMAIN -l 100%FREE MAIN
  Logical volume "LVMAIN" created
Create the filesystem, mount it
root@lvmtest:~# mkfs.xfs /dev/MAIN/LVMAIN
root@lvmtest:~# mkdir /mnt/mylvmvolume
root@lvmtest:~# mount /dev/MAIN/LVMAIN /mnt/mylvmvolume
root@lvmtest:~# df -h | grep MAIN
/dev/mapper/MAIN-LVMAIN   64G   33M   64G   1% /mnt/mylvmvolume
Create extra space
Add two new disks
root@lvmtest:~# pvcreate /dev/sdd /dev/sde
  Physical volume "/dev/sdd" successfully created
  Physical volume "/dev/sde" successfully created
Add the disks to the volumegroup
root@lvmtest:~# vgextend MAIN /dev/sdd /dev/sde
Make it stripe
Now... you can’t move PEs between logical volumes, so you have to do a little trick.
Create a mirror (-m 1) of the current data on the recently added space, and make it stripe (--stripes <number of disks>).
Do this in a screen. This can take days, depending on the size!
root@lvmtest:~# lvconvert -m 1 --mirrorlog core --stripes 2 /dev/MAIN/LVMAIN /dev/sdd /dev/sde
  Using default stripesize 64.00 KiB
  MAIN/LVMAIN: Converted: 0.0%
  MAIN/LVMAIN: Converted: 1.0%
  MAIN/LVMAIN: Converted: 2.4%
  MAIN/LVMAIN: Converted: 3.7%
  MAIN/LVMAIN: Converted: 5.1%
While the mirroring is in progress, we look at the stats…
Notice there is only one disk reading (sdb) and two are writing (the striped disks). Perfect!
root@lvmtest:~# iostat -m 2 /dev/sd[b-e]
Linux 3.16.0-45-generic (btrfs)   03/30/2016   _i686_   (2 CPU)

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           0.00    0.00    1.26    0.00    0.00   98.74

Device:            tps    MB_read/s    MB_wrtn/s    MB_read    MB_wrtn
sdb             117.50        58.75         0.00        117          0
sdc               0.00         0.00         0.00          0          0
sdd             117.50         0.00        29.38          0         58
sde             117.50         0.00        29.38          0         58
.. and a little further into the progress, data is read from sdc.
Device:            tps    MB_read/s    MB_wrtn/s    MB_read    MB_wrtn
sdb               0.00         0.00         0.00          0          0
sdc             134.50        67.25         0.00        134          0
sdd             134.50         0.00        33.62          0         67
sde             134.00         0.00        33.50          0         67
Cleanup
Let’s break the mirror and go live with the new disks:
root@lvmtest:~# lvconvert -m0 MAIN/LVMAIN /dev/sdb /dev/sdc
  Logical volume LVMAIN converted.
Remove the old disks from the volume group:
root@lvmtest:~# vgreduce MAIN /dev/sdb /dev/sdc
  Removed "/dev/sdb" from volume group "MAIN"
  Removed "/dev/sdc" from volume group "MAIN"
Remove the physical volumes:
root@lvmtest:~# pvremove /dev/sdb /dev/sdc
  Labels on physical volume "/dev/sdb" successfully wiped
  Labels on physical volume "/dev/sdc" successfully wiped
There ya go. No downtime. Hot migrated from linear to striped!
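To confirm the result, lvs can show the segment type, stripe count, and backing devices (standard lvs report fields):

root@lvmtest:~# lvs -o +segtype,stripes,devices MAIN/LVMAIN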
Thursday, August 6, 2015
better compressed dd images of blockdevices
When creating full images from one of my rootdisks …
dd if=/dev/sda | bzip2 >/opt/backup/sda.img.bzip2
… I noticed the backups kept growing, while the amount of data on the device was not.
Since dd is a full block-level backup and not a file-based one, there must be free space still containing old bits and bytes.
The sfill utility can overwrite that free space with zeroes, giving me better-compressed images.
sfill -f -l -l -z /mnt/mountpoint
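If sfill (from the secure-delete package) isn’t available, a common alternative is to fill the free space with a zeroed file and remove it again. A rough sketch:

# fill the free space with zeroes (dd stops when the filesystem is full)
dd if=/dev/zero of=/mnt/mountpoint/zero.fill bs=1M
sync
rm /mnt/mountpoint/zero.fill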
Clean ubuntu rootdisk
My script to clean up some stuff.
It seems those kernel header packages eat up all the inodes on small ext volumes.
#!/bin/sh
nr_of_removed_packages=`dpkg -l | egrep "^rc" | cut -d" " -f3 | wc -l`
nr_of_active_kernels=`ls /boot/vmlinuz* | wc -l`
active_kernels=`ls /boot/vmlinuz* | cut -d" " -f9 | sed -r 's/\/boot\/vmlinuz-//' | sed -r 's/-generic//'`
nr_of_headers_to_be_cleaned=`dpkg -l | grep linux-headers | grep -v headers-generic | cut -d" " -f3 | grep -v "$active_kernels" | wc -l`

if [ "$nr_of_removed_packages" -gt "0" ]; then
  echo "Purge configuration files for removed packages ($nr_of_removed_packages)"
  dpkg --purge `dpkg -l | egrep "^rc" | cut -d" " -f3`
else
  echo "No removed packages"
fi

if [ "$nr_of_headers_to_be_cleaned" -gt "0" ]; then
  echo "Cleaning old kernel headers, but skipping active kernels:"
  echo "$active_kernels"
  echo ""
  echo "Going to clean:"
  dpkg -l | grep linux-headers | grep -v headers-generic | cut -d" " -f3 | grep -v "$active_kernels"
  echo "Wait 5 seconds or break now!!"
  sleep 5
  dpkg --purge `dpkg -l | grep linux-headers | grep -v headers-generic | cut -d" " -f3 | grep -v "$active_kernels"`
else
  echo "No kernel headers to be cleaned"
fi

echo "Done!"
Wednesday, March 11, 2015
Hot migrate LVM volume to new LUN(s)
This example hot-migrates an existing LVM volume spanned over 3 disks to a new LVM volume spanned over 3 disks.
Prerequisites:
- lvm2 (apt-get install lvm2)
- 3 disks to start with
- 3 new disks to be added. Disks in this example are 100% identical!
Current LVM
This first part you probably already have, since you want to migrate this volume. But I’m going to create it anyway as part of the whole documentation.
I’m not going to work with partitions; I’ll just use the whole disks.
Create the Physical Volumes
root@lvmtest:~# pvcreate /dev/sdb /dev/sdc /dev/sdd
  Physical volume "/dev/sdb" successfully created
  Physical volume "/dev/sdc" successfully created
  Physical volume "/dev/sdd" successfully created
Create the Volume Group
root@lvmtest:~# vgcreate MAIN /dev/sdb /dev/sdc /dev/sdd
Create the Logical Volume
root@lvmtest:~# lvcreate -n LVMAIN -l 100%FREE MAIN Logical volume "LVMAIN" created
Create the filesystem, mount it
root@lvmtest:~# mkfs.xfs /dev/MAIN/LVMAIN
root@lvmtest:~# mkdir /mnt/mylvmvolume
root@lvmtest:~# mount /dev/MAIN/LVMAIN /mnt/mylvmvolume
root@lvmtest:~# df -h | grep MAIN
/dev/mapper/MAIN-LVMAIN   24G   33M   24G   1% /mnt/mylvmvolume
Put some data on it
root@lvmtest:/mnt/mylvmvolume# dd if=/dev/zero of=blabla.txt bs=1M count=1000
1000+0 records in
1000+0 records out
1048576000 bytes (1.0 GB) copied, 5.93346 s, 177 MB/s
Add new disks and create the mirror
Add new disks to the machine.
Prepare the new disks:
root@lvmtest:~# pvcreate /dev/sde /dev/sdf /dev/sdg
  Physical volume "/dev/sde" successfully created
  Physical volume "/dev/sdf" successfully created
  Physical volume "/dev/sdg" successfully created
Add the disks to the existing Volume Group
root@lvmtest:~# vgextend MAIN /dev/sde /dev/sdf /dev/sdg
Create a mirror (-m1) of the current data to the recently added space.
Do this in a screen. This can take days, depending on the size!
root@lvmtest:~# lvconvert -m1 --corelog MAIN/LVMAIN /dev/sde /dev/sdf /dev/sdg
  MAIN/LVMAIN: Converted: 0.0%
  MAIN/LVMAIN: Converted: 2.8%
  MAIN/LVMAIN: Converted: 10.6%
  MAIN/LVMAIN: Converted: 20.2%
  MAIN/LVMAIN: Converted: 29.9%
  MAIN/LVMAIN: Converted: 39.1%
  MAIN/LVMAIN: Converted: 48.8%
  MAIN/LVMAIN: Converted: 58.3%
  MAIN/LVMAIN: Converted: 67.8%
  MAIN/LVMAIN: Converted: 77.5%
  MAIN/LVMAIN: Converted: 87.1%
  MAIN/LVMAIN: Converted: 96.8%
  MAIN/LVMAIN: Converted: 100.0%
The mirror is live.
During the conversion, you might see some nice figures using iostat:
Device:            tps    MB_read/s    MB_wrtn/s    MB_read    MB_wrtn
sdb             126.00         0.00        63.00          0        126
sdc               0.00         0.00         0.00          0          0
sdd               0.00         0.00         0.00          0          0
sde             126.00        63.00         0.00        126          0
sdg               0.00         0.00         0.00          0          0
sdf               0.00         0.00         0.00          0          0
sda               0.00         0.00         0.00          0          0
dm-0              0.00         0.00         0.00          0          0
dm-1           1004.00        62.75         0.00        125          0
dm-2           1008.00         0.00        63.00          0        126
Break the mirror and go live with the new disks
Create 0 copies (-m0) for the devices that will be removed, a.k.a. breaking the mirror.
root@lvmtest:~# lvconvert -m0 MAIN/LVMAIN /dev/sdb /dev/sdc /dev/sdd
Remove the devices from the Volume Group
root@lvmtest:~# vgreduce MAIN /dev/sdb /dev/sdc /dev/sdd
  Removed "/dev/sdb" from volume group "MAIN"
  Removed "/dev/sdc" from volume group "MAIN"
  Removed "/dev/sdd" from volume group "MAIN"
Remove the Physical Volumes
root@lvmtest:~# pvremove /dev/sdb /dev/sdc /dev/sdd
  Labels on physical volume "/dev/sdb" successfully wiped
  Labels on physical volume "/dev/sdc" successfully wiped
  Labels on physical volume "/dev/sdd" successfully wiped
That’s it.. Hot migrated!
root@lvmtest:~# df -h | grep MAIN
/dev/mapper/MAIN-LVMAIN   24G   11G   14G  42% /mnt/mylvmvolume
Monday, August 25, 2014
Areca and s.m.a.r.t. monitoring
After swapping a couple of defective hard disks, I was wondering why I never got a predictive failure from my Areca controller.
The weird thing is: the logging shows warnings:
2014-08-24 23:15:37  IDE Channel #08 Reading Error
2014-08-24 23:15:28  IDE Channel #08 Reading Error
2014-08-24 23:15:19  IDE Channel #08 Reading Error
2014-08-24 23:15:10  IDE Channel #08 Reading Error
However.. the controller doesn’t seem to do anything with the s.m.a.r.t. values.
Here’s a script you might want to use as a base to get your monitoring up and running.
#!/bin/bash
CLI="/path/to/cli32"
NR_OF_PORTS=`$CLI disk info | wc -l`
# subtract 4 to get rid of the formatting and determine the real number of disks
NR_OF_PORTS=`expr $NR_OF_PORTS - 4`
echo "Controller has $NR_OF_PORTS ports"
for (( i=1; i<=$NR_OF_PORTS; i++ ))
do
  RELOC_SECT=`$CLI disk smart drv=$i | grep "Reallocated Sector Count" | awk '{print $9}'`
  if [ -z "$RELOC_SECT" ]; then
    echo "Port $i = No Disk"
  else
    echo "Port $i = $RELOC_SECT"
  fi
done
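To turn this into actual monitoring, here is a hedged sketch of a cron-able wrapper. It assumes the script above is saved as /usr/local/bin/areca-smart.sh and that a working mail command is configured; the address is a placeholder:

#!/bin/bash
# mail a warning when any port reports a non-zero Reallocated Sector Count
OUT=`/usr/local/bin/areca-smart.sh | grep -v "No Disk" | awk -F"= " '$2+0 > 0'`
if [ -n "$OUT" ]; then
  echo "$OUT" | mail -s "Areca SMART warning" admin@example.com
fi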
Friday, May 9, 2014
Add Windows back to Grub2
My lifesaver:
Create and chmod +x the file:
/etc/grub.d/15_Windows
Add this code:
#! /bin/sh -e
echo "Adding Windows" >&2
cat << EOF
menuentry "Windows" {
    set root=(hd0,1)
    chainloader +1
}
EOF
For grub2:
grub2-mkconfig -o /boot/grub2/grub2.cfg
or:
grub-mkconfig -o /boot/grub/grub.cfg
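To check that the entry made it into the generated config (the path depends on which of the two commands applies to your system):

grep -A4 'menuentry "Windows"' /boot/grub/grub.cfg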
Friday, November 1, 2013
Ubuntu homedir encryption and auto unmounting
Encrypting your home directory is generally not a bad idea.
With Ubuntu’s implementation it works out of the box.
However, if you are running processes from a “screen” and these processes require data from your home directory, they will fail whenever you log out of your SSH session.
That’s because your home directory is auto-unmounted whenever you log out (even though the screen will continue to run).
To NOT auto-unmount your home directory, you can remove or rename the following file:
.ecryptfs/auto-umount
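For example, renaming keeps it easy to restore later (the file lives in your home directory):

mv ~/.ecryptfs/auto-umount ~/.ecryptfs/auto-umount.disabled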