Install bun if not already:
curl -fsSL https://bun.com/install | bash
source ~/.bashrc
Install codex:
bun i -g @openai/codex
defaultService: projects/norse-lotus-469512-f8/global/backendServices/dasher-origin-long-cached
name: hls-matcher
routeRules:
- description: HLS manifests (.m3u8) -> short TTL
  matchRules:
  - pathTemplateMatch: /**.m3u8
  priority: 10
  service: projects/norse-lotus-469512-f8/global/backendServices/dasher-origin-short-cached
- description: HLS segments (.ts) -> long TTL
  matchRules:
  - pathTemplateMatch: /**.ts
  priority: 20
  service: projects/norse-lotus-469512-f8/global/backendServices/dasher-origin-long-cached
- description: Fallback
  matchRules:
  - prefixMatch: /
  priority: 1000
  service: projects/norse-lotus-469512-f8/global/backendServices/dasher-origin-long-cached

https://chatgpt.com/c/6876ac24-91ac-8013-99ff-f0bb0833d27d
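One way to apply a pathMatcher like this is to export the load balancer's URL map, splice it in, and re-import; a sketch, where the URL map name dasher-lb-map is a placeholder for your own:

gcloud compute url-maps export dasher-lb-map --destination=urlmap.yaml --global
# edit urlmap.yaml: add the hls-matcher pathMatcher / routeRules above
gcloud compute url-maps import dasher-lb-map --source=urlmap.yaml --global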
curl -sfL https://get.k3s.io | sh -
sudo k3s kubectl get node
sudo cp /etc/rancher/k3s/k3s.yaml $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=$HOME/.kube/config # add this to your shell RC
curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
# add the repo and install the chart
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm repo update
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kubernetes-dashboard --create-namespace
kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
curl -k https://localhost:8443
Set up the dashboard service account and token:
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

kubectl -n kubernetes-dashboard create token admin-user

Tip: If your type already implements MarshalBinary, UnmarshalBinary and ScanRedis, you're covered for every read/write path (single values, hashes, and command arguments) without adding any other interfaces.
Minimal example showing all three methods on a User struct:

package main

import (
	"encoding"
	"encoding/json"

	"github.com/redis/go-redis/v9"
)

// Compile-time assertions that User implements every interface
// go-redis looks for.
var (
	_ encoding.BinaryMarshaler   = (*User)(nil)
	_ encoding.BinaryUnmarshaler = (*User)(nil)
	_ redis.Scanner              = (*User)(nil)
)

// If your go-redis version doesn't re-export the scanner interface,
// you can declare it yourself and assert against that instead:
// type RedisScanner interface {
//     ScanRedis(string) error
// }
// var _ RedisScanner = (*User)(nil)

type User struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

// MarshalBinary encodes User as JSON before writing to Redis.
func (u User) MarshalBinary() ([]byte, error) {
	return json.Marshal(u)
}

// UnmarshalBinary decodes JSON returned by GET or cmd.Scan(&user).
func (u *User) UnmarshalBinary(data []byte) error {
	return json.Unmarshal(data, u)
}

// ScanRedis lets rc.HGetAll(...).Scan(&user) populate the struct from a hash field.
func (u *User) ScanRedis(s string) error {
	return json.Unmarshal([]byte(s), u)
}

Below is a cheat-sheet for the five interfaces you'll ever need to think about with go-redis/v9.
Read it row-by-row: pick the operation you're doing and see which interface the client will look for.

| Interface you implement | Where go-redis looks for it | When it is called | What your method receives / returns | Typical use-case |
|---|---|---|---|---|
| encoding.BinaryMarshaler | While building any command (SET, HSET, RPUSH, Lua args, …) | Writing data to Redis | You return the exact bytes that should be sent | Serialise structs or slices in one shot (e.g. JSON, MsgPack) before rc.Set(...) |
| encoding.TextMarshaler | Same place as above, but only if the type does not have MarshalBinary | Writing data | Return UTF-8 text; Redis still stores it as bytes | Human-readable text representation (UIDs, URLs, "42") when you don't care about binary |
| encoding.BinaryUnmarshaler | When you call cmd.Scan(&dst) on replies coming from GET, HGET, EVAL, etc. | Reading a single value back | You receive the raw byte slice Redis replied with | Turn the bytes you wrote via MarshalBinary back into your struct |
| encoding.TextUnmarshaler | Inside the hash-to-struct helper rc.HGetAll(...).Scan(&myStruct) (only if ScanRedis isn't present) | Reading a hash field into a struct | You get the field's text ([]byte, UTF-8) | Quick way to parse simple string fields (int, time, enum) without custom logic |
| hscan.Scanner (re-exported as redis.Scanner) | First choice in the same hash-to-struct helper | Reading a hash field | You get the field as a string (already decoded from bytes) | Full control over complex fields in hashes; preferred if you need validation |
| Operation in your code | What go-redis does internally | Preference order that it checks | Interface signature you implement | Typical payload you handle |
|---|---|---|---|---|
| Writing data: any command argument (SET, HSET, Lua args, pipelines, …) | appendArg() walks every value | 1. encoding.BinaryMarshaler; 2. encoding.TextMarshaler; 3. fmt.Stringer or bare value | MarshalBinary() ([]byte, error) / MarshalText() ([]byte, error) | JSON / MsgPack blob, or plain text/number |
| Reading a single value (GET key, HGET field, script return, …) followed by cmd.Scan(&dst) | proto.Scan() converts the raw reply | 1. Built-in scalar types (*string, *int64, *time.Time, …); 2. encoding.BinaryUnmarshaler | UnmarshalBinary([]byte) error | Byte slice → struct round-trip you stored with MarshalBinary |
| Reading a hash into a struct (HGetAll().Scan(&dstStruct)) | hscan maps each field | 1. hscan.Scanner / redis.Scanner; 2. encoding.TextUnmarshaler; 3. Built-in string→int/float/bool converters | ScanRedis(string) error / UnmarshalText([]byte) error | Custom field parsing or quick string→time.Duration, enum, etc. |
- Writing: if the type implements MarshalBinary, that wins. Otherwise MarshalText (or even fmt.Stringer) is used.
- Reading a single value: only UnmarshalBinary matters.
- Reading a hash into a struct: go-redis tries ScanRedis first, then UnmarshalText, then falls back to the built-in converters (string→int, bool, etc.).
- Do you still need UnmarshalBinary if you already have ScanRedis or UnmarshalText? Yes, when you also read the value outside of a hash (e.g. GET key followed by cmd.Scan(&v)). ScanRedis/UnmarshalText are only for the hash helper; they are never called for plain replies.
| Scenario | What to implement |
|---|---|
| Storing an entire struct with SET and later GET-ing it back | MarshalBinary + UnmarshalBinary |
| Adding that same struct as a field value inside a Redis hash | The two above plus ScanRedis or UnmarshalText |
| Hash field is just an int but you want automatic conversion | Only UnmarshalText (no need for custom marshal; HSET will write the int as a string automatically) |
| You never scan single values, only hashes | Skip UnmarshalBinary; stick to ScanRedis/UnmarshalText |
With this table you can decide, at a glance, which interface your custom type really needs and avoid the classic "can't marshal/unmarshal (implement …)" errors.
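As a quick sanity check, here is a minimal round-trip sketch; it reuses the User type from the earlier example (same package) and assumes a Redis instance on localhost:6379:

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rc := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	u := User{ID: 1, Name: "Ada"}

	// SET calls MarshalBinary while building the command.
	if err := rc.Set(ctx, "user:1", u, 0).Err(); err != nil {
		panic(err)
	}

	// GET + Scan calls UnmarshalBinary on the raw reply.
	var back User
	if err := rc.Get(ctx, "user:1").Scan(&back); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", back)

	// HSET also goes through MarshalBinary; reading the field back out
	// of the hash is where ScanRedis comes into play.
	if err := rc.HSet(ctx, "users", "u1", u).Err(); err != nil {
		panic(err)
	}
}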
https://jeff.vtkellers.com/posts/technology/force-all-dns-queries-through-pihole-with-openwrt
I've run Pi-hole on a Raspberry Pi 3 Model B as my local DNS server for a couple of years. Once configured, it noticeably trims page-load times when multiple devices on the LAN visit the same sites.
A recent LabZilla article, Your Smart TV is probably ignoring your Pi-hole, reminded me that any device on the network can simply ignore the DNS server advertised by the router. Many "smart" TVs hard-code public resolvers such as 1.1.1.1 or 8.8.8.8. LabZilla showed how to intercept that traffic with pfSense; below is how to do the same on OpenWRT.
My LG B9 TV is air-gapped (its Wi-Fi module was surgically removed), but other gadgets (a Chromecast and a Windows laptop) could still bypass Pi-hole. Time to do some firewall tinkering.
DNS usually happens over port 53, so we'll redirect every LAN query on that port to Pi-hole, then masquerade the answers so clients don't notice the interception.
In Network → Firewall → Port Forwards add:

- Protocol: TCP, UDP
- Source zone: lan
- External port: 53
- Internal zone: lan
- Internal IP address: 192.168.1.101 (your Pi-hole)
- Internal port: 53

We must exempt Pi-hole itself or it would loop back on its own queries. Under Advanced Settings add:
Source IP: !192.168.1.101

In Pi-hole → Local DNS → DNS Records add a fake entry:

- Domain: piholetest.example.com
- IP: 10.0.1.1

Then query a public resolver directly from a LAN machine:

dig piholetest.example.com @1.1.1.1
At this point dig complains that the reply comes from 192.168.1.101 instead of 1.1.1.1. That means interception works; now we'll fix masquerading.
Navigate to Network → Firewall → NAT Rules and add:

- Protocol: TCP, UDP
- Outbound zone: lan
- Destination address: 192.168.1.101
- Destination port: 53
- Action: MASQUERADE
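For reference, the same two rules expressed as /etc/config/firewall stanzas; this is a sketch assuming standard fw3/fw4 UCI option names, so double-check them against your OpenWRT release:

config redirect
	option name 'Intercept-DNS'
	option src 'lan'
	option src_ip '!192.168.1.101'
	option src_dport '53'
	option dest 'lan'
	option dest_ip '192.168.1.101'
	option dest_port '53'
	option proto 'tcp udp'
	option target 'DNAT'

config nat
	option name 'Masquerade-DNS'
	option src 'lan'
	option dest_ip '192.168.1.101'
	option dest_port '53'
	option proto 'tcp udp'
	option target 'MASQUERADE'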
dig piholetest.example.com @1.1.1.1
You should now receive:
;; ANSWER SECTION:
piholetest.example.com. 2 IN A 10.0.1.1
...
;; SERVER: 1.1.1.1#53 (1.1.1.1)
The reply appears to come from 1.1.1.1 even though Pi-hole actually answered. Success!
With these two firewall rules, every DNS query on port 53 (hard-coded or not) is filtered through Pi-hole, letting its blocklists protect even the sneakiest devices and trimming bandwidth usage.
A determined device could still bypass this by sending DNS over a non-standard port or encapsulating it in HTTPS (DoH/DoT). Catching that traffic would require deeper packet inspection, which is outside the scope of lightweight home routers.
Edit the sshd_config file (on the server):
sudo nano /etc/ssh/sshd_config
Add or modify the line:
UseDNS no
Also add the following to the same file (/etc/ssh/sshd_config):
Compression no
Ciphers ^chacha20-poly1305@openssh.com
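Then restart the SSH daemon for the changes to take effect (the unit is named ssh on Debian/Ubuntu, sshd on some other distros):

sudo systemctl restart ssh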
In this example we sync a local directory to Google Drive:
rclone sync /mnt/diskvideos gdrive:myvideos \
--drive-impersonate=hello@example.com \
--transfers=10 \
--drive-chunk-size=256M \
--drive-upload-cutoff=256M \
--buffer-size=512M \
--checkers=8 \
--tpslimit=10 \
--progress
# 1. Make sure APT can see the security & updates pockets
sudo apt update
# 2. Pull in the extra module bundle that matches the running kernel
sudo apt install linux-modules-extra-$(uname -r)
# 3. Load the driver and confirm it registered
sudo modprobe ntfs3
cat /proc/filesystems | grep ntfs3 # should print "ntfs3"
Mount
sudo mount -t ntfs3 /dev/nvme0n4p2 /mnt/sgdtwo
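Before writing the fstab line below, you can read the partition's UUID (device name as in the mount example above):

sudo blkid /dev/nvme0n4p2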
To mount automatically with fstab
# Replace the 1002 below with your own user/group ID, obtained via:
id -u
nano /etc/fstab
UUID=4dd042ad89a2hsa5 /mnt/sgdtwo ntfs3 rw,uid=1002,gid=1002,iocharset=utf8,windows_names,nofail,auto 0 0
Save, exit, then test the line before rebooting; if it doesn't work, remove or comment out the line in /etc/fstab.
# should run silently
sudo mount -a
df -h
If you want to copy millions of files fast:
rclone sync /src/dir /dest \
--progress -P \
--transfers 128 \
--checkers 128 \
--multi-thread-streams 128 \
--local-no-check-updated \
--no-traverse \
--stats 10s
| Layer | What it does | Typical commands | Snapshot capability |
|---|---|---|---|
| ext4 (filesystem) | Puts a directory/file structure on a single block device or partition. | fsck.ext4, tune2fs | None (needs LVM or another block-level snapshot under it) |
| LVM (Logical Volume Manager, block-device layer) | Pools one or more disks/partitions into flexible "Volume Groups", then carves out "Logical Volumes" that look like ordinary disks to a filesystem. | pvcreate, vgcreate, lvcreate, lvs | Yes: lvcreate -s makes instantaneous, copy-on-write snapshots |
Think of it this way:

┌──────────────────────────┐
│ ext4 filesystem          │ <-- you mount this ( /, /home, … )
└────────────▲─────────────┘
             │ logical volume
┌────────────┴─────────────┐
│ LVM (device-mapper)      │ <-- lets you resize, add disks, snapshot
└────────────▲─────────────┘
             │ partition or whole disk
┌────────────┴─────────────┐
│ SD card / SSD / HDD      │
└──────────────────────────┘
On the official Ubuntu 24.04 for Raspberry Pi image, the installer keeps things simple:
/dev/mmcblk0p1 vfat (boot/EFI)
/dev/mmcblk0p2 ext4 (root filesystem)
No LVM by default, so ext4 sits directly on the SD-card partition. You can confirm:
lsblk -f
# TYPE column shows "part" and "ext4"; you won't see "lvm"
or
sudo vgdisplay # "command not found" or "0 VGs" means no LVM
Tools like restic are perfectly happy to back up a live ext4 filesystem, but if a file is being written during the run you can end up with a half-written copy. Using an LVM snapshot (or a Btrfs/ZFS snapshot) solves that elegantly; see the sketch below.
If you don't have LVM, you still get a reliable backup most of the time; just be aware that a busy database or VM image might need its own dump/hot-backup first.
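A minimal snapshot-then-backup sketch, assuming a volume group vg0 with a logical volume root, free extents in the VG, and a restic repository already configured in your environment:

# create a 5 GB copy-on-write snapshot of the live volume
sudo lvcreate -s -n root-snap -L 5G /dev/vg0/root
# mount the frozen view read-only and back it up
sudo mkdir -p /mnt/snap
sudo mount -o ro /dev/vg0/root-snap /mnt/snap
restic backup /mnt/snap
# clean up
sudo umount /mnt/snap
sudo lvremove -y /dev/vg0/root-snap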
| Scenario | How to get there |
|---|---|
| Fresh install | When flashing the next Ubuntu image, drop to the server installer's advanced storage screen and pick "Use LVM with the new Ubuntu installation". |
| Add a second drive (USB/SATA SSD) | Leave the SD card as-is for /boot, create an LVM Volume Group on the SSD, move / or data directories there, then enable snapshots only on that drive. |
| Convert an existing ext4 root in-place | Not really supported. Easiest path is backup, reinstall with LVM, restore; or rsync the running system onto a new LVM LV on an external drive and switch cmdline.txt/fstab to boot from it. |

Performance-wise, LVM adds < 1 % overhead, which is fine on a Pi 4/5 with an SSD. On an SD card, the bigger question is wear-levelling rather than speed, so snapshots are best kept short-lived.
If you want Pageant (PuTTY’s SSH authentication agent) to start automatically when Windows boots, follow these steps:
Step 1: Open the Windows Registry Editor (regedit).
Step 2: Navigate to the following registry key for the current user:
Computer\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Run
Step 3: In the right pane, right-click and choose New > Expandable String Value. Using an Expandable String Value allows you to utilize environment variables in your command.

Step 4: Name the new value exactly as the program name; for example, Pageant.
Step 5: Right-click the newly-created value and select Modify. In the Value data field, paste the full path of the executable along with any necessary arguments. Adjust the paths if your installation or configuration differs.
For example, if your Pageant executable is installed in C:\Program Files\PuTTY\ and you have your keys and configuration stored in your user profile, your command might look like this:
"C:\Program Files\PuTTY\pageant.exe" --encrypted "%USERPROFILE%\Documents\private.ppk" --openssh-config "%USERPROFILE%\.ssh\pageant.conf"
After completing these steps, Pageant will automatically start with Windows, loading your specified keys and configuration.
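If you prefer not to click through regedit, the same value can be created from PowerShell; a sketch using the example paths above (single quotes keep %USERPROFILE% unexpanded, so it lands in the registry as an expandable string):

New-ItemProperty -Path 'HKCU:\Software\Microsoft\Windows\CurrentVersion\Run' `
    -Name 'Pageant' -PropertyType ExpandString `
    -Value '"C:\Program Files\PuTTY\pageant.exe" --encrypted "%USERPROFILE%\Documents\private.ppk" --openssh-config "%USERPROFILE%\.ssh\pageant.conf"'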
Remove the Docker build cache:
docker builder prune
Docker Desktop WSL ext4.vhdx too large
https://stackoverflow.com/a/74870395/5442650
(Update for December 2022)
The Windows utility diskpart can now be used to shrink Virtual Hard Disk (vhdx) files, provided you have freed up space inside them by deleting unnecessary files. I found the info in this guide.
I am putting the gist of the instructions below for reference but the guide above is more complete.
First make sure all WSL instances are shut down by opening an administrator command window, and typing:
>> wsl --shutdown
Verify everything is stopped by:
>> wsl.exe --list --verbose
Then start diskpart:
>> diskpart
and inside diskpart type:
DISKPART> select vdisk file="<path to vhdx file>"
For example:
DISKPART> select vdisk file="C:\Users\user\AppData\Local\Packages\CanonicalGroupLimited.Ubuntu22.04LTS_12rqwer1sdgsda\LocalState\ext4.vhdx"
It should respond by saying "DiskPart successfully selected the virtual disk file."
Then, to shrink:
DISKPART> compact vdisk
After this the vhdx file should shrink in usage. In my case it went from 40GB to 4GB. You can type exit to quit diskpart.
To set up a new WSL instance:
wsl --import tdlserver "C:\mywsl\instances\tdlserver" "C:\mywsl\wsl-original-ubuntu2404-exported"
To change the default user, edit /etc/wsl.conf inside the instance (nano /etc/wsl.conf) and add:
[user]
default=abr
Terminate the tdlserver instance with the command below, then start it again for the default user change to take effect:
wsl -t tdlserver
To export the original image:
wsl --export Ubuntu-24.04 "C:\mywsl\wsl-original-ubuntu2404-exported"
mail_plugins = $mail_plugins zlib
plugin {
zlib_save_level = 6 # 1…9; default is 6
zlib_save = gz # or bz2, xz or lz4
}
Find this line:
mail_plugins = $mail_plugins antispam
and change it to:
mail_plugins = $mail_plugins antispam zlib
Then add the following protocol blocks to the file:
protocol imap {
mail_plugins = $mail_plugins antispam imap_zlib
}
protocol lmtp {
mail_plugins = $mail_plugins sieve zlib
}
And then restart the server
After the old maildir has been copied into place, replace /home/user-data/mail/mailboxes below with your maildir path and fix ownership:
sudo chown -R mail:mail /home/user-data/mail/mailboxes
find /home/user-data/mail/mailboxes -type d -exec chmod 700 -R {} \;
find /home/user-data/mail/mailboxes -type f -exec chmod 600 {} \;
find /home/user-data/mail/mailboxes -type d -name Maildir -exec chmod 700 -R {} \;
find /home/user-data/mail/mailboxes -type f \( -name '.sieve' -o -name '.sieve.svbin' \) -exec chmod 644 {} \;
find /home/user-data/mail/mailboxes -type f \( -name 'dovecot-uidlist' -o -name 'dovecot-uidvalidity' -o -name 'dovecot.index*' -o -name 'maildirsize' \) -exec chmod 600 {} \;
find /home/user-data/mail/mailboxes -type f \( -name 'dovecot-uidvalidity.*' \) -exec chmod 444 {} \;
find /home/user-data/mail/mailboxes -type f \( -name 'subscriptions' \) -exec chmod 744 {} \;
find /home/user-data/mail/mailboxes -type f \( -name 'subscriptions' \) -exec sh -c 'echo "Junk" >> "$1"' -- {} \;
sudo chown -R mail:mail /home/user-data/mail/mailboxes
find /home/user-data/mail/mailboxes -type f -name '*dovecot*' -exec rm {} +
sudo service dovecot restart
sudo apt update -y && sudo apt upgrade -y
Setup Docker
Setup Nginx with SSL with proxy
www.example.com example.com -> 127.0.0.1:5151
api.example.com ->127.0.0.1:5152
events.example.com -> 127.0.0.1:5153
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
Setup value for vm.max_map_count and the reboot
sudo reboot now
Setup proper permissions for MS SQL
sudo chown -R 10001:20 docker-data/sql
sudo chmod -R 770 docker-data/sql
In today’s fast-paced digital environment, ensuring minimal downtime and quick reconnection times in client-server communications is crucial. This is particularly important when using gRPC with Go, where the default backoff time for reconnections can be up to 2 minutes. This lengthy delay can lead to a suboptimal user experience, especially in scenarios where the server may temporarily go down and then quickly come back online.
To address this, let’s explore how to reduce the default gRPC connection backoff time from 2 minutes to a more responsive 10 seconds. This adjustment ensures that your Go application reconnects to the server more quickly, improving overall user experience.
Step-by-Step Guide to Reducing gRPC Connection Backoff Time:
1. Import the packages: ensure that your Go program includes the gRPC package (google.golang.org/grpc) and its backoff package (google.golang.org/grpc/backoff).

2. Create a backoff configuration with a maximum delay of 10 seconds:

backoffConfig := backoff.Config{
	MaxDelay: 10 * time.Second, // Maximum backoff delay set to 10 seconds
}

3. Use the custom backoff configuration when creating your gRPC client:

conn, err := grpc.Dial(address, grpc.WithConnectParams(grpc.ConnectParams{
	Backoff: backoffConfig,
}))
if err != nil {
	// Handle error
}

4. Handling Connection:
Utilize the `conn` object for client operations as usual.
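Putting it together, a minimal sketch; the address and the insecure transport are placeholders for your own setup, and the fields other than MaxDelay simply restate gRPC's defaults:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	backoffConfig := backoff.Config{
		BaseDelay:  1.0 * time.Second, // gRPC default
		Multiplier: 1.6,               // gRPC default
		Jitter:     0.2,               // gRPC default
		MaxDelay:   10 * time.Second,  // down from the 120s default
	}

	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoffConfig}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Use conn for your client stubs as usual.
}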
By implementing this custom backoff strategy, your Go application’s gRPC client will attempt to reconnect with a maximum delay of 10 seconds, significantly reducing wait time and enhancing the responsiveness of your application.
Remember, while this setup is generally effective, it’s important to tailor it to your specific use case. Network conditions and server behavior can influence reconnection attempts, so it’s advisable to test thoroughly under various scenarios.
Feel free to share your thoughts or questions in the comments below!
—
Happy coding and stay connected!
sudo apt update && sudo apt upgrade -y && sudo apt-get install -y gnupg software-properties-common
wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
gpg --no-default-keyring --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg --fingerprint
echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] \
https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
sudo apt-get install terraform
terraform -help
Install Netperf
sudo apt-get update -y && sudo apt-get install -y netperf
Start listening on server
netserver -p 16604
Speedtest on client
netperf -H 10.13.0.3 -p 16604 -l 300
sudo apt update -y && sudo apt upgrade -y
sudo apt install -y gcc libreadline-dev
wget https://www.lua.org/ftp/lua-5.4.6.tar.gz -O lua-5.4.6.tar.gz
tar -xvzf lua-5.4.6.tar.gz
cd lua-5.4.6
sudo make linux
sudo make install
sudo apt-get install autoconf cmake pkg-config build-essential -y
Set up an admin user with sudo:
sudo adduser admin
sudo usermod -aG sudo admin
nano docker-compose.yaml
version: "3.8"
services:
wireguardclient:
image: ghcr.io/linuxserver/wireguard:latest
container_name: wireguardclient
cap_add:
- NET_ADMIN
# - SYS_MODULE
environment:
- TZ=UTC
# - PUID=7722
# - PGID=7722
restart: "unless-stopped"
sysctls:
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.default.disable_ipv6=1
volumes:
- /usr/share/zoneinfo/UTC:/etc/localtime:ro
- ./wg0.conf:/config/wg0.conf
- /lib/modules:/lib/modules
web:
image: nginx
container_name: nginx
network_mode: "service:wireguardclient" # <-- important bit, don't forget
volumes:
- ./templates:/etc/nginx/templates
environment:
- NGINX_HOST=foobar.com
- NGINX_PORT=56396
# ubuntu:
# image: ubuntu:22.04
# command: tail -f /dev/null
# container_name: ubuntu
# network_mode: "service:gluetun" # <-- important bit, don't forget
# restart: unless-stopped
#docker exec -it nginx /bin/bash
#docker exec -it ubuntu /bin/bash
#docker exec -it wireguardclient /bin/bash
# apt update
#apt install net-tools curl wget nload htop iputils-ping -y
const crypto = require("crypto");

// Generates a Curve25519 key pair in the base64 format WireGuard uses.
const genKeyPair = () => {
  // "x25519" (not "ex25519") is the algorithm name Node's crypto expects.
  const k = crypto.generateKeyPairSync("x25519", {
    publicKeyEncoding: { format: "der", type: "spki" },
    privateKeyEncoding: { format: "der", type: "pkcs8" }
  });
  return {
    // Strip the fixed DER headers to get the raw 32-byte keys.
    publicKey: k.publicKey.slice(12).toString("base64"),
    privateKey: k.privateKey.slice(16).toString("base64")
  };
};

console.log(genKeyPair());

Install Java and ZooKeeper:
sudo apt-get update -y && sudo apt upgrade -y
sudo apt-get install default-jre zookeeperd -y
echo ruok | telnet localhost 2181
sudo adduser kafka
sudo adduser kafka sudo
sudo su -l kafka
mkdir ~/Downloads
curl "https://downloads.apache.org/kafka/2.8.1/kafka_2.13-2.8.1.tgz" -o ~/Downloads/kafka.tgz
mkdir ~/kafka && cd ~/kafka
tar -xvzf ~/Downloads/kafka.tgz --strip 1
nano ~/kafka/config/server.properties
Add to the end
delete.topic.enable = true
Change log.dirs
log.dirs=/home/kafka/logs
sudo nano /etc/systemd/system/zookeeper.service
[Unit]
Requires=network.target remote-fs.target
After=network.target remote-fs.target
[Service]
Type=simple
User=kafka
ExecStart=/home/kafka/kafka/bin/zookeeper-server-start.sh /home/kafka/kafka/config/zookeeper.properties
ExecStop=/home/kafka/kafka/bin/zookeeper-server-stop.sh
Restart=on-abnormal
[Install]
WantedBy=multi-user.target
sudo nano /etc/systemd/system/kafka.service
[Unit]
Requires=zookeeper.service
After=zookeeper.service
[Service]
Type=simple
User=kafka
ExecStart=/bin/sh -c '/home/kafka/kafka/bin/kafka-server-start.sh /home/kafka/kafka/config/server.properties > /home/kafka/kafka/kafka.log 2>&1'
ExecStop=/home/kafka/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal
[Install]
WantedBy=multi-user.target
sudo systemctl start kafka
sudo systemctl status kafka
sudo systemctl enable zookeeper
sudo systemctl enable kafka
To get started, make a new topic called TutorialTopic:
~/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic TutorialTopic
The string "Hello, World" should now be published to the TutorialTopic topic:
echo "Hello, World" | ~/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic TutorialTopic > /dev/null
~/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic TutorialTopic --from-beginning
echo "Hello World from Sammy at DigitalOcean!" | ~/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic TutorialTopic > /dev/null
sudo apt-get update -y && sudo apt-get upgrade -y
sudo apt-get install -y squid apache2-utils
sudo htpasswd -c /etc/squid/passwd user1
sudo rm /etc/squid/squid.conf
sudo nano /etc/squid/squid.conf
http_port 9001
auth_param basic program /usr/lib/squid3/basic_ncsa_auth /etc/squid/passwd
auth_param basic realm proxy
acl authenticated proxy_auth REQUIRED
http_access allow authenticated
http_access deny all
sudo systemctl restart squid
curl https://ipinfo.io/json --proxy user1:fuDjcLDpDReZ5AmK@127.0.0.1:9001
curl -OL https://golang.org/dl/go1.25.5.linux-amd64.tar.gz
sha256sum go1.25.5.linux-amd64.tar.gz
Sha256: 9e9b755d63b36acf30c12a9a3fc379243714c1c6d3dd72861da637f336ebb35b
sudo rm -rf /usr/local/go
sudo tar -C /usr/local -xvf go1.25.5.linux-amd64.tar.gz && rm -rf go1.25.5.linux-amd64.tar.gz
nano ~/.profile
Then, add the following to the end of this file:
export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin
And then reload the profile:
source ~/.profile
Add go to the global path:
sudo nano /etc/profile
export PATH=$PATH:/usr/local/go/bin
If you want to clear the Go cache:
go clean -cache
If you want to force-reinstall all globally installed binaries, make sure you have go-global-update installed:
go install github.com/Gelio/go-global-update@latest
# dry run
go-global-update -n
# force upgrade all
go-global-update -f
To install on RPi:
curl -OL https://golang.org/dl/go1.25.1.linux-arm64.tar.gz
sha256sum go1.25.1.linux-arm64.tar.gz
Sha256: 65a3e34fb2126f55b34e1edfc709121660e1be2dee6bdf405fc399a63a95a87d
sudo rm -rf /usr/local/go
sudo tar -C /usr/local -xvf go1.25.1.linux-arm64.tar.gz
sudo apt install certbot python3-certbot-nginx -y
sudo nano /etc/nginx/sites-available/example.com

server {
        listen 80;
        listen [::]:80;

        root /var/www/html;

        # Add index.php to the list if you are using PHP
        index index.html index.htm index.nginx-debian.html;

        server_name example.com;

        location / {
                proxy_set_header Host $host;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_pass http://127.0.0.1:8080;
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection "upgrade";
                #try_files $uri $uri/ =404;
        }

        #location ~ /\.ht {
        #       deny all;
        #}
}

sudo ln -fs /etc/nginx/sites-available/example.com /etc/nginx/sites-enabled/example.com
sudo certbot --nginx -d example.com
sudo systemctl status certbot.timer
Check if certbot autorenew is set up properly:
sudo certbot renew --dry-run
sudo ufw allow 'Nginx Full'
To set up DNS-based SSL one-time only:
sudo certbot --manual --preferred-challenges dns certonly -d example.com

sudo apt update -y && sudo apt upgrade -y
sudo apt install linux-headers-$(uname -r) wireguard wireguard-dkms net-tools -y
sudo nano /etc/wireguard/wg0.conf

[Interface]
Address = 10.10.0.1/24
SaveConfig = true
ListenPort = 51820
PrivateKey = SERVER_PRIVATE_KEY
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

sudo wg-quick up wg0
sudo wg show wg0
sudo systemctl enable wg-quick@wg0

For NAT to work, we need to enable IP forwarding. Open the /etc/sysctl.conf file and add or uncomment the following line:
sudo nano /etc/sysctl.conf
net.ipv4.ip_forward=1
sudo sysctl -p

wg genkey | sudo tee /etc/wireguard/privatekey | wg pubkey | sudo tee /etc/wireguard/publickey
sudo nano /etc/wireguard/wg0.conf

For setting up IP port forwarding, add the subnet in AllowedIPs in wg0.conf and also:
PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE; iptables -t nat -A POSTROUTING -o wg0 -j MASQUERADE
PostUp = /etc/wireguard/port-up.sh
PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE; iptables -t nat -D POSTROUTING -o wg0 -j MASQUERADE
PostDown = /etc/wireguard/port-down.sh

In the port-up.sh:
sudo iptables -t nat -A PREROUTING -p tcp --dport 5060 -j DNAT --to-destination 10.30.30.14:5060
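A matching port-down.sh would delete the same rule (a sketch mirroring the rule above):

sudo iptables -t nat -D PREROUTING -p tcp --dport 5060 -j DNAT --to-destination 10.30.30.14:5060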