# Quick TCP connectivity check: open a connection to $IP:$PORT, then close it
# by feeding "exit" to telnet's stdin.
# NOTE(review): the original pasted this line twice — once is enough.
# Requires IP and PORT to be set in the environment.
echo exit | telnet "$IP" "$PORT"
# Install recap git clone https://github.com/rackerlabs/recap.git cd recap ./recap-installer # If there is no MySQL or it can't connect to it edit: /etc/recap USEMYSQL=no USEMYSQLPROCESSLIST=no Edit cron if you want to run more often than every 10 minutes: /etc/cron.d/recap ================================================= LOG ANALYSIS SCRIPTS >> Check for memory usage grep "buffers/cache" /var/log/recap/resources_* | sed 's/^.*resources_\(.*\)\.log.*cache:\s*\([0-9]*\)\s*\([0-9]*\)$/FREE: \3| USED: \2| DATE: \1/ ' >> Running the following to check if the memory went less than 4999MB grep "buffers/cache" /var/log/recap/resources_* | sed 's/^.*resources_\(.*\)\.log.*cache:\s*\([0-9]*\)\s*\([0-9]*\)$/FREE: \3| USED: \2| DATE: \1/ ' | egrep -v "FREE: [65][0-9]{3}\|" | wc -l
# For cron/crontab */5 * * * * /path/myscript.sh > /dev/null 2>&1 ================================================== # Redirect ALL output/error automatically to a file # AND print to console too LOG_OUTPUT=output_error.log exec 1> >(tee -i ${LOG_OUTPUT}) 2>&1 ================================================== # *ALL* redirected to the log files: NO console output at all LOG_OUTPUT=output.log exec 1>>${LOG_OUTPUT} 2>&1 ================================================== # All redirected to 2 different log files LOG_OUTPUT=setup_output.log LOG_ERROR=setup_error.log exec 3>&1 1>>${LOG_OUTPUT} exec 2>>${LOG_ERROR} # use 'P "my message"' instead of echo P () { # Print on console AND file echo -e "\n$1" | tee /dev/fd/3 # Print ONLY on console #echo -e "\n$1" 1>&3 } ================================================== # ALL stdout and stderr to $LOG_OUTPUT # Also stderr to $LOG_ERROR (for extra checks) # P function to print to the console AND logged into $LOG_OUTPUT LOG_OUTPUT=output.log LOG_ERROR=error.log exec 3>&1 1>>${LOG_OUTPUT} exec 2> >(tee -i ${LOG_ERROR}) >> ${LOG_OUTPUT} # use 'P "my message"' instead of echo P () { # Print on console AND file echo -e "$1" | tee /dev/fd/3 # Print ONLY on console #echo -e "\n$1" 1>&3 } # use 'P "my message"' instead of echo to print in BLUE P () { BLUE='\033[1;34m' NC='\033[0m' # No Color echo -e "\n${BLUE}${1}${NC}" | tee /dev/fd/3 }
# VSFTPD chroot configuration >> Create a no-shell user useradd -d $HOME_PATH -s /sbin/nologin $FTPUSER && passwd $FTPUSER !!!MAKE SURE TO CHMOD 755 the parent directory!!! yum -y install vsftpd chkconfig vsftpd on sed -i -e 's/IPTABLES_MODULES=""/IPTABLES_MODULES="ip_conntrack_ftp"/g' /etc/sysconfig/iptables-config modprobe ip_conntrack_ftp echo "rack" >> /etc/vsftpd/vsftpd.chroot_list mv /etc/vsftpd/vsftpd.conf /etc/vsftpd/vsftpd.conf.ORIG cat >/etc/vsftpd/vsftpd.conf <<EOF # vsftpd.conf - PASSIVE anonymous_enable=NO local_enable=YES write_enable=YES local_umask=022 dirmessage_enable=YES xferlog_enable=YES listen_port=21 connect_from_port_20=YES xferlog_std_format=YES listen=YES pam_service_name=vsftpd userlist_enable=YES tcp_wrappers=YES pasv_min_port=60000 pasv_max_port=65000 # Add in /etc/vsftpd/vsftpd.chroot_list who you do *NOT* want to be chrooted chroot_local_user=YES chroot_list_enable=YES chroot_list_file=/etc/vsftpd/vsftpd.chroot_list # RackConnect # pasv_enable=YES # pasv_min_port=60000 # pasv_max_port=60100 # pasv_address=<publicRCip> (might not be required) # Logging xferlog_enable=YES log_ftp_protocol=NO syslog_enable=NO vsftpd_log_file=/var/log/vsftpd.log EOF >> Make sure to comment out "auth required pam_shells.so" in /etc/pam.d/vsftpd (errors authenticating users with a /bin/false shell): sed -i 's/^\(auth.*required.*pam_shells\.so.*$\)/#\1/' /etc/pam.d/vsftpd >> Enable firewall ports (in Rackconnect, open the same on the physical firewall): iptables -I INPUT -p tcp --dport 21 -m comment --comment "FTP" -j ACCEPT iptables -I INPUT -p tcp -m multiport --dports 60000:65000 -m comment --comment "FTP passive mode ports" -j ACCEPT /etc/init.d/iptables save >> Restart the service service vsftpd restart If -> vsftpd: refusing to run with writable root inside chroot () => allow_writable_chroot=YES ======================================================= SFTP Jailed: !!!! 
remember that the user's home directory must be owned by root groupadd sftponly >> 1 domain managed by 1 or more users: useradd -d /var/www/vhosts -s /bin/false -G sftponly bob >> 1 user managing multiple domains: useradd -d /var/www/vhosts -s /bin/false -G sftponly bob SFTPUSER=bob SFTPUSERPASS=$(tr -cd '[:alnum:]' < /dev/urandom | fold -w12 | head -n1) echo "$SFTPUSERPASS" | passwd --stdin $SFTPUSER && echo -e "\nsftpuser: $SFTPUSER\npassword: $SFTPUSERPASS" >> /etc/ssh/sshd_config #Subsystem sftp /usr/libexec/openssh/sftp-server Subsystem sftp internal-sftp >> 1 domain managed by 1 or more users: Match Group sftponly ChrootDirectory %h X11Forwarding no AllowTCPForwarding no ForceCommand internal-sftp >> 1 user managing multiple domains: Match Group sftponly ChrootDirectory /var/www/vhosts/%u X11Forwarding no AllowTCPForwarding no ForceCommand internal-sftp sshd -t service sshd restart >> Set correct permissions!!! chmod 755 /var/www/ chown root:root /var/www chown -R root:sftponly /var/www/* find /var/www/ -type d | xargs chmod 2775 find /var/www/ -type f | xargs chmod 644
On Ubuntu 14 (version 2.1.x)
# Install lsyncd, then find out which config file the init script loads.
# (the original paste fused these two commands onto one line, which would
# have made apt-get treat "grep", "CONFIG=" etc. as package names)
apt-get install lsyncd
grep "CONFIG=" /etc/init.d/lsyncd
-> it should be /etc/lsyncd/lsyncd.conf.lua
# Directory that will hold the per-project lsyncd config snippets
mkdir -p /etc/lsyncd/conf.d/
Backup the original file and create a new conf file
# Move the stock config aside, then install a conf.d-style loader that
# requires every *.lua snippet found in /etc/lsyncd/conf.d/.
mv /etc/lsyncd/lsyncd.conf.lua{,.ORIG}
cat <<'EOF' > /etc/lsyncd/lsyncd.conf.lua
-- DO NOT EDIT THIS FILE
settings {
   logfile        = "/var/log/lsyncd.log",
   statusFile     = "/var/log/lsyncd-status.log",
   statusInterval = 5
}
-- conf.d style configs
package.path = "/etc/lsyncd/conf.d/?.lua;" .. package.path
-- "\\." in the Lua string gives sed the escaped/anchored pattern \.lua$,
-- so only the trailing ".lua" extension is stripped (the original
-- 's/.lua//' would also eat e.g. "xlua" anywhere in the name)
local f = io.popen("ls /etc/lsyncd/conf.d/*.lua|xargs -n1 basename|sed 's/\\.lua$//'")
for mod in f:lines() do
   require(mod)
end
-- // DO NOT EDIT THIS FILE
EOF
Create the config file for 2 web nodes called w01 and w02.
These 2 nodes have the following IPs:
10.180.3.201 w01
10.180.3.202 w02
# Per-node sync config: replicate /var/www/vhosts/ to each web node via rsync
# over ssh, skipping anything listed in the exclusions file.
# NOTE(review): the original example used 10.180.3.322, which is not a valid
# IPv4 address (octet > 255) — corrected to .202 to match a plausible w02.
cat <<'EOF' > /etc/lsyncd/conf.d/w0x.lua
-- w01 and w02
servers = {
  "10.180.3.201",
  "10.180.3.202",
}

for _, server in ipairs(servers) do
  sync {
    default.rsync,
    source = "/var/www/vhosts/",
    target = server..":/var/www/vhosts/",
    rsync = {
      compress = true,
      archive  = true,
      verbose  = true,
      rsh = "/usr/bin/ssh -p 22 -o StrictHostKeyChecking=no"
    },
    excludeFrom = "/etc/lsyncd/conf.d/w0x.exclusions"
  }
end
EOF
Now let’s create the exclusions file. This will be the list of paths that won’t be sync’d.
# Exclusion list — paths are RELATIVE to the sync source (/var/www/vhosts/)
cat <<'EOF' > /etc/lsyncd/conf.d/w0x.exclusions www.mytestsite.com/ EOF
NOTE! For exclusions, please remember to put the relative path, NOT the full path. In this case, it excludes www.mytestsite.com/ from /var/www/vhosts
# Rotate the lsyncd logs. The loader config writes /var/log/lsyncd.log and
# /var/log/lsyncd-status.log, so match "lsyncd*log" directly under /var/log —
# the original pattern /var/log/lsyncd/*log pointed at a directory that is
# never created by this setup, so nothing would ever have been rotated.
cat > /etc/logrotate.d/lsyncd << EOF
/var/log/lsyncd*log {
    missingok
    notifempty
    sharedscripts
    postrotate
        if [ -f /var/lock/lsyncd ]; then
            /sbin/service lsyncd restart > /dev/null 2>/dev/null || true
        fi
    endscript
}
EOF
$ lsyncd --nodaemon -log Exec /etc/lsyncd/lsyncd.conf.lua
ERROR: Terminating since out of inotify watches. Consider increasing /proc/sys/fs/inotify/max_user_watches
Temporary fix:
# echo 100000 > /proc/sys/fs/inotify/max_user_watches
Permanent fix (ALSO write sysctl.conf):
# echo 100000 > /proc/sys/fs/inotify/max_user_watches # echo "fs.inotify.max_user_watches = 100000" >> /etc/sysctl.conf
Manually create list.txt with user:doc_root
e.g.:
mydomain.com:/var/www/vhost/mydomain.com
example.com:/var/www/vhost/example.com
Get commands to create FTP users
# Print (don't run) the useradd commands built from list.txt ("user:doc_root"
# per line). awk reads the file directly — no need to pipe it through cat.
awk -F: '{print "useradd -d ",$2, "-s /bin/false -c TICKET_NUMBER ",$1 }' list.txt
Get commands to set FTP permissions (if doc_root exists already)
# Print (don't run) the chown commands — only valid if the doc_root already
# exists. awk reads the file directly — no need to pipe it through cat.
awk -F: '{print "chown -R",$1, $2 }' list.txt
Generate and Assign random passwords to the users.
# For every user in list.txt ("user:doc_root" per line): generate a random
# 12-char alphanumeric password, set it via `passwd --stdin` (RHEL/CentOS
# only), and append the credentials to pass.txt for the hand-off note.
for USER in $(awk -F: '{print $1}' list.txt) ; do
  PASS=$(tr -cd '[:alnum:]' < /dev/urandom | fold -w12 | head -n1)
  echo "$PASS" | passwd --stdin "$USER"
  echo -e "username: $USER\npassword: $PASS\n" | tee -a pass.txt
done
echo -e "\n========================\nHere the credentials:"
cat pass.txt
Create a list of vhosts’ paths: vhosts.txt
Example with only .com domains:
/var/www/domain1.com
/var/www/domain2.com
/var/www/domain3.com
Use a regex for sed to extract the vhost name, removing dots (example based on the example above)
This will return a list of PATH and VHOSTNAME. We will use VHOSTNAME as USER for that path
# Emit "PATH VHOSTNAME" pairs into list.txt: print the path, then derive the
# vhost name by stripping everything up to "www/" and the trailing "com",
# and removing any remaining dots (so it is usable as a username).
# `while read` replaces the backtick/for antipattern (no word-splitting,
# no globbing of the input lines).
while IFS= read -r vhost_path ; do
  printf '%s ' "$vhost_path"
  echo "$vhost_path" | sed 's/^.*www\/\(.*\)com.*$/\1/' | sed 's/\.//g'
done < vhosts.txt >> list.txt
Print out the commands to run to add FTP users (no SSH)
Once checked the output, run these lines
# Print (don't run) the useradd commands; list.txt lines are "PATH VHOSTNAME".
# awk reads the file directly — no need to pipe it through cat.
awk '{print "useradd -d ",$1, "-s /bin/false -c COMMENT_HERE ",$2 }' list.txt
(for sftp only):
# sftp-only variant: also add the user to the sftponly group.
# The original emitted "-cCOMMENT_HERE" with no space; useradd happens to
# accept the attached form, but keep it consistent with the variant above.
awk '{print "useradd -d ",$1, "-s /bin/false -G sftponly -c COMMENT_HERE ",$2 }' list.txt
This will print out commands to run to assign user:apache_group to the vhosts’ paths
# Print (don't run) the chown commands assigning user:apache_group to each
# vhost path. awk reads the file directly — no need for cat.
awk '{print "chown -R ",$2 ":www-data ",$1 }' list.txt
(for sftp only):
# sftp chroot variant: the vhost root itself must be owned by root, while the
# content underneath belongs to the user. awk reads the file directly.
awk '{print "chown root:root",$1 }' list.txt
awk '{print "chown -R ",$2":"$2 ,$1"/*"}' list.txt
Set g+s on vhosts to preserve directory owner
[TO CHECK]
# Print (don't run) the setgid commands. list.txt lines are "PATH VHOSTNAME"
# pairs, so take only the first field — the original backtick/for loop
# word-split each line and also emitted a bogus chmod for every vhost name.
while read -r path _ ; do echo "chmod g+s $path" ; done < list.txt
[THIS EXECUTE]
# Execute the setgid change on the PATH field only (see the check step above:
# the word-splitting loop would also have tried to chmod each vhost name).
while read -r path _ ; do chmod g+s "$path" ; done < list.txt
Create list of random passwords using pwgen
# Build "entry:password" pairs with pwgen (16 chars, secure, no ambiguous chars).
# NOTE(review): the backtick/for loop iterates over WORDS, not lines — if
# list.txt holds "PATH VHOSTNAME" pairs (as built earlier in these notes),
# every word gets its own password line. Confirm list.txt holds one
# username per line before using this as-is.
for i in `cat list.txt` ; do p=$(pwgen -s -B 16 1) ; echo "$i:$p" ; done > list_u_p.txt
Create list of random passwords using openssl
# Same as above but using openssl for the random password (12 bytes, base64).
# NOTE(review): same caveat as the pwgen variant — the loop iterates over
# words, so list.txt must contain one username per line.
for i in `cat list.txt` ; do p=$(openssl rand -base64 12) ; echo "$i:$p" ; done > list_u_p.txt
Apply these passwords automatically
# Apply the generated passwords: list_u_p.txt is "user:password" per line.
# Splitting with IFS=: in read replaces the original backticks plus two awk
# forks per line; the passwords produced above contain no colons or spaces,
# so the behavior is identical.
while IFS=: read -r USER PASS ; do
  echo -e "$PASS\n$PASS" | passwd "$USER"
done < list_u_p.txt
Print output for reference
# Print the host plus all credentials for the ticket/hand-off note.
# awk reads the file directly — no need to pipe it through cat.
hostname
awk -F":" '{print "\nusername:", $1, "\npassword:", $2}' list_u_p.txt
Create file without replacing variables:
# Quoted delimiter ('EOF'): the body is written LITERALLY — no variable or
# command expansion. (Restored to multi-line form: a here-doc collapsed onto
# one line, as in the original paste, is not valid shell.)
cat <<'EOF' > /path/file
============================
My name is ${0}
I was input via user data
============================
EOF
If you check /path/file you will see exactly the content above.
Create file REPLACING the variables while creating:
# Unquoted delimiter (EOF): variables and command substitutions ARE expanded
# while the file is written. (Restored to multi-line form: a here-doc
# collapsed onto one line, as in the original paste, is not valid shell.)
cat <<EOF > /path/file
============================
My name is ${0}
I was input via user data
============================
EOF
In this example, the variable ${0} will be replaced during the creation of the file, hence the content will display your username.
# Current folder space du -sh <path> # 10 biggest folders du -m <path> | sort -nr | head -n 10 # Check high directories usage. du -hcx --max-depth=5 | grep '[0-9]G' | sort -nr # Exclude a path from the final calculation cd /path du -sh --exclude=./relative/path/to/uploads # Check APPARENT size du -h --apparent-size /path/file # Check how much space is "wasted": lsof | grep deleted | sed 's/^.* \(REG.*deleted.*$\)/\1/' | awk '{print $5, $3}' | sort | uniq | awk '{sum += $2 } END { print sum }' # >> *if* the number is like "1.5e+10", you might need to use this to see that converted in MB or GB lsof | grep deleted | sed 's/^.* \(REG.*deleted.*$\)/\1/' | awk '{print $5, $3}' | sort | uniq | awk '{sum += $2 } END { print sum " bytes - " sum/1024**2 " MB - " sum/1024**3 " G" }' # Check the biggest files: lsof | grep deleted | sed 's/^.* \(REG.*deleted.*$\)/\1/' | awk '{print $5, $3}' | sort | uniq | awk '{print $2, $1}' | sort -nr >> than you can grep the file name from the output of "lsof | grep deleted" and check for the PID that holds that file (second column) >> and issue the following command: kill -HUP <PID> >> And check again. This should release the used file.
Apparent size is the number of bytes your applications think are in the file. It’s the amount of data that would be transferred over the network (not counting protocol headers) if you decided to send the file over FTP or HTTP. It’s also the result of
cat theFile | wc -c
, and the amount of address space that the file would take up if you loaded the whole thing usingmmap
.Disk usage is the amount of space that can’t be used for something else because your file is occupying that space.
In most cases, the apparent size is smaller than the disk usage because the disk usage counts the full size of the last (partial) block of the file, and apparent size only counts the data that’s in that last block. However, apparent size is larger when you have a sparse file (sparse files are created when you seek somewhere past the end of the file, and then write something there — the OS doesn’t bother to create lots of blocks filled with zeros — it only creates a block for the part of the file you decided to write to).
Source (clarification): http://stackoverflow.com/questions/5694741/why-is-the-output-of-du-often-so-different-from-du-b
Here some handy notes!
-n -20 => HIGHEST
-n 19 => LOWEST
$ ps axl
nice -10 <command name> and nice -n 10 <command name> will do the same thing (both the above commands will make the process priority to the value 10).
A major misconception about nice command is that nice -10 <command name> will run that process with -10 (a higher priority).
In order to assign -10 priority to a command then you should run it as shown below.
nice --10 <command name>
http://www.cyberciti.biz/faq/change-the-nice-value-of-a-process/
http://www.thegeekstuff.com/2013/08/nice-renice-command-examples
Renice (change priority of a running process).
Example – put a PID to LOWER priority
$ renice 19 PID
http://www.cyberciti.biz/faq/howto-change-unix-linux-process-priority/
In the Cloud era, virtual servers come with no swap. And it’s perfectly fine, cause swapping isn’t good in terms of performace, and Cloud technology is designed for horizontal scaling, so, if you need more memory, add another server.
However, it could be handy sometimes to have a some more room for testing (and save some money).
So here below one quick script to automatically create a 4GB swap file, activate and also tune some system parameters to limit the use of the swap only when really necessary:
# Create and enable a 4 GB swap file, persist it across reboots, and tune the
# kernel so the swap is only used as a last resort.
# (The whole snippet assumes root — the original mixed in one `sudo tee`,
# which is inconsistent with the other commands; a plain append is used here.)
fallocate -l 4G /swapfile
chmod 600 /swapfile                                # swap must not be world-readable
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' >> /etc/fstab    # mount at boot
sysctl vm.swappiness=0                             # apply now...
echo 'vm.swappiness=0' >> /etc/sysctl.conf         # ...and persist
tail /etc/sysctl.conf                              # eyeball the result
NOTES:
Swappiness: setting to zero means that the swap won’t be used unless absolutely necessary (you run out of memory), while a swappiness setting of 100 means that programs will be swapped to disk almost instantly.