Linux
These are my personal notes that I use as a quick help in my work.
select instance_name, host_name from v$instance;
Dash '-' in place of an input file indicates standard input, and dash in place of an output file indicates standard output.
Double dash '--' indicates end of options. Useful for filenames starting with dash: ls -ltr -- -a_file
Another option: ls -ltr ./-a_file
cmd; cmd
Semi-colon separates two commands on the same line
Wildcards:
* Zero or more chars except a leading '.'
? Any single character
[aeiouAEIOU] Set
[A-Z] Range
[^set] Matches characters NOT in the set
[!set] Same as previous. For a literal ^ or !, don't put it as the first character
[]set] Literal bracket
[-set] Literal dash first or last
[set-] Literal dash first or last
{a,b,c} Expands to "a b c"
Filename{1,2,3}.txt Expands to "Filename1.txt Filename2.txt Filename3.txt"
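A couple of hedged examples of brace expansion in practice (the file and directory names are illustrative):
cp config.txt{,.bak}               # expands to: cp config.txt config.txt.bak
mkdir -p project/{src,doc,test}    # creates the three sub-directories in one command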
~username expands to the user's home directory
~ expands to my home directory
Escape control characters with ^V; in particular, ^V^I gives a tab character.
apropos something
man -k something : search the whatis database for strings
whatis something
man a_command
more aFile
which aCommand
alias
alias aString
alias alias_string='command_string'
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'
type a_file
a_command --help
echo $SHELL
man ascii
find . -name doc
find . -iname "whatever"
find . -mtime -1 -print
find find -print
find / -name .profile -print
find . -name file\*
find . -name file\* -group adm
find . -name file\* -o -group adm
find . -name '*' -mtime -1
find . -name '*' -mtime +7 -exec rm -f {} \;
find . -name '*sh' -print0 | xargs -0 grep something
find . -type f -exec grep -il something {} \;
find . -type f -name "*py" -exec grep -iH something {} \;
For viewing files, see further below
cd
cd ..
locate filename_or_directory
chgrp grp file
chmod ugo+rwx filename
chmod go-r filename
chown new_user[:new_group] the_file
cp [-i]
ls -lt
ls -ltr
ls -a
ls -l
fdisk -l
cat /proc/partitions
File systems: cat /etc/fstab ; mounted file systems: cat /etc/mtab
df -k
df -h
ln -s /realpath link
find . -xtype l 2> /dev/null : find broken symbolic links (ln -s). Optionally send errors to the bit bucket so as not to list errors such as permission denied.
head -20 file_name
tail -20 file_name
tail -1 -f file_name
tail `ls -t1 thread* | head -n 1`
du -rk .
du -k : KBytes
du -r : Show error if directory not accessible (not on Ubuntu)
du -s : current directory only
du -kr | sort -nr | head -n 20
du -k | sort -nr | head -n 20
du -sk /* : show size of each directory in KB (in the example: root directories)
du -Sht1G *
  -t1G : show files larger than 1G
  -h : human readable size
  -S : do not include large sub-directories
mkdir
mkdir a/b/c
mv
pwd
pushd dir
popd
dirs
rm [-i] filename
rmdir [-rf] dir
umask
zcat
mkdir /mnt/usb-drive
mount /dev/sda1 /mnt/usb-drive
cat file
cat f1 > f2
cat f1 >> f2
cat -vet a_file_with_null_chars | grep "\^A"
more file
cmd | more ("pipe" character)
less filename
strings aFile
file aFile
umount the file system before running fsck. Then run fsck /dev/abcd : see man for options.
You may want to force a test of bad clusters and verbose output. Fedora: options fv.
If trouble shows up, go into single-user mode (init 1), run fsck several times until no damage is reported, and reboot.
File systems: partition with fdisk, then create the file system, and mount it.
Comments on NFS (Network File System):
Time stamps are defined by the client, and different clients can have different times.
NFS can't do file locking, so each NFS comes with a file locking daemon, for files accessed by several clients.
NFS does not know if files are open or closed.
id
grep $USER /etc/passwd
grep $USER /etc/group
useradd -m -s /bin/bash user_name   # simplest
useradd -g primary_group -G secondary_group -d /home/usrs/default_dir -m the_new_user
  -s defines the default shell
  -m creates a home directory
  -c "fname the_last_nm" is the full name (optional)
  -G sambashare adds to a group
usermod ...
userdel -r the_user
groupadd
groups a-user
usermod -a -G <groupname> username
usermod -g <groupname> username
passwd [the_user]
yppasswd
passwd --expire the_user
rlogin host-name
ssh
logout or ctrl-D
su
su - username
sudo -u username command   (not "sudo command username")
sudo command
runuser - username -c command
who
w -u
w
last [username]
The /etc/passwd file contents are:
account name : pw placeholder (pw in shadow) : user ID : default group ID : comment : home directory : login shell
Note that if the account name changes but not the user ID, then file permissions are not changed.
The /etc/group file contents are:
group name : pw placeholder : group ID : member list
Edit password file with vipw, edit groups with vigr. Change passwords for groups with gpasswd.
Steps to add a user in Linux (or use the useradd command for adding and userdel for deleting; see useradd.conf file for configuration):
chage : configure password aging (Linux). The command passwd -Sa gives the status of all users: L means locked, P means a password is set, NP means no password (Linux).
Local | Remote Host
---|---
rlogin host_name | The user's .rhosts file contains the name of the originating host. Otherwise prompt for password. The .profile is bypassed.
Create key:
ssh-keygen -t rsa -f ~/.ssh/aName
: Creates aName and aName.pub. Share the .pub file. If no name, it generates id_rsa and id_rsa.pub
Or: ssh-keygen -t dsa -b 1024 -f ~/.ssh/aName
cp authorized_keys authorized_keys_bkup; cat tmp >> authorized_keys : append the received public key to authorized_keys on the remote host
ssh -i ~/.ssh/aName username@remotehost
ssh-keygen -f ~/.ssh/id_rsa.pub -m pem -e
: Convert from pub format to pem
ssh-keygen -f ~/.ssh/id_rsa -p
: Change passphrase
The public key is in the file with the extension .pub.
Secure the files with chmod go-wrx *.pub
Log in with ssh -i name_of_private_key_file server_name_or_ip
ssh -i .ssh/id_file_name remote_host "ls -l"
scp -i .ssh/id_file_name the_file remote_username@remote_host:the_file
sftp -vb batch_file remote_username@remote_host
(-v is for verbose)
Start ssh agent
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_my
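A minimal end-to-end sketch of the key-based login described above (host and key names are hypothetical):
ssh-keygen -t rsa -f ~/.ssh/aName                  # creates aName and aName.pub
ssh-copy-id -i ~/.ssh/aName.pub user@remotehost    # appends the public key to the remote authorized_keys
ssh -i ~/.ssh/aName user@remotehost                # log in using the key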
Secure the Server
See http://phanvinhthinh.blogspot.com/2010/02/how-to-secure-your-freenas-server.html
Copy /etc/ssh/sshd_config and edit:
PasswordAuthentication no # comment out to allow ssh-copy-id
AuthenticationMethods publickey # comment out to allow ssh-copy-id
PubkeyAuthentication yes # comment out to allow ssh-copy-id
PermitRootLogin no
PermitEmptyPasswords no
# other changes, to be confirmed:
ChallengeResponseAuthentication no
UsePAM no
RSAAuthentication yes
# Limit User ssh access: by default, all systems user can login via SSH using their password or public key
AllowUsers chris someone
# restart
systemctl restart sshd
config file:
Host my-ssh-host
HostName 10.0.0.5
Port 22
User myuser
IdentityFile ~/.ssh/thekey # Not the .pub file, but the secret key file
IdentitiesOnly yes
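With the Host entry above in ~/.ssh/config, the connection can be made by alias (a hedged shortcut):
ssh my-ssh-host    # equivalent to: ssh -p 22 -i ~/.ssh/thekey myuser@10.0.0.5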
ps $$
ps
ps ax / ps -A
ps -ef
ps -f -u a_user
ps -ef
ps -uuser1,user2
ps -t console
ps -o pid,user,stat
ps -a
ps g
ps l
ps -u username
ps u
ps v
ps aux
ps aux | grep abc
pgrep abc
ps aux | grep bash
ps x
pstree
top
monitor
free -m
vmstat
kill
kill -9
Nice values from -20 to 19. 0 is default. -20 is highest priority. A "nice" process is a process that is nice to others and is willing to wait more.
renice 0 pid
renice +1 pid
renice -1 pid
renice -n -2 pid
kill pid
kill -9 pid
kill -STOP pid
kill -CONT pid
pkill firefox   # kill processes matching "firefox"
ps aux | grep Xorg
sudo kill -9 pid-of-Xorg
# or pkill -9 Xorg ???
sudo init 5 # this goes back to graphical interface
sudo init 3
ps output columns:

| Column | Meaning |
|---|---|
| pid | process ID |
| tty | Terminal |
| stat | See below |
| time | CPU time consumed |
| cmd | command |
| F | Process flags |
| S | Same as stat |
| UID | |
| PPID | Parent process ID |
| C %CPU | % of CPU |
| PRI | Priority state (ignore) |
| NI | Relative runtime priority |
| ADDR | |
| SZ | Mem size (blocks of 1K) |
| WCHAN | Where in kernel space |
| STIME | Start time |
| %MEM | |
| PSR | Processor |

Columns shown by option (x marks as in the original table):

| Option | pid | tty | stat | time | cmd | F | S | UID | PPID | C %CPU | PRI | NI | ADDR | SZ | WCHAN | STIME | %MEM | PSR |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Default | x | x | x | x | x | | | | | | | | | | | | | |
| -l | x | x | x | x | x | x | x | x | x | x | x | x | x | | | | | |
| -f | x | x | x | x | x | x | x | x | | | | | | | | | | |
| -u | x | x | x | | | | | | | | | | | | | | | |

Stat codes:
U/D | Sleep (uninterruptible, input/output blocked) |
R | Runnable, i.e. in the run queue |
S | Sleeping (less than about 20 sec) |
I | Idle (sleeping more than about 20 sec) |
T | Traced or stopped (could be a ^Z by user) |
Z | Zombie process |
H | Halted process |
Tru64
ps -A -o pmem,pid,rss,command | sort -nr | head -20
ps -A -o vsz,pcpu,time,user,nswap
ps -A -o pmem,pid,rss,vsz,pcpu,nswap,time,stime,user,command | sort -nr | head -20
#!/usr/bin/ksh
ps -A -o pcpu,psr,pmem,rss,vsz,nswap,pid,time,stime,user,command | head -1
ps -A -o pcpu,psr,pmem,rss,vsz,nswap,pid,time,stime,user,command | grep -v "\%CPU" | sort -nr | head -10
echo
ps -A -o pmem,rss,vsz,nswap,pcpu,psr,pid,time,stime,user,command | head -1
ps -A -o pmem,rss,vsz,nswap,pcpu,psr,pid,time,stime,user,command | grep -v "\%CPU" | sort -nr | head -10
swapon -s (with root)
?? box at hbsp:
ps -o pcpu,vsz,ruser,pid,ppid,nice,time,tty -A | sort -n
Explanation of columns for "vmstat n s" (first line is since boot):
View uptime by looking at this file: cat /proc/loadavg
Lilo's purpose is to start up the second stage loader. See /usr/doc/lilo for doc. The configuration is in /etc/lilo.conf
The next step is loading the kernel. Then the init program is run. The init reads the /etc/inittab file.
Typically, the inittab will first tell the init program to run an initialization script such as /etc/init.d/rcS for Debian. Then the scripts for each run-level are executed. The filesystems in /etc/fstab are checked (fsck) and mounted (mount).
The inputrc variable says which inputrc program to use to input from the keyboard. The .inputrc could be in the user's home directory, in /etc/inputrc, or it could point to /etc/profile.
shutdown -h now
shutdown -r now
>>> b
systemctl suspend
man init; man inittab : specifics for startup/shutdown
Set up terminal (put in .profile, or whatever):
stty erase "^H" kill "^U" intr "^C" eof "^D"   # set terminal keys; stty erase "^H" configures the backspace key
eval `tset -options`   # set the device-dependent terminal type and return the type
set -o vi   # access to history with the esc+k keys
set filec   # auto-complete filenames with esc+esc; with set -o vi it is esc+\, and auto-complete on bash is tab
/etc/inittab: format is unique ID : run levels : action : process
Levels are (typical SysV run levels; details vary by distribution): 0 halt, 1 single user, 2-5 multi-user (5 usually with graphical login), 6 reboot.
The /etc/rc.d directory has the configurations of each run level. See the current run level with runlevel.
The symbolic links with K correspond to processes that are stopped, and symbolic links with S correspond to those that are started. Do ./xyz status to see if one of them is running.
Get out of the graphical interface (GUI): sudo init 3
who
nohup command &
Simple script. The #! is called a shebang (from sharp-bang) and indicates that the file is a script. The operating system will try to execute it using the interpreter specified by the rest of the first line, so it should correspond to a valid interpreter; be careful when moving to a new machine. The colon ":" is a no-op command and can be found as the first line in a Bourne shell script.
See /bin/sh --help for flags
#!/bin/bash
TXT="Hello World"   # No spaces around the equal sign. Surround the value with quotes if there are spaces in the string. See more in "variables" below.
set -e : the script stops on error
set -x : each command is echoed
Execute with:
./script_name
Note that execute permissions may have to be set:
chmod u+x script_name
Run in the background:
./script_name &
List background jobs with:
jobs
Stop a foreground job with ^Z.
Put a stopped job in the background with:
bg %1
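Putting these pieces together, a minimal sketch (the script name and variable are illustrative):
#!/bin/bash
set -e                    # stop at the first failing command
GREETING="Hello World"    # no spaces around the equal sign
echo "${GREETING}"
Make it executable and run it in the background with: chmod u+x hello.sh; ./hello.sh &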
Single quotes and double quotes are used to surround strings.
However, variables expand in double quotes (hence the term partial or
weak quoting) and do not expand in single quotes (full or strong
quoting).
Double quotes protect all special characters except $, ` (backquote),
and \ (backslash, used for escape). Also ! for history substitution in C shell.
Single quotes suppress filename and variable substitution
The back quote "`" performs command substitution
Separate commands on the same line with ";"
Continue a command to the next line with \
The colon (:) is a null command (does nothing).
commandA && commandB : commandB executes only if commandA is successful
commandA || commandB : commandB executes if commandA failed
Therefore, [ a_cond ] && commandB is equivalent to:
if [ a_cond ]; then
commandB
fi
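For example, a hedged one-liner using this idiom:
[ -f /etc/passwd ] && echo "passwd file exists"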
Zero before the number means octal notation (012)
Zero X before the number means hexadecimal notation (0xA23D)
nn# before the number means nn base (binary: 2#1001001001)
Eventually put in production in /usr/local/bin
Comments start with #. But the # loses its comment meaning when escaped with ", ', or \ (double quote, single quote, or backslash) or when it appears in certain pattern-matching expressions.
Redirection:
cmd > file
cmd 2> file
cmd 2>&1
cmd &> file
cmd > file 2>&1
cmd > file 2> file_err
cmd > /dev/null
>/dev/null 2>&1
>& /dev/null
exec 1> $0.log 2> $0.err
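A small sketch of the exec form at the top of a script, so that everything after it is logged (file names are illustrative):
#!/bin/bash
exec 1> /tmp/myscript.log 2> /tmp/myscript.err   # from here on, stdout and stderr go to these files
echo "this line goes to the log file"
ls /nonexistent                                  # the error message goes to the .err file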
Question: find equivalent for the other shells
| | User's default setting | User's init file | Executed when exiting shell | System-wide defaults | System-wide aliases |
|---|---|---|---|---|---|
| bash | $HOME/.bash_profile | $HOME/.bashrc | $HOME/.bash_logout | /etc/profile | /etc/bashrc |
| ksh | $HOME/.profile | | | | |
set prompt = `hostname`':\!)'
# (not sure about this one: see PS1 below)
set prompt = `hostname`'-'`whoami`':\!)'
alias rm "rm -i"
alias mv "mv -i"
alias cp "cp -i"
alias ll 'ls -la'
alias ls "ls -F"
In BASH, Bourne, and Korn: export PS1=`hostname`'('`whoami`'):$PWD-> '   # $PWD is the current directory (Korn shell)
TXT="Hello World"   # No spaces around the equal sign. Surround the value with quotes if there are spaces in the string. See more in "variables" below.
export TXT   # Environment variables have to be exported to be used in a child script
echo "${TXT}"   # make it a habit to enclose with double quotes and curly brackets
[local] A_VARIABLE=value   # defines a variable local to a function
TO_BE_NULL=   # set to null
set   # see values of all variables
env   # see values of all environment variables
unset A_VAR   # unset a variable
shift n   # moves all the arguments down n positions, but the first n arguments are lost
args=$#; lastarg=${!args}
set a_var = (list of space-separated words)   # csh array
  $a_var[n]   nth item
  $a_var   all items
  $a_var[n-m]   range of items
  $#a_var   count of items
i=`expr $i + 1` : increase counter. Remember to include spaces between terms and operators. Escape * and / as \* and \/
the_var=${the_var:-aValue}
Escaping
| csh | Bourne shell | Korn shell | Bash shell |
|---|---|---|---|
| Recognized as first word of a command | | | |
| if [expression] | if [expression] | if [expression]; | |
| foreach one_item (list) ... continue # skip the rest and go to beginning of next item ... break # skip the rest and exit loop ... end | a_list="green blue red" | for a_var in word1 word2 | for a_var in $( ls ) |
| while (expr) | while command # commands include [[ ]] | while command | while [ $I -gt 0 ]; do |
| | until command # commands include [[ ]] | same | until [ $I -gt 0 ]; do |
| | | same | Do not forget the double semi-colons |
| #!/bin/sh # (need to work on the variants between shells) don't forget the brackets "()". Call the function without brackets: func_name param1 param2 | function func_name # or func_name(). "function" allows Korn shell semantics. Need a space after the () and the command. Call the function without brackets: func_name param1 param2 | [function] func_name (). Call the function without brackets: func_name param1 param2 | |
| set A_VAR=value | A_VAR=value | A_VAR=value | A_VAR=value |
| `command` | `command` | $(command) | `command` or $(command) |
Technically, the "[ expression ]" is a command and the exit status is the condition. If the exit status is 0, then the following statement is executed. The left bracket is a dedicated command. Therefore, "if command" can be used too.
However, the double left bracket "[[" is a keyword (bash 2.02) and allows more extended tests.
Another construct is "if ((expr))". The (( )) evaluates an arithmetic expression. Note that (( 0 )) returns 1 and is considered false, and (( 1 )) returns 0 and is considered true.
Best practice is to surround the variables with curly brackets and double quotes, as in:
if [ "${1}" = "" -o "${2}" = "" ]
if [ "${AP_BASE}" = "" ]
Best is to use the test operators -n (non-zero-length string) and -z (zero-length string) instead of comparing to "":
if [ -z "$1" ]; then
echo 'Usage $0 argument'
fi
Loop example:
i=1
while [ $i -lt 9 ] ; do
i=`expr $i + 1`
done
The test expression needs spaces around the operator (do NOT write "a=b" but "a = b").
The "[" is an alias for the test command (you can do man test).
When comparing strings, add a character in each one: if [ "x$the_variable" = "xsomething" ]
List of operators (see man test for shell-specific operators):
[ -n $X ] : true if $X has non-zero length
[ -z $X ] : true if $X has zero length
[ -d $DIR ] : true if directory $DIR exists
[ -f $F ] : true if file $F exists (as a "regular" file)
[ -e $F ] : true if file $F exists. Note: there are many operators for file existence.
[ $A -eq $B ] : true if $A and $B are integers and they are equal
[ $A -ne $B ] : true if $A and $B are integers and they are NOT equal
[ $S1 = $S2 ] : true if $S1 and $S2 are equal as strings
[ $S1 != $S2 ] : true if $S1 and $S2 are NOT equal as strings
[ $A -lt $B ] : true if $A < $B (both operands should be integers)
[ $A -gt $B ] : true if $A > $B (both operands should be integers)
[ $A -ge $B ] : true if $A >= $B (both operands should be integers)
[ $A -le $B ] : true if $A <= $B (both operands should be integers)
[ $f1 -nt $f2 ] : true if $f1 is newer than $f2
[ $f1 -ot $f2 ] : true if $f1 is older than $f2
[ "$expr1" -a "$expr2" ] : true if $expr1 and $expr2 are true
[ "$expr1" -o "$expr2" ] : true if $expr1 or $expr2 is true
Korn shell: if [ .... ] :
-r file : file exists and is readable
-w file : file exists and is writable
-x file : file exists and is executable
-f file : file exists and is a regular file
-d file : file exists and is a directory
-s file : file exists and has a size greater than zero
-h file : file exists and is a symbolic link
-z s1 : true if length of string is zero
-n s1 : true if length of string is non-zero
s1 = s2 : true if strings s1 and s2 are identical
s1 != s2 : true if strings s1 and s2 are not identical
s1 : true if s1 is not the null string (my 2 cents: be more explicit and use an operator)
n1 -eq n2 : true if the integers n1 and n2 are algebraically equal. Comparisons: -ne, -gt, -ge, -lt, and -le
! : unary negation operator
-a : binary AND operator (higher precedence than -o)
-o : binary OR operator
This list may be different for [[...]] (see man ksh, under "Conditional Expressions"):
-a file : True if file exists
-d file : True if file exists and is a directory
-f file : True if file exists and is an ordinary file
-h file : True if file exists and is a symbolic link
-L file : True if file exists and is a symbolic link
-n string : True if length of string is non-zero
-r file : True if file exists and is readable by current process
-s file : True if file exists and has size greater than zero
-w file : True if file exists and is writable by current process
-x file : True if file exists and is executable by current process. If a directory, then the process has permission to search it.
-z string : True if length of string is zero
file1 -nt file2 : True if file1 exists and is newer than file2
file1 -ot file2 : True if file1 exists and is older than file2
file1 -ef file2 : True if file1 and file2 exist and refer to the same file
string = pattern : True if string matches pattern
string != pattern : True if string does not match pattern
string1 < string2 : True if string1 comes before string2 based on ASCII value of characters
string1 > string2 : True if string1 comes after string2 based on ASCII value of characters
exp1 -eq exp2 : True if exp1 is equal to exp2
exp1 -ne exp2 : True if exp1 is not equal to exp2
exp1 -lt exp2 : True if exp1 is less than exp2
exp1 -gt exp2 : True if exp1 is greater than exp2
exp1 -le exp2 : True if exp1 is less than or equal to exp2
exp1 -ge exp2 : True if exp1 is greater than or equal to exp2
(expression) : group expressions
! expression : True if expression is false
expression1 && expression2 : True if expression1 and expression2 are both true
expression1 || expression2 : True if either expression1 or expression2 is true

for i in $( ls ); do   # the variable $i takes values in the list
  ...
  if [ ... ]; then break; fi
done
COUNTER=0
while [ $COUNTER -lt 10 ]; do
...
let COUNTER+=1
done
COUNTER=0
until [ $COUNTER -gt 10 ]; do
echo COUNTER $COUNTER
let COUNTER+=1
done
case ${the_var} in
"MED"|"TMT" )
echo "must ..."
return 1
;;
"a" )
echo "OK"
;;
* )
echo "Valid parameters are ..."
return 1
;;
esac
Conditional on success of previous command
ls -1 non_existent_file
if [ $? -eq 0 ]
then
echo "success"
else
echo "failed"
fi
echo "done"
function fctn_name {
echo $1
}
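Calling the function defined above; arguments are positional ($1, $2, ...) and the output can be captured (names are illustrative):
fctn_name "first argument"          # prints: first argument
result=$(fctn_name "hello")         # capture the function's output in a variable
echo "${result}"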
echo -n   # -n does not break the line
exit n   # exits the script with exit code n, with 0 meaning successful, 1 to 255 meaning error. If no argument, then the exit status of the last command is returned. Note that some exit codes have special meanings, but there is no "official" list.
OPTIONS="list of words"   # this prompts the user to choose one of the options. Needs an exit option.
select opt in $OPTIONS; do
  ...
done
echo Please enter ...   # prompts the user to enter a value / input from the command line
read VAR_NAME VAR_NAME_2
Use stty -echo; read DB_PWD; stty echo for passwords
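A short sketch of prompting for input, including a hidden password (variable names are illustrative):
echo -n "Please enter your name: "
read USER_NAME
echo -n "Password: "
stty -echo; read DB_PWD; stty echo; echo
echo "Hello ${USER_NAME}"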
Command substitution:
$(commands) expands to the output of the commands. Nesting is possible. Newline characters are not possible.
`commands` expands to the output of the commands (the character ` is referred to as "back tick"). Newline characters are not possible.
Example: base_list=`cat /etc/oratab | grep -v "#" | awk -F\: '{print "-"$1"-"}'`
Script for user input of options (tldp.org)
Not yet tested
#!/bin/bash
OPTIONS="cmd1 cmd2 x"
select opt in $OPTIONS; do
if [ "$opt" = "cmd1" ]; then
...
elif [ "$opt" = "cmd2" ]; then
...
elif [ "$opt" = "x" ]; then
exit
else
echo Option "$opt" not known
fi
done
LOG_FILE=/u01/usr/target/logs/a.log
Infinite loop:
while :; do
  echo infinite loop   # remember to put an exit condition!!
done
ls -lt udump | awk '{print "ls -l "$9}' > a_file
awk -F'\t' '{print $2, $3}' the_file
awk '{print $3}' | sort -u
awk '$1 == "something" {print $1, "|", $3}'
awk '$1 ~ /^something$/ {print $1, "|", $3}'
awk '$1 !~ /^$/ {print $1, "|", $3}'
awk '{print $1","$2",\""$3"\",\""$4"\""}' a_file > a_csv_file.csv
awk '{print $1"\t"$2"\t"$3"\t"$4}' a_file > a_tab_delim_file
awk '{print substr($0, 10, 30)}' a_file
substr($0, n [, m]) : n is the start position (1 is left-most), m is the optional length
awk -F"\t" -v OFS="\t" '{print FILENAME,$0}' file_in > file_out
awk -v var_name=${a_variable} '{...}'
ls -la | awk -F"/" '{print $NF }'   # $NF is the last "column"
Cook book / Fre
grep "package succeed" thread* | awk '{print $6":"$10 }' | awk -F":" '{print $1 $2}' | sort -u
grep "package fail" thread* | awk '{print $6":"$10 }' | awk -F":" '{print $1 $2}' | sort -u
awk '$3 == "package" {print $4":"$6":"$10 }' thread* | awk -F":" '{print $2, $1, $3}' | sort -u
awk '{print substr($0, 20)}' a_file | sort -u
awk '{ sub("\r$", ""); print }' dos_format > unix_format
awk 'sub("$", "\r")' unix_file > dos_format
awk -F"|" '{print $1 " " $2}' dmrt_xxx.ext | sort -u
awk -F"|" '{print $51, $2, $1}' the_file
awk -F"|" '$51 == "(10 spaces)" {print $1 " " $2 " " $51}' the_pat_file
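A hedged addition to the recipes above: summing and counting a pipe-delimited column with awk (file and column number are hypothetical):
awk -F"|" '{sum += $3; n++} END {print "rows:", n, "total:", sum}' the_file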
Documentation:
ifconfig [-a] [-v] [interf]
ip addr
netstat [-i]
ping -c n
traceroute
traceroute6 for IPv6
tracepath ip-or-name
mtr ip-or-name
host ip-or-name
ifplugstatus [eth0]   (sudo apt-get install ifplugd)
route -n
netstat -anp
netstat -nr
netstat -a
/etc/inetd.conf
/etc/services
See /etc/sysconfig/network. This is where the hostname is defined.
Each interface has a configuration file in /etc/sysconfig/network-scripts.
Network file is /etc/network/interfaces. It contains the definitions for the network interfaces. Two situations: dynamic (with DHCP server) or static:
| Dynamic addresses | Static addresses |
|---|---|
| auto eth0 | auto eth0 |
Restart network: /etc/init.d/networking restart
See the hostname: execute /bin/hostname or look at the file /etc/hostname
Address lookups (host file): /etc/hosts
The file /etc/resolv.conf
points to a
specific server for name lookups (dns servers). In the case of a
router, the address of the router would be in this file.
The Samba configuration is in /etc/samba/smb.conf
Restart Samba server after changing the configuration: /etc/init.d/samba restart
To install, use the Synaptic Package Manager and search for Samba.
Query the Samba server: smbclient -L ubuntu -U%
(Replace the % with a username to see what a specific user will see.)
-c --> create
-r --> write at end of archive
-t --> list contents
-u --> add files to tape
-x --> extract
options:
-b : block factor (block size) for tapes
-e : exclude
-f : tar file name
-v : verbose
examples:
tar cvf /dev/ntape/tape0 -e ./foo $HOME
tar cvf the_file.tar sub_dir/*
tar -xvf abc.tar
tar xvf abc.tar -C dir
compress *.tar
uncompress *.z
Other utilities: zip, gzip
gzip a_file : gives a_file.gz (tar directories before gzip)
gzip -d a_file.gz : decompresses
Configuration in .exrc in the home directory.
In vim, do :edit $MYVIMRC. Locations are shown in :version.
See variable values with :echo $VIM and :echo $HOME
Try C:\users\the_user\_vimrc or ...\gVimPortable\Data\settings\_vimrc.
vi -R | Edit in read-only mode |
ESC | cancels a partially entered command |
DEL | interrupts an executing command |
^L | refreshes a scrambled screen |
^G | Shows current line and file information |
Insert
i | insert, ESC to end insert |
I | Insert at beginning of line |
a | insert after the cursor |
A | insert at the end of the line |
o | New line (below) and insert |
O | New line (above) and insert |
J | Join current line with the next |
^W | Erase last word when in insert mode |
^H | Erase last character when in insert mode |
Moving
^U ^D |
Up --> Scroll Up Down --> Scroll Down |
^B ^F |
Back --> Page back Forward --> Page forward |
^Y ^E |
Go up one line Go down one line |
k, h, l, j | Go up, left, right, down (like the arrows). Note that J is join! |
iG :i |
Go to line i |
G | Go to end of file |
+ or CR | Go to first non blank character of next line |
- | Go to first non blank character of previous line |
H, iH M L, iL |
Home --> Move to upper left of screen (home), to
line i of screen Middle --> Move to middle of screen Low --> Move to low part of screen, to line i from bottom |
space | move to next character |
backspace | move to previous character |
Bxxx.bxxx[cursor]xxxe.wxxxE Wxxx | B - b - ew - E W |
w, W | Move to next word, next big word |
b, B | Move to previous word, previous big word |
e, E | Move to end of word, end of big word |
(, ) | Move to beginning of previous, next sentence |
{, } | Move to beginning of previous, next paragraphe |
0 | Move to the beginning of the line |
^ | Move to the first non-blank character |
$ | Move to the end of the line |
mc | Put a marker c (use 'a' to 'z') |
'c | Move to marker c |
'' | Move to previous position |
% | Move to the matching parenthesis or brace. |
Deleting
x | Delete current character |
ix | Delete i characters |
dd | Delete line |
dw | Delete word |
db | Delete backwards |
d) | Delete to end of sentence |
^W | Erase last word when in insert mode |
^H | Erase last character when in insert mode |
Searching
/text | Search |
/\%uUUUU | Search for a character with unicode UUUU (hex notation). Slash back-slash percent lowercase u ... |
?text | Search backwards |
n | Search next |
/^text, /text$ | Search for text at beginning, end of line |
fc | Find character c on current line. Semi-colon ";" repeats |
Fc | Find character c backwards on line. Semi-colon ";" repeats |
tc, Tc | Find character c and put cursor before. Semi-colon ";" repeats |
Advanced Editing
. | Repeat last change |
xp | Transpose characters |
y | Yank |
Y | Yank line |
p | Put yanked line |
s | Substitute (esc to end) |
r, ir | replace with next character, replace next i characters |
R | Replace, end with ESC |
dd | delete line |
dw | delete word |
c$ | change until the end of the line |
cw | change word |
cc or S | Change line, end with ESC |
:[%]s/s1/s2/[g] |
Substitute, % is for all lines, g (=global) all
occurences (otherwise just the first). See section
on regular expressions. Repeat with "&" Range with comma: perform substitution on specific lines (set nu to see line numbers) |
sed 'command' file | Apply command to each line of the file. Example: sed 's/ //g' file > output_file # Remove spaces |
u | undo (undo the undo with ctrl-r) |
U | Restore current line as before |
v, V | Start visual selection, then move and do one of these commands: d, c, y, >, < V selects the whole line. Ctrl-v after moving selects a box (nice trick) |
V>, V< | Shift the whole line right or left |
gu, gU | Put selection in lower case, upper case |
ga | Gives the hex and octal representation of the character under the cursor |
qa (...commands...) q @a |
Register commands in "a" Re-play with @a . There are 26 registers, a..z. |
EX commands, including setting parameters and exiting
:f |
Show current file and line |
:w |
write to the file |
:r filename |
Insert file 'filename' after the current line |
:w name |
write to file 'name' |
:q |
quit |
:q! |
quit and discard changes |
ZZ |
Write changes and exit |
!!cat filename |
Replace current line with file 'filename' |
:set |
Set various parameters, e.g. :set ic
ignore case:set all lists all parametersConfigurations can also go into the .exrc file Show value: :set option? See possible parameters below. See vimdoc too |
:ab short_string string |
Abbreviation: replaces "short_string" with "string" Remove with :una short_string |
:vi |
Go back to vi mode when in EX mode |
:X (uppercase) |
Set the pw for the file. :setlocal cm? to see the method. Set method with :setlocal cm=mm with mm=blowfish2, blowfish, or zip. Note that zip method is backwards compatible, but breakable too. |
"incsearch" and "hlsearch" options
:be xterm --> set selectmode to "", mousemodel to "extend", keymodel to "", and selection to "inclusive"
:set slm=cmd / :set selectmode=cmd --> use v, V, ^V to start selections
:set selection=exclusive / :set sel=exclusive   Exclude the character under the cursor in visual selections
:set ic --> ignore case when searching
:n $VIM or :echo $VIM   Shows variable $VIM
:set ff=unix or :set ff=dos : define the file format, more specifically the end-of-line character. You can read in one format and write in the other (check with :set ff). If it is "dos", then the ^M means there are CR / \r / chr(13) characters without a LF / \n / chr(10) after them. This worked: :%s/\r/\r\n/g. This gives a ^@ character at the beginning of the line. Search for it with /\%o0 (backslash % lowercase letter o zero). Remove with :%s/\%o0//g.
ggVG : select all
:set nu / :set nonu   Show or hide the line numbers
:set wrap / :set nowrap   Line wrap
:set list / :set nolist   Invisible characters. EOL=$
:set noautoindent / :set noai   Auto-indent
:set noshowmatch / :set nosm   Brief jump to matching brackets when inserting
:set expandtab / :set et / :set noet   Replace tabs with spaces. Insert a real tab with ctrl-V
:set indentexpr=""   Defines how the indents are calculated. The directive does not work in the vimrc file, so type :set indentexpr=""
:set tabstop=4 shiftwidth=4 expandtab   Advance 4 spaces when pressing tab or ">". Replace existing tabs with spaces: :retab
:highlight Comment ctermfg=green guifg=green   Change the practically illegible blue to green for comments
:set t_Co=0 : turn off color terminal. Use this when the syntax highlighting scheme makes the text unreadable.
:set tabstop=4 shiftwidth=4 expandtab autoindent showmatch nowrap   Best for editing Python scripts
Modify multiple files:
vi *html   # or whatever wildcard is appropriate
:bufdo %s/look-for/replace-with/ge | update
Sample vimrc file:
set lines=40
set columns=150
set ignorecase
set noautoindent
set indentexpr=""
set expandtab
set noshowmatch
set backup
set backupext=.bak
set patchmode=.orig.txt
Portable version
Execute with C:\progfile\papps\gVimPortable\App\vim\vim73\gvim.exe
vimrc in "C:\progfile\papps\gVimPortable\Data\settings\_vimrc"
For macros in vim, see below. Consider using the macro function in reflexion too.
To record a macro: q followed by a register letter (a..z), then the commands, then q.
Play back the macro with @ followed by the register letter.
. (period) | Any one character |
\ | Next character is taken literally |
$ | Match end of line |
^ | Match beginning of line |
% | All lines |
[xy] | Match with 'x' or 'y' |
[0-9],[A-Z],[A-Za-z],[a-zA-Z0-9_] | More than one option for a character |
[^A-Z] | Match with any character except upper case. |
x* | Match with 0 or more 'x' |
x+ | 1 or more 'x' (repeating character, use [ ]+ too) |
xx* | 1 or more 'x' ("+" does not seem to work) |
x? | 0 or 1 occurrences of 'x' |
.* | 0 or more of any characters |
(reg_expr)* | Match with 0 or more occurrences of a group of characters (capture group) |
(?:abc) | Cluster group (non-capturing group) |
x*? x+? |
"Ungreedy" matching |
one_string|another | several possible matchings. Best if used in a cluster group: (?:one_string|another) |
\{min, max\} | Specifies a minimum and a maximum number of occurrences |
\{x\} | Matches x occurrences |
\(...\) | Store matched character |
\n | Retrieve stored characters (n=1..9) |
(a string|another|3rd option) | Search for at least one of three strings (alternation) |
i | i switch at end indicates case in-sensitive. Switches can be cumulated: gei |
g | g switch at end indicates all occurences (otherwise just the first) |
e | e switch at end treats the substitution text as a normal Perl expression |
See www.regular-expressions.info (explore some more)
Examples:
:%s/x.*$// |
|
:%s/^..../....y/ |
asdf --> asdfy |
:%s/</\&lt;/g | Prepare for HTML (replace < with the entity &lt;) |
s/xyz/abc/g |
g is global option |
sed '5d' |
delete line 5 |
sed '/[Tt]est/d' |
Delete lines with "test" or "Test" |
sed -n '20,25p' |
Print lines 20-25 |
sed '1,10s/xyz/abc/g' |
change in first 10 lines |
sed '/jan/s/-1/-5/' |
change -1 to -5 on lines with jan |
sed 's/^...//' |
Delete the first 3 chars |
sed 's/...$//' |
Delete last 3 chars |
sed 's/[ ]*$//' |
Delete trailing spaces at end of lines |
sed 's/\x93/"/g' |
Look for character with unicode 93 (in hex) and replace |
sed -n 'l' | Print all lines, showing non-printable chars as \nn and tabs as > |
'^[a-zA-Z_]+@[a-zA-Z]+\.[a-zA-Z]+$' |
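That last pattern can be fed straight to extended grep, for example (the file name is hypothetical):
grep -E '^[a-zA-Z_]+@[a-zA-Z]+\.[a-zA-Z]+$' addresses.txt   # keep only lines that look like simple e-mail addresses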
echo
grep str filename
  -i ignore case
  -n line number
  -l list files
  -v lines that do not contain the string
  -h do not display filenames when searching multiple files
sort -nr filename
  -n interpret the first field as numbers
  -r reverse order
  -u eliminate duplicate lines
  -o give file for output
uniq in_file out_file
cut -c list [file ...]
  cut -b for byte position (equivalent to the -c option for single-byte character sets)
cut -f list [-d char] [-s] [file ...]
  Fields instead of character positions (cut -c). Put quotes around char if it has a special meaning. Default delimiter is tab.
  -s to suppress output of lines with no delimiters
cut -f 2-4 -d: get columns 2 to 4 defined by colon as a delimiter
grep something * | cut -f 1 -d: | sort -u   tidy up grep output
paste -d list file1 file2
paste -d "," file1 file2
/usr/xpg4/bin/grep -E 'full regex' filename
/usr/xpg4/bin/grep -E '(792503|796006|801801)' a_file
tr 'abc' 'ABC' < a_file.txt
tr -d ' '   # deletes spaces
tr -c 'a' '-'   everything but 'a'
tr e z
tr '[a-z]' '[A-Z]'
tr ':lower:' ':upper:'   tokens, such as :alnum:, :alpha:, :digit:, :blank: (space character), :space: (all whitespace), :punct:
tr '()' '{}'
tr -s '[:blank:]'   squeeze out multiple occurrences
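A hedged example chaining several of the tools above: count the most common login shells listed in /etc/passwd.
cut -d: -f7 /etc/passwd | sort | uniq -c | sort -nr | head -5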
crontab -l
mm hh * * d   where mm=minutes, hh=hour, d=day in week (0=Sun)
echo $SHELL
xhost +
export DISPLAY=ip-add:0.0
sysconfig -s
sysconfig -q vm
date +"%m%d %H%M %Y"
mv a_name.txt a_name`date +"%Y%m%d%H%M"`.txt : rename a file with a timestamp
date mmddHHMMyyyy
date +%s
cal [m] year
bc
scale = n
grep "19\-OCT\-2004" ...log/listener.log | grep
-i genio | grep -v SID
echo $PATH
PATH=${PATH}:new_path_to_add
ls | xargs
dos2unix in_file_name > out_file_name
awk '{ sub("\r$", ""); print }' dos_format > unix_format
sed 's/^M$//' dos_format > unix_format   (the sed -e option might be needed). Try also tr -d '\r'
unix2dos in_file_name > out_file_name
awk 'sub("$", "\r")' unix_file > dos_format
sed 's/$'"/`echo \\\r`/" unix_file > dos_format   (verify this)
diff file1 file2
  -c gives a context of three lines around the changes, -C n gives a context of n lines
  -u produces a merged view of the differences
  -b ignore blanks
  -y side by side (-by is good for side by side of two files)
diff file1 file2
diff -rq dir1 dir2
^S ... ^Q
cat a_file | tr '|' '\t'
tr "|" "\t" < a_file
sar -u -s 02:00:00
cal 3 2012 | cut -c3-18
!!   repeat the last command
!^   first argument of the last command
!$   last argument of the last command
!*   all arguments of the last command
!-n  the command n lines back
history
set -o vi
[esc]k   (Korn shell: recall previous commands)
Ctrl-C Interrupt
Ctrl-Z Suspend
Ctrl-D Exit the bash shell
Ctrl-L Clear the screen
Ctrl-S and Ctrl-Q Stop scrolling and resume output to the screen
Editing line:
Ctrl-A or Home: beginning of line
Ctrl-E or End: end of line
Ctrl-B Back one character. Alt-B back one word
Ctrl-F Forward one character. Alt-F Forward one word
Ctrl-XX Go to beginning of line, then with Ctrl-XX return to previous position
Ctrl-D Delete. Alt-D Delete to end of word
Ctrl-H or backspace
Ctrl-_ (underscore): undo last key press
Ctrl-T Swap the previous two characters
Cut and paste:
Ctrl-W Cut the word before the cursor
Ctrl-K Cut from cursor to end of line
Ctrl-U Cut from beginning of line to cursor
Ctrl-Y Paste what was previously yanked. Note that the clipboard is local to the bash shell.
Ctrl-Ins and Shift-Ins to copy and paste from computer's clipboard
Alt-U Convert to upper case from cursor to end of current word
Alt-L Convert to lower case from cursor to end of current word
Alt-C Convert character under cursor to upper case and go to end of word
History:
Ctrl-P or up arrow: previous command in history
Ctrl-N or down arrow: next command in history
Alt-R Revert changes to command
Ctrl-R Recall: press ctrl-R and type characters (reverse-i-search)
Ctrl-O Run the command found with ctrl-R above
Ctrl-G Exit this recall mode
Tab completion: press tab to complete a directory or file name.
It appears that the shortcuts above require emacs mode (set -o emacs) as opposed to vi mode (set -o vi).
Many thanks to L.H. at How-To Geek. The website is an excellent source of tips, tricks, and in-depth explanations
Location:
- ~/.fonts
- ~/.local/share/fonts
Update with the fc-cache command after adding or removing fonts
Formats:
- TrueType (.ttf)
- PostScript Type 1 (.pfb + .pfm)
- OpenType (.otf)
Double-click a text to copy, and right-click to paste
curl -O url : Download a file (capital "O")
wget url : Download a file
Examples:
0 0 * * 0 = weekly
0 0 * * * = daily, at midnight
0 8 * * * = daily, at 08:00
0 * * * * = hourly
# +------------- minute (0 - 59)
# | +------------- hour (0 - 23)
# | | +------------- day of the month (1 - 31)
# | | | +------------- month (1 - 12)
# | | | | +------------- day of the week (0 - 6) (Sunday to Saturday;
# | | | | | 7 is also Sunday on some systems)
# | | | | |
# | | | | |
# * * * * * command-to-execute
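For instance, a crontab line that runs a hypothetical backup script every night at 02:30 and appends both stdout and stderr to a log:
30 2 * * * /home/chris/bin/backup.sh >> /var/log/backup.log 2>&1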
See working implementation on https://github.com/yekesys/terraform_ec2. It creates a local file and spins up an EC2 instance.
See Module Structure
# comment
// comment
/* multi-line comment */
Test installation with:
terraform -v
First run after defining Terraform files:
terraform init
This is idempotent, and can be executed multiple times
Show the changes that will be made (like a dry run):
terraform plan
Apply the changes:
terraform apply
or
terraform apply -auto-approve
Remove objects with:
terraform destroy
Format the code:
terraform fmt
Check validity:
terraform validate
Download a new version (to be verified):
terraform init -upgrade
Basic structure of a Terraform block:
<BLOCK TYPE> "<BLOCK LABEL>" "<BLOCK LABEL>" {
# Block body
<IDENTIFIER> = <EXPRESSION> # Argument
}
The state is stored in the file terraform.tfstate.
Terraform is "declarative". We define the desired end state. Terraform figures out how to get there.
This approach requires that Terraform know the current state. This is in the state file terraform.tfstate (a JSON file).
Update the state file:
terraform refresh
Use the -refresh-only mode as a safe way to check Terraform state against real infrastructure. It does not update the infrastructure nor the state file:
terraform plan -refresh-only
terraform apply -refresh-only
Best practices for the state file:
Show the state from state file:
terraform show
Log Terraform core:
export TF_LOG_CORE=TRACE
export TF_LOG_PATH=logs.txt
Log a provider:
export TF_LOG_PROVIDER=TRACE
export TF_LOG_PATH=logs.txt
When done, unset the variables:
export TF_LOG_CORE=
export TF_LOG_PROVIDER=
First, we define the providers
Providers are defined in the root, not in the modules.
Examples of providers: aws, local
Doc for local: https://registry.terraform.io/providers/hashicorp/local/latest/docs
Doc for AWS: https://registry.terraform.io/providers/hashicorp/aws/latest/docs
Provider information should only be in the root module.
Set AWS profile (user) in the main.tf file:
# main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.16"
}
}
required_version = ">= 1.2.0"
}
provider "aws" {
profile = "user2"
}
Assume a role:
provider "aws" {
assume_role {
role_arn = "arn:aws:iam::123456789012:role/dev-full-access"
}
}
list of providers:
https://registry.terraform.io/browse/providers
Documentation:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs
The resource block identifies the type of the resource and name of the resource.
Below is the basic structure of a resource block:
resource "provider_type" "name" {
...
}
The resource type is composed of the provider (above "provider", which corresponds to the name of the provider) and the type (above "type"). The name of the resource is my choice (above "name").
The id of the resource is provider_type.name (as in the first line of the resource block, with "." between the resource type and the name).
terraform state list
remote backend:
-lock-timeout=10m : wait 10 minutes for lock to be released
Define as follows:
variable "var_name" {
description = "Text ..."
type = string # or bool or number or list(string)
}
Ideally, put all variables in the variables.tf file, although they can be in any file
Get the value with var.var_name in other files.
Inside a string, use "....${var.var_name}..."
Local variables:
locals { var_name = a_value }
Get the value with local.var_name
Define an object:
variable "a-name" {
type = object({
name = string
address = string
})
}
Prevent sensitive values from showing in the output. However, nothing prevents these values from showing in the state file, which therefore has to be treated as holding confidential data:
variable "a-name" {
sensitive = true # will not show in outputs
}
output "a_value" {
value = aws_instance.example.public_ip
description = "Text..."
}
Show all outputs with terraform output and a specific variable with terraform output a_value.
For output in JSON format, do terraform output -json.
All .tf files in the same directory are part of the same module.
Each module (or directory) should ideally have a README.md file, or README
And should always contain main.tf, variables.tf, outputs.tf, even if empty
Some people say to not use the file names main.tf, variable.tf, outputs.tf in all the modules, but to use different file names. Likewise, some people suggest using JSON files instead of the tfvars format, and calling the files name.tfvars.json instead of name.tfvars. Instead of outputs, use data blocks and tags.
Child modules should not have a provider block. Provider configurations should only be in the root module. If they are in child modules, there can only be one. But requirements for providers can exist in the various modules in "required_providers" block
Example:
# main.tf in root
provider "aws" {
region = "us-east-1"
}
module "abc" {
source = "../../abc_module" # For local sub-directory, use "./sub-dir" notation
var_name = "asdfasdf"
}
The name "abc"
above is the local name that the calling module
uses to designate the instance of the module. The source
is mandatory.
Multiple module blocks can use the same source. This creates multiple copies, usually with different variables.
All configuration values should be in the variable.tf file
If a variable is used but does not have a value, provide the value at run-time:
terraform apply -var "server_port=8080"
Or:
export TF_VAR_server_port=8080
Catch the output variable values with:
module.the-module-name.the-output-name
Note: the-module-name is not the sub-directory name, but what follows the keyword module in the calling module. The output name is the output defined in the called module.
Count:
Create three things that are the same:
resource "..." "example" {
count = 3
name = "something${count.index}"
}
This creates something0, something1, and something2 (count.index starts at 0)
List:
Define the variable as type "list(string)"
variable "var_name" {
description = "Text ..."
type = list(string)
}
In the resource creation:
count = length(var.var_name)
filename = var.var_name[count.index]
Get the values with [*]:
output "all_..." {
value = aws_iam_user.example[*].arn
description = "..."
}
Collection:
Define the variable as type "list(string)"
variable "var_name" {
description = "Text ..."
type = list(string)
}
In the resource creation:
for_each = toset(var.var_name)
filename = each.value
Get the values with [*]:
output "all_..." {
  value       = aws_iam_user.example[*].arn
  description = "..."
}
Root module ==> module in sub-directory ./the_module
Notes:
File names in the root: main.tf, variable.tf, data.tf, outputs.tf.
File names in a module (one possible convention): main_modulename.tf, variable_modulename.tf, data_modulename.tf, outputs_modulename.tf.
module.the_module_name refers to module "the_module_name"
var.variable1 refers to variable "variable1"
providername_resourcetype2.the_resource2_name refers to resource "providername_resourcetype2" "the_resource2_name"
data.providername_resourcetype2.the_data_element_name refers to data "providername_resourcetype2" "the_data_element_name"
main.tf in root:
provider "local" {
# version = "~> 1.4"
}
module "create_a_file" {
source = "./file_module"
file_content = "\nA text"
file_name = "filea.txt"
}
module "create_b_file" {
source = "./file_module"
file_content = "\nB text"
file_name = "fileb.txt"
}
file_module/variable.tf:
variable "file_content" {
description = "What goes in the file"
type = string
}
variable "file_name" {
description = "List of file names"
type = string
}
file_module/main.tf:
resource "local_file" "a-file" {
content = var.file_content
filename = var.file_name
}
Example with multiple files (for_each):
main.tf in root:
provider "local" {
# version = "~> 1.4"
}
module "create_a_file" {
source = "./file_module"
file_content = "\nA text"
file_names = ["file1.txt", "file2.txt", "file3.txt"]
}
module "create_b_file" {
source = "./file_module"
file_content = "\nB text"
file_names = ["fileb1.txt", "fileb2.txt", "fileb3.txt"]
}
file_module/variable.tf:
variable "file_content" {
description = "What goes in the file"
type = string
}
variable "file_names" {
description = "List of file names"
type = list(string)
}
file_module/main.tf:
resource "local_file" "a-file" {
content = var.file_content
for_each = toset(var.file_names)
filename = each.value
}
provider "aws" {
region = "us-east-1"
}
variable "server_port" {
description = "The port the server will use for HTTP requests"
type = number
}
resource "aws_security_group" "instance" {
name = "terraform-example-instance"
ingress {
from_port = var.server_port
to_port = var.server_port
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_instance" "example" {
ami = "ami-0022f774911c1d690"
instance_type = "t2.micro"
# reference to another part of the configuration:
vpc_security_group_ids = [aws_security_group.instance.id]
user_data = <<-EOF
#!/bin/bash
echo "Hello, World" > index.html
nohup busybox httpd -f -p "${var.server_port}" &
EOF
tags = {
Name = "terraform-example"
}
}
output "public_ip" {
value = aws_instance.example.public_ip
description = "The public IP of the web server"
}
output "port" {
value = var.server_port
description = "The port of the web server"
}
Remote state file (tfstate) in S3:
terraform {
backend "s3" {
bucket = "bucket name"
key = "global/s3/terraform.tfstate"
region = "us-east-1"
dynamodb_table = "terraform-up-and-running-locks-table-name"
encrypt = true
}
}
provider "aws" {
region = "us-east-1"
}
resource "aws_s3_bucket" "terraform_state" {
bucket = "bucket name"
versioning {
enabled = true
}
# Enable server-side encryption by default
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "AES256"
}
}
}
}
resource "aws_dynamodb_table" "terraform_locks" {
name = "terraform-up-and-running-locks-table-name"
billing_mode = "PAY_PER_REQUEST"
hash_key = "LockID"
# the key has to have "LockID"
attribute {
name = "LockID"
type = "S"
}
}
output "s3_bucket_arn" {
value = aws_s3_bucket.terraform_state.arn
description = "The ARN of the S3 bucket"
}
output "dynamodb_table_name" {
value = aws_dynamodb_table.terraform_locks.name
description = "The name of the DynamoDB table"
}