$sudo apt-get update
$sudo apt-get install sun-java6-jdk
$sudo update-java-alternatives -s java-6-sun
$java -version
$sudo addgroup hadoop
$sudo adduser --ingroup hadoop hduser
$su hduser
$ssh-keygen -t rsa -P ""
$cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
$ssh slogix.in
Disable IPv6 by adding the following lines to /etc/sysctl.conf:
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
$cat /proc/sys/net/ipv6/conf/all/disable_ipv6
A return value of 0 means IPv6 is enabled; a value of 1 means it is disabled.
Download Hadoop 1.2.1 from https://archive.apache.org/dist/hadoop/core/hadoop-1.2.1/ and extract it:
$sudo tar xzf hadoop-1.2.1.tar.gz -C /usr/local/
Modify .bashrc using vi: $vi /home/<your username>/.bashrc
# Set Hadoop-related environment variables
export HADOOP_HOME=/usr/local/hadoop-1.2.1

# Set JAVA_HOME.
# Must match the JDK selected earlier with `update-java-alternatives -s java-6-sun`;
# sun-java6-jdk installs under /usr/lib/jvm/java-6-sun.
export JAVA_HOME=/usr/lib/jvm/java-6-sun

# Some convenient aliases and functions for running Hadoop-related commands.
# The unalias calls are best-effort: they silence the error when no such
# alias exists yet (e.g. on a fresh shell).
unalias fs &> /dev/null
alias fs="hadoop fs"
unalias hls &> /dev/null
alias hls="fs -ls"

# Page through the first 1000 lines of an LZO-compressed file stored in HDFS.
# Requires the 'lzop' command to be installed.
# Arguments: $1 - HDFS path to the .lzo file (quoted so paths with spaces work)
lzohead () {
  hadoop fs -cat "$1" | lzop -dc | head -1000 | less
}

# Add Hadoop bin/ directory to PATH
export PATH="$PATH:$HADOOP_HOME/bin"
Add the following lines to the Hadoop XML files. These files are contained in the following directory:
$cd /usr/local/hadoop-1.2.1/conf
1.hadoop-env.sh
# JAVA_HOME must point at the JDK selected earlier with
# `update-java-alternatives -s java-6-sun` (sun-java6-jdk installs here).
export JAVA_HOME=/usr/lib/jvm/java-6-sun
# Suppress the "HADOOP_HOME is deprecated" warning printed by Hadoop 1.x
export HADOOP_HOME_WARN_SUPPRESS="TRUE"
2.Core-site.xml
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/tmp/hadoop-${user.name}</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:54310</value>
<description>The name of the default file system. </description>
</property>
</configuration>
3.hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
<description>Default block replication.</description>
</property>
</configuration>
4.mapred-site.xml
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>localhost:54311</value>
<description>The host and port that the MapReduce job tracker runs at. If "local", then jobs are run in-process as a single map and reduce task.
</description>
</property>
</configuration>
Execute the below command from Hadoop directory
$hadoop namenode -format
$start-all.sh
$jps
6146 JobTracker
6400 TaskTracker
6541 Jps
5806 DataNode
6057 SecondaryNameNode
5474 NameNode
Stop Hadoop Daemons
$stop-all.sh
stopping jobtracker
slogix.in: stopping tasktracker
stopping namenode
slogix.in: stopping datanode
slogix.in: stopping secondarynamenode