[core] Remove old Hadoop configs

Removed HDFS trash info
Tested with trash disabled
Romain Rigaux 12 years ago
parent
commit
c51589f

+ 3 - 20
apps/about/src/about/templates/admin_wizard.mako

@@ -30,15 +30,15 @@ ${ header.menubar() }
         % if user.is_superuser:
           ${ _('Quick Start Wizard') } -
         % endif
-        Hue™ ${version} - The Hadoop UI
+        Hue&trade; ${version} - <a href="http://gethue.com" target="_blank" style="color:#777" title="${ _('Visit the project website!') }">The Hadoop UI</a>
       </h2>
 
      % if user.is_superuser:
 
       <div class="card-body">
-            <br/>
+        <br/>
 
-          <div class="row-fluid">
+         <div class="row-fluid">
           <div id="properties" class="section">
             <ul class="nav nav-tabs" style="margin-bottom: 0">
               <li class="active"><a href="#step1" class="step">${ _('Step 1:') } <i
@@ -62,23 +62,6 @@ ${ header.menubar() }
               </div>
             </div>
 
-          <div class="card card-home card-tab card-tab-bordertop">
-            <h2 class="card-heading simple">${ _('HDFS Trash Configuration') }</h2>
-          <div class="card-body">
-          <p>
-            % if trash_enabled:
-            <h5>${ _('Trash is active.')}</h5>
-            % else:
-            ${ _('You can activate trash collection by setting fs.trash.interval in core-site.xml:')}<br/><br/>
-            <pre>
-  &#60;property&#62;
-    &#60;name&#62;fs.trash.interval&#60;/name&#62;
-    &#60;value&#62;10060&#60;/value&#62;
-  &#60;/property&#62;</pre>
-            % endif
-          </p>
-          </div>
-          </div>
           </div>
 
           <div id="step2" class="stepDetails hide">

+ 0 - 4
apps/about/src/about/views.py

@@ -26,22 +26,18 @@ from desktop.lib.django_util import render
 from desktop.models import Settings
 from desktop import appmanager
 
-from hadoop.core_site import get_trash_interval
-
 
 def admin_wizard(request):
   apps = appmanager.get_apps(request.user)
   app_names = [app.name for app in sorted(apps, key=lambda app: app.menu_index)]
 
   tours_and_tutorials = Settings.get_settings().tours_and_tutorials
-  trash_enabled = get_trash_interval() > 0
 
   return render('admin_wizard.mako', request, {
       'version': settings.HUE_DESKTOP_VERSION,
       'apps': dict([(app.name, app) for app in apps]),
       'app_names': app_names,
       'tours_and_tutorials': tours_and_tutorials,
-      'trash_enabled': trash_enabled
   })
 
 

+ 1 - 8
apps/filebrowser/src/filebrowser/templates/fb_components.mako

@@ -31,7 +31,7 @@ from django.utils.translation import ugettext as _
             <input id="hueBreadcrumbText" type="text" class="input-xxlarge" style="margin-top:4px;margin-right:4px;display:none" data-bind="value: currentPath" />
             <input id="hueBreadcrumbText" type="text" class="input-xxlarge" style="margin-top:4px;margin-right:4px;display:none" data-bind="value: currentPath" />
         </li>
         </li>
         <li class="pull-right">
         <li class="pull-right">
-          <a href="${url('filebrowser.views.view', path=urlencode(path))}?default_to_trash" style="line-height:18px" data-bind="visible: trashEnabled" title="${_('View trash')}">
+          <a href="${url('filebrowser.views.view', path=urlencode(path))}?default_to_trash" style="line-height:18px" title="${_('View trash')}">
             <i class="fa fa-trash-o"></i> ${_('View trash')}
             <i class="fa fa-trash-o"></i> ${_('View trash')}
           </a>
           </a>
         </li>
         </li>
@@ -51,11 +51,6 @@ from django.utils.translation import ugettext as _
             </ul>
             </ul>
         </li>
         </li>
         % endif
         % endif
-        % if not trash_enabled and is_superuser:
-        <li id="trash-help" class="pull-right" style="width:30px;text-align:right;line-height:35px;">
-            <i class='fa fa-question-circle'></i>
-        </li>
-        % endif
     </ul>
     </ul>
 </%def>
 </%def>
 
 
@@ -78,5 +73,3 @@ from django.utils.translation import ugettext as _
       </div>
       </div>
   </div>
   </div>
 </%def>
 </%def>
-
-

+ 0 - 5
apps/filebrowser/src/filebrowser/templates/listdir.mako

@@ -58,7 +58,6 @@ ${ fb_components.menubar() }
             <button class="btn fileToolbarBtn" title="${_('Download')}" data-bind="visible: !inTrash(), click: downloadFile, enable: selectedFiles().length == 1 && selectedFile().type == 'file'"><i class="fa fa-arrow-circle-o-down"></i> ${_('Download')}</button>
             <button class="btn fileToolbarBtn" title="${_('Download')}" data-bind="visible: !inTrash(), click: downloadFile, enable: selectedFiles().length == 1 && selectedFile().type == 'file'"><i class="fa fa-arrow-circle-o-down"></i> ${_('Download')}</button>
             <button class="btn fileToolbarBtn" title="${_('Restore from trash')}" data-bind="visible: inRestorableTrash(), click: restoreTrashSelected, enable: selectedFiles().length > 0"><i class="fa fa-cloud-upload"></i> ${_('Restore')}</button>
             <button class="btn fileToolbarBtn" title="${_('Restore from trash')}" data-bind="visible: inRestorableTrash(), click: restoreTrashSelected, enable: selectedFiles().length > 0"><i class="fa fa-cloud-upload"></i> ${_('Restore')}</button>
             <!-- ko ifnot: inTrash -->
             <!-- ko ifnot: inTrash -->
-              <!-- ko if: trashEnabled -->
                 <div id="delete-dropdown" class="btn-group" style="vertical-align: middle">
                 <div id="delete-dropdown" class="btn-group" style="vertical-align: middle">
                   <button id="trash-btn" class="btn toolbarBtn" data-bind="enable: selectedFiles().length > 0, click: trashSelected"><i class="fa fa-times"></i> ${_('Move to trash')}</button>
                   <button id="trash-btn" class="btn toolbarBtn" data-bind="enable: selectedFiles().length > 0, click: trashSelected"><i class="fa fa-times"></i> ${_('Move to trash')}</button>
                   <button id="trash-btn-caret" class="btn toolbarBtn dropdown-toggle" data-toggle="dropdown" data-bind="enable: selectedFiles().length > 0">
                   <button id="trash-btn-caret" class="btn toolbarBtn dropdown-toggle" data-toggle="dropdown" data-bind="enable: selectedFiles().length > 0">
@@ -68,10 +67,6 @@ ${ fb_components.menubar() }
                     <li><a href="#" class="delete-link" title="${_('Delete forever')}" data-bind="enable: selectedFiles().length > 0, click: deleteSelected"><i class="fa fa-bolt"></i> ${_('Delete forever')}</a></li>
                     <li><a href="#" class="delete-link" title="${_('Delete forever')}" data-bind="enable: selectedFiles().length > 0, click: deleteSelected"><i class="fa fa-bolt"></i> ${_('Delete forever')}</a></li>
                   </ul>
                   </ul>
                 </div>
                 </div>
-              <!-- /ko -->
-            <!-- /ko -->
-            <!-- ko ifnot: trashEnabled -->
-              <button class="btn fileToolbarBtn delete-link" title="${_('Delete forever')}" data-bind="enable: selectedFiles().length > 0, click: deleteSelected"><i class="fa fa-bolt"></i> ${_('Delete forever')}</button>
             <!-- /ko -->
             <!-- /ko -->
             <button class="btn fileToolbarBtn" title="${_('Submit')}"
             <button class="btn fileToolbarBtn" title="${_('Submit')}"
               data-bind="visible: selectedFiles().length == 1 && $.inArray(selectedFile().name, ['workflow.xml', 'coordinator.xml', 'bundle.xml']) > -1, click: submitSelected">
               data-bind="visible: selectedFiles().length == 1 && $.inArray(selectedFile().name, ['workflow.xml', 'coordinator.xml', 'bundle.xml']) > -1, click: submitSelected">

+ 2 - 12
apps/filebrowser/src/filebrowser/templates/listdir_components.mako

@@ -771,15 +771,6 @@ from django.utils.translation import ugettext as _
         $("#editBreadcrumb").show();
         $("#editBreadcrumb").show();
       });
       });
 
 
-      % if not trash_enabled and is_superuser:
-        $("#trash-help").popover({
-            'title': "${_('Did you know?')}",
-            'content': '${_('You can activate HDFS trash by setting fs.trash.interval in core-site.xml.')}',
-            'trigger': 'hover',
-            'html': true,
-            'placement': 'left'
-        });
-      % endif
       $.ajaxSetup({
       $.ajaxSetup({
         error:function (x, e) {
         error:function (x, e) {
           if (x.status == 500) {
           if (x.status == 500) {
@@ -916,7 +907,6 @@ from django.utils.translation import ugettext as _
       self.recordsPerPage = ko.observable($.cookie("hueFilebrowserRecordsPerPage"));
       self.recordsPerPage = ko.observable($.cookie("hueFilebrowserRecordsPerPage"));
       self.targetPageNum = ko.observable(1);
       self.targetPageNum = ko.observable(1);
       self.targetPath = ko.observable("${current_request_path}");
       self.targetPath = ko.observable("${current_request_path}");
-      self.trashEnabled = ko.observable(${ trash_enabled and "true" or "false" });
 
 
       self.sortBy = ko.observable("name");
       self.sortBy = ko.observable("name");
       self.sortDescending = ko.observable(false);
       self.sortDescending = ko.observable(false);
@@ -969,11 +959,11 @@ from django.utils.translation import ugettext as _
       self.currentPath = ko.observable(currentDirPath);
       self.currentPath = ko.observable(currentDirPath);
 
 
       self.inTrash = ko.computed(function() {
       self.inTrash = ko.computed(function() {
-        return self.currentPath().match(/^\/user\/.+?\/\.Trash/) && self.trashEnabled();
+        return self.currentPath().match(/^\/user\/.+?\/\.Trash/);
       });
       });
 
 
       self.inRestorableTrash = ko.computed(function() {
       self.inRestorableTrash = ko.computed(function() {
-        return self.currentPath().match(/^\/user\/.+?\/\.Trash\/.+?/) && self.trashEnabled();
+        return self.currentPath().match(/^\/user\/.+?\/\.Trash\/.+?/);
       });
       });
 
 
       self.getStats = function (callback) {
       self.getStats = function (callback) {

+ 1 - 9
apps/filebrowser/src/filebrowser/views.py

@@ -22,6 +22,7 @@
 
 import errno
 import logging
+import json
 import mimetypes
 import operator
 import posixpath
@@ -30,11 +31,6 @@ import shutil
 import stat as stat_module
 import os
 
-try:
-  import json
-except ImportError:
-  import simplejson as json
-
 from datetime import datetime
 
 from django.contrib import messages
@@ -65,7 +61,6 @@ from filebrowser.lib import xxd
 from filebrowser.forms import RenameForm, UploadFileForm, UploadArchiveForm, MkDirForm, EditorForm, TouchForm,\
                               RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet, RestoreFormSet,\
                               TrashPurgeForm
-from hadoop.core_site import get_trash_interval
 from hadoop.fs.hadoopfs import Hdfs
 from hadoop.fs.exceptions import WebHdfsException
 
@@ -425,8 +420,6 @@ def listdir_paged(request, path):
     if not request.fs.isdir(path):
         raise PopupException("Not a directory: %s" % (path,))
 
-    trash_enabled = get_trash_interval()
-
     pagenum = int(request.GET.get('pagenum', 1))
     pagesize = int(request.GET.get('pagesize', 30))
 
@@ -488,7 +481,6 @@ def listdir_paged(request, path):
         'page': _massage_page(page),
         'pagesize': pagesize,
         'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
-        'trash_enabled': trash_enabled,
         'sortby': sortby,
         'descending': descending_param,
         # The following should probably be deprecated

+ 0 - 36
desktop/conf.dist/hue.ini

@@ -327,18 +327,6 @@
       # Change this if your HDFS cluster is Kerberos-secured
       ## security_enabled=false
 
-      # Settings about this HDFS cluster. If you install HDFS in a
-      # different location, you need to set the following.
-
-      # Defaults to $HADOOP_HDFS_HOME or /usr/lib/hadoop-hdfs
-      ## hadoop_hdfs_home=/usr/lib/hadoop-hdfs
-
-      # Defaults to $HADOOP_BIN or /usr/bin/hadoop
-      ## hadoop_bin=/usr/bin/hadoop
-
-      # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
-      ## hadoop_conf_dir=/etc/hadoop/conf
-
   # Configuration for YARN (MR2)
   # ------------------------------------------------------------------------
   [[yarn_clusters]]
@@ -356,18 +344,6 @@
       # Change this if your YARN cluster is Kerberos-secured
       ## security_enabled=false
 
-      # Settings about this MR2 cluster. If you install MR2 in a
-      # different location, you need to set the following.
-
-      # Defaults to $HADOOP_MR2_HOME or /usr/lib/hadoop-mapreduce
-      ## hadoop_mapred_home=/usr/lib/hadoop-mapreduce
-
-      # Defaults to $HADOOP_BIN or /usr/bin/hadoop
-      ## hadoop_bin=/usr/bin/hadoop
-
-      # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
-      ## hadoop_conf_dir=/etc/hadoop/conf
-
       # URL of the ResourceManager API
       ## resourcemanager_api_url=http://localhost:8088
 
@@ -400,18 +376,6 @@
       # Change this if your MapReduce cluster is Kerberos-secured
       ## security_enabled=false
 
-      # Settings about this MR1 cluster. If you install MR1 in a
-      # different location, you need to set the following.
-
-      # Defaults to $HADOOP_MR1_HOME or /usr/lib/hadoop-0.20-mapreduce
-      ## hadoop_mapred_home=/usr/lib/hadoop-0.20-mapreduce
-
-      # Defaults to $HADOOP_BIN or /usr/bin/hadoop
-      ## hadoop_bin=/usr/bin/hadoop
-
-      # Defaults to $HADOOP_CONF_DIR or /etc/hadoop/conf
-      ## hadoop_conf_dir=/etc/hadoop/conf
-
     # HA support by specifying multiple clusters
     # e.g.
 

+ 1 - 2
desktop/core/src/desktop/management/commands/test_windmill.py

@@ -21,7 +21,6 @@ after appropriate setup.
 import sys
 import time
 from optparse import make_option
-from hadoop import mini_cluster
 
 from django.core.management.base import BaseCommand
 from windmill.authoring import djangotest
@@ -73,7 +72,7 @@ class Command(BaseCommand):
     This currently doesn't start app-specific
     other servers.
     """
-    self.cluster = mini_cluster.shared_cluster(conf=True)
+    pass
 
   def stop_helper_servers(self):
     self.cluster.shutdown()

+ 6 - 101
desktop/libs/hadoop/src/hadoop/conf.py

@@ -16,7 +16,7 @@
 # limitations under the License.
 
 from django.utils.translation import ugettext_lazy as _t
-from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, validate_path, coerce_bool
+from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_bool
 import fnmatch
 import logging
 import os
@@ -39,26 +39,6 @@ def find_file_recursive(desired_glob, root):
   f.__doc__ = "Finds %s/%s" % (root, desired_glob)
   return f
 
-HADOOP_PLUGIN_CLASSPATH = Config("hadoop_plugin_classpath",
-  help="[Used only in testing code.] Path to the Hadoop plugin jar.",
-  type=str,
-  dynamic_default=find_file_recursive("hue-plugins-*.jar",
-                root=os.path.join(os.path.dirname(__file__), '..', '..', 'java-lib')),
-  private=True)
-
-SUDO_SHELL_JAR = Config("hadoop_sudo_shell_jar",
-  help="Tool that allows a proxy user UGI to be used to upload files.",
-  type=str,
-  dynamic_default=find_file_recursive("sudo-shell-*.jar",
-                root=os.path.join(os.path.dirname(__file__), '..', '..', 'sudo-shell', 'java-lib')),
-  private=True)
-
-CREDENTIALS_MERGER_JAR = Config("hadoop_credentials_merger_jar",
-  help="Tool that is capable of merging multiple files containing delegation tokens into one.",
-  type=str,
-  dynamic_default=find_file_recursive("credentials-merger-*.jar",
-                root=os.path.join(os.path.dirname(__file__), '..', '..', 'credentials-merger', 'java-lib')),
-  private=True)
 
 UPLOAD_CHUNK_SIZE = Config(
   key="upload_chunk_size",
@@ -89,31 +69,6 @@ HDFS_CLUSTERS = UnspecifiedConfigSection(
                               default=False, type=coerce_bool),
       TEMP_DIR=Config("temp_dir", help="HDFS directory for temporary files",
                       default='/tmp', type=str),
-
-      HADOOP_HDFS_HOME = Config(
-        key="hadoop_hdfs_home",
-        default=os.environ.get("HADOOP_HDFS_HOME", "/usr/lib/hadoop-hdfs"),
-        help=("Path to Hadoop HDFS home - HADOOP_HOME or HADOOP_HDFS_HOME in " +
-              "hadoop parlance. For tarball installations, it is the root of " +
-              "the untarred directory. For packages, " +
-              "it is /usr/lib/hadoop-hdfs." +
-              "Defaults to the environment varible HADOOP_BIN when set, " +
-              "or '/usr/bin/hadoop'."),
-      ),
-      HADOOP_BIN = Config(
-        key="hadoop_bin",
-        default=os.environ.get("HADOOP_BIN", "/usr/bin/hadoop"),
-        help=("Path to your Hadoop launcher script. E.g. /usr/bin/hadoop. " +
-              "Defaults to the environment varible HADOOP_BIN when set, " +
-              "or '/usr/bin/hadoop'.")
-      ),
-      HADOOP_CONF_DIR = Config(
-        key="hadoop_conf_dir",
-        default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
-        help=("Directory to pass to hadoop_bin (from Hadoop configuration) " +
-              "as the --config flag. Defaults to the environment variable " +
-              "HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
-      ),
     )
   )
 )
@@ -140,29 +95,7 @@ MR_CLUSTERS = UnspecifiedConfigSection(
       SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
                               default=False, type=coerce_bool),
       SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
-                       default=True, type=coerce_bool), # Backward compatibility
-
-      HADOOP_MAPRED_HOME = Config(
-        key="hadoop_mapred_home",
-        default=os.environ.get("HADOOP_MR1_HOME", "/usr/lib/hadoop-0.20-mapreduce"),
-        help=("Path to directory holding Hadoop MR1 libs. " +
-              "E.g. /usr/lib/hadoop. Defaults to the environment variable " +
-              "HADOOP_MR1_HOME when set, or '/usr/lib/hadoop'.")
-      ),
-      HADOOP_BIN = Config(
-        key="hadoop_bin",
-        default=os.environ.get("HADOOP_MR1_BIN", "/usr/bin/hadoop"),
-        help=("Path to your Hadoop launcher script. E.g. /usr/bin/hadoop. " +
-              "Defaults to the environment varible HADOOP_MR1_BIN when set, " +
-              "or '/usr/bin/hadoop'.")
-      ),
-      HADOOP_CONF_DIR = Config(
-        key="hadoop_conf_dir",
-        default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
-        help=("Directory to pass to hadoop_bin (from Hadoop configuration) " +
-              "as the --config flag. Defaults to the environment variable " +
-              "HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
-      ),
+                       default=True, type=coerce_bool), # True here for backward compatibility
     )
   )
 )
@@ -184,29 +117,7 @@ YARN_CLUSTERS = UnspecifiedConfigSection(
       SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
                               default=False, type=coerce_bool),
       SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
-                       default=False, type=coerce_bool), # Backward compatibility
-
-      HADOOP_MAPRED_HOME = Config(
-        key="hadoop_mapred_home",
-        default=os.environ.get("HADOOP_MR2_HOME", "/usr/lib/hadoop-mapreduce"),
-        help=("Path to directory holding Hadoop MR2 libs. " +
-              "E.g. /usr/lib/hadoop. Defaults to the environment " +
-              "variable HADOOP_MR2_HOME when set, or '/usr/lib/hadoop'.")
-      ),
-      HADOOP_BIN = Config(
-        key="hadoop_bin",
-        default=os.environ.get("HADOOP_MR2_BIN", "/usr/bin/hadoop"),
-        help=("Path to your Hadoop launcher script. E.g. /usr/bin/hadoop. " +
-              "Defaults to the environment varible HADOOP_MR2_BIN when set, " +
-              "or '/usr/bin/hadoop'.")
-      ),
-      HADOOP_CONF_DIR = Config(
-        key="hadoop_conf_dir",
-        default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
-        help=("Directory to pass to hadoop_bin (from Hadoop configuration) " +
-              "as the --config flag. Defaults to the environment variable " +
-              "HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
-      ),
+                       default=False, type=coerce_bool), # False here for backward compatibility
       IS_YARN=Config("is_yarn", help="Attribute set only on YARN clusters and not MR1 ones.",
                      default=True, type=coerce_bool),
       RESOURCE_MANAGER_API_URL=Config("resourcemanager_api_url",
@@ -231,6 +142,8 @@ def config_validator(user):
   """
   from hadoop.fs import webhdfs
   from hadoop import job_tracker
+  from hadoop.yarn import tests
+
   res = []
   submit_to = []
 
@@ -238,9 +151,6 @@ def config_validator(user):
   has_default = False
   for name in HDFS_CLUSTERS.keys():
     cluster = HDFS_CLUSTERS[name]
-    res.extend(validate_path(cluster.HADOOP_HDFS_HOME, is_dir=True))
-    res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
-    res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
     res.extend(webhdfs.test_fs_configuration(cluster))
     if name == 'default':
       has_default = True
@@ -252,9 +162,6 @@ def config_validator(user):
   for name in MR_CLUSTERS.keys():
     cluster = MR_CLUSTERS[name]
     if cluster.SUBMIT_TO.get():
-      res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
-      res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
-      res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
       mr_down.extend(job_tracker.test_jt_configuration(cluster))
       submit_to.append('mapred_clusters.' + name)
   # If HA still failing
@@ -265,9 +172,7 @@ def config_validator(user):
   for name in YARN_CLUSTERS.keys():
     cluster = YARN_CLUSTERS[name]
     if cluster.SUBMIT_TO.get():
-      res.extend(validate_path(cluster.HADOOP_MAPRED_HOME, is_dir=True))
-      res.extend(validate_path(cluster.HADOOP_CONF_DIR, is_dir=True))
-      res.extend(validate_path(cluster.HADOOP_BIN, is_dir=False))
+      res.extend(tests.test_yarn_configurations())
       submit_to.append('yarn_clusters.' + name)
 
   if not submit_to:

+ 6 - 3
desktop/libs/hadoop/src/hadoop/core_site.py

@@ -14,8 +14,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 """
-Helper for reading core-site.xml
+Deprecated: not used anymore and will be empty
 """
 
 import errno
@@ -55,10 +56,12 @@ def _parse_core_site():
   global _CORE_SITE_PATH
 
   for indentifier in conf.HDFS_CLUSTERS.get():
-    _CORE_SITE_PATH = os.path.join(conf.HDFS_CLUSTERS[indentifier].HADOOP_CONF_DIR.get(), 'core-site.xml')
     try:
+      _CORE_SITE_PATH = os.path.join(conf.HDFS_CLUSTERS[indentifier].HADOOP_CONF_DIR.get(), 'core-site.xml') # Will KeyError and be empty as HADOOP_CONF_DIR does not exist anymore
       data = file(_CORE_SITE_PATH, 'r').read()
       break
+    except KeyError:
+      data = ""
    except IOError, err:
       if err.errno != errno.ENOENT:
         LOG.error('Cannot read from "%s": %s' % (_CORE_SITE_PATH, err))
@@ -75,4 +78,4 @@ def get_trash_interval():
 
   Also indicates whether trash is enabled or not.
   """
-  return get_conf().get(_CNF_TRASH_INTERVAL, None)
+  return get_conf().get(_CNF_TRASH_INTERVAL)
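
With HADOOP_CONF_DIR gone from the HDFS cluster config model, _parse_core_site() now takes the KeyError branch and the parsed configuration stays empty. A minimal sketch of the resulting contract (the caller code below is illustrative, not part of this commit):

  from hadoop import core_site

  # The parsed core-site.xml dict is now empty, so dict.get() with no
  # default returns None -- the same value callers used to treat as
  # "trash disabled".
  interval = core_site.get_trash_interval()
  assert interval is None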

+ 3 - 9
desktop/libs/hadoop/src/hadoop/fs/webhdfs.py

@@ -282,7 +282,7 @@ class WebHdfs(Hdfs):
 
   def remove(self, path, skip_trash=False):
     """Delete a file."""
-    if hadoop.core_site.get_trash_interval() is None or skip_trash:
+    if skip_trash:
       self._delete(path, recursive=False)
     else:
       self._trash(path, recursive=False)
@@ -293,7 +293,7 @@ class WebHdfs(Hdfs):
 
   def rmtree(self, path, skip_trash=False):
     """Delete a tree recursively."""
-    if hadoop.core_site.get_trash_interval() is None or skip_trash:
+    if skip_trash:
       self._delete(path, recursive=True)
     else:
       self._trash(path, recursive=True)
@@ -306,9 +306,6 @@ class WebHdfs(Hdfs):
     Removing the root from ``path`` will provide the original path.
     Ensure parent directories exist and rename path.
     """
-    if hadoop.core_site.get_trash_interval() is None:
-      raise IOError(errno.EPERM, _("Trash is not enabled."))
-
     if not path.startswith(self.trash_path):
       raise IOError(errno.EPERM, _("File %s is not in trash") % path)
 
@@ -333,9 +330,6 @@ class WebHdfs(Hdfs):
 
     Purge all trash in users ``trash_path``
     """
-    if hadoop.core_site.get_trash_interval() is None:
-      raise IOError(errno.EPERM, _("Trash is not enabled."))
-
     for timestamped_directory in self.listdir(self.trash_path):
       self.rmtree(self.join(self.trash_path, timestamped_directory), True)
 
@@ -782,7 +776,7 @@ def test_fs_configuration(fs_config):
             _('Failed to create temporary file "%s"') % tmpname)]
 
   # Check superuser has super power
-  try:  # Finally: delete tmpname
+  try:
     try:
       fs.chown(tmpname, fs.superuser)
     except Exception, ex:
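
The net effect of these changes is that trash becomes the default delete path and callers opt out explicitly. A short usage sketch against the patched WebHdfs API (the fs instance and path below are hypothetical):

  # `fs` stands for any configured WebHdfs instance; the path is made up.
  path = '/user/test/old_logs'

  fs.rmtree(path)                   # now always moves the tree to .Trash
  fs.rmtree(path, skip_trash=True)  # deletes immediately, bypassing trash

  # restore_path() and purge_trash() no longer raise EPERM when trash is
  # disabled server-side; they only check that the path is inside .Trash.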

+ 1 - 3
desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py

@@ -439,7 +439,7 @@ class PseudoHdfs4(object):
     yarn_configs = {
       'yarn.resourcemanager.resource-tracker.address': '%s:%s' % (self._fqdn, self._rm_resource_port,),
       'yarn.resourcemanager.address': '%s:%s' % (self._fqdn, self._rm_port,),
-      'yarn.resourcemanager.scheduler.address': '%s:%s' % (self._fqdn, 8030,), #self._rm_scheduler_port # /!\ Hardcoded for now
+      'yarn.resourcemanager.scheduler.address': '%s:%s' % (self._fqdn, self._rm_scheduler_port,),
       'yarn.resourcemanager.scheduler.class': 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler',
       'yarn.resourcemanager.admin.address': '%s:%s' % (self._fqdn, self._rm_admin_port,),
       'yarn.resourcemanager.webapp.address': '%s:%s' % (self._fqdn, self._rm_webapp_port,),
@@ -515,9 +515,7 @@ def shared_cluster():
     closers = [
       hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
       hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
-      hadoop.conf.HDFS_CLUSTERS['default'].HADOOP_CONF_DIR.set_for_testing(cluster.hadoop_conf_dir),
 
-      hadoop.conf.YARN_CLUSTERS['default'].HADOOP_CONF_DIR.set_for_testing(cluster.hadoop_conf_dir),
       hadoop.conf.YARN_CLUSTERS['default'].HOST.set_for_testing(fqdn),
       hadoop.conf.YARN_CLUSTERS['default'].PORT.set_for_testing(cluster._rm_port),
 

+ 40 - 0
desktop/libs/hadoop/src/hadoop/yarn/tests.py

@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Licensed to Cloudera, Inc. under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  Cloudera, Inc. licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from hadoop.yarn.resource_manager_api import get_resource_manager
+
+
+LOG = logging.getLogger(__name__)
+
+
+def test_yarn_configurations():
+  # Single cluster for now
+
+  result = []
+
+  try:
+    url = ''
+    api = get_resource_manager()
+    url = api._url
+    api.apps()
+  except Exception, e:
+    msg = 'Failed to contact Resource Manager at %s: %s' % (url, e)
+    result.append(('Resource Manager', msg))
+
+  return result
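
test_yarn_configurations() returns a list of (name, message) tuples that config_validator() in hadoop/conf.py folds into its results via res.extend(tests.test_yarn_configurations()), as shown in the hunk above. A hypothetical run against an unreachable Resource Manager (this code base is Python 2):

  from hadoop.yarn import tests

  errors = tests.test_yarn_configurations()
  # e.g. [('Resource Manager', 'Failed to contact Resource Manager at ...: ...')]
  for name, msg in errors:
    print '%s: %s' % (name, msg)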