org.apache.hadoop.mapreduce.v2.app.AppContext

Here are examples of the Java API org.apache.hadoop.mapreduce.v2.app.AppContext, taken from open source projects.

107 Examples
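
Every example below follows the same pattern: an AppContext instance is injected (typically with Guice's @Inject) and then queried for job state through methods such as getJob(JobId) and getAllJobs(). The minimal sketch below distills that pattern; MyJobBlock and printJobState are hypothetical names used only for illustration.

import com.google.inject.Inject;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

// Hypothetical block showing the injection and lookup pattern shared
// by the real examples below.
public class MyJobBlock {

    final AppContext appContext;

    @Inject
    MyJobBlock(AppContext appctx) {
        appContext = appctx;
    }

    void printJobState(String jid) {
        JobId jobID = MRApps.toJobID(jid);  // parse a "job_..." id string
        Job job = appContext.getJob(jobID); // null if the job is unknown
        if (job != null) {
            System.out.println(jid + " -> " + job.getState());
        }
    }
}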

Source: HsSingleJobBlock.java (Apache License 2.0, from Qihoo360)

public class HsSingleJobBlock extends HtmlBlock implements AMParams {

    final AppContext appContext;

    @Inject
    HsSingleJobBlock(AppContext appctx) {
        appContext = appctx;
    }

    @Override
    protected void render(Block html) {
        int numContainers = Integer.parseInt($(CONTAINER_NUMBER));
        if (numContainers > 0) {
            Hamlet.TBODY<Hamlet.TABLE<Hamlet>> tbody = html.h2("All Containers:").table("#Containers").thead("ui-widget-header").tr().th("ui-state-default", "Container ID").th("ui-state-default", "Container Host").th("ui-state-default", "Container Status").th("ui-state-default", "Start Time").th("ui-state-default", "Finish Time").th("ui-state-default", "Reporter Progress")._()._().tbody();
            for (int i = 0; i < numContainers; i++) {
                Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = tbody._().tbody("ui-widget-content").tr().$style("text-align:center;").td();
                td.span().$title(String.format($(CONTAINER_ID + i)))._().a($(CONTAINER_LOG_ADDRESS + i), String.format($(CONTAINER_ID + i)));
                String containerMachine = $(CONTAINER_HTTP_ADDRESS + i);
                if ($(CONTAINER_REPORTER_PROGRESS + i).equals("progress log format error")) {
                    td._().td(containerMachine.split(":")[0]).td($(CONTAINER_STATUS + i)).td($(CONTAINER_START_TIME + i)).td($(CONTAINER_FINISH_TIME + i)).td($(CONTAINER_REPORTER_PROGRESS + i))._();
                } else if ($(CONTAINER_REPORTER_PROGRESS + i).equals("0.00%")) {
                    td._().td(containerMachine.split(":")[0]).td($(CONTAINER_STATUS + i)).td($(CONTAINER_START_TIME + i)).td($(CONTAINER_FINISH_TIME + i)).td("N/A")._();
                } else {
                    td._().td(containerMachine.split(":")[0]).td($(CONTAINER_STATUS + i)).td($(CONTAINER_START_TIME + i)).td($(CONTAINER_FINISH_TIME + i)).td().div().$class("ui-progressbar ui-widget ui-widget-content ui-corner-all").$title($(CONTAINER_REPORTER_PROGRESS + i)).div().$class("ui-progressbar-value ui-widget-header ui-corner-left").$style("width:" + $(CONTAINER_REPORTER_PROGRESS + i))._()._()._()._();
                }
            }
            if ($(BOARD_INFO_FLAG).equals("true")) {
                tbody._()._().div().$style("margin:40px 2px;")._(" ")._().h2("View Board:").table("#Board").thead("ui-widget-header").tr().th("ui-state-default", "Board Info")._()._().tbody("ui-widget-content").tr().$style("text-align:center;").td($(BOARD_INFO))._()._()._();
            } else {
                tbody._()._();
            }
            int timestampSize = Integer.parseInt($(TIMESTAMP_TOTAL));
            int outputSize = Integer.parseInt($(OUTPUT_TOTAL));
            if (timestampSize > 0) {
                html.div().$style("margin:20px 2px;")._(" ")._();
                Hamlet.TBODY<TABLE<Hamlet>> tbodySave = html.h2("Saved Model").table("#savedmodel").thead("ui-widget-header").tr().th("ui-state-default", "Saved timeStamp").th("ui-state-default", "Saved path")._()._().tbody();
                for (int i = 0; i < timestampSize; i++) {
                    String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date(Long.parseLong($(TIMESTAMP_LIST + i))));
                    Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = tbodySave._().tbody("ui-widget-content").tr().$style("text-align:center;").td(timeStamp).td();
                    String pathStr = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss").format(new Date(Long.parseLong($(TIMESTAMP_LIST + i))));
                    for (int j = 0; j < outputSize; j++) {
                        td.p()._($(OUTPUT_PATH + j) + pathStr)._();
                    }
                    td._()._();
                }
                tbodySave._()._();
            }
            if (Boolean.parseBoolean($(CONTAINER_CPU_STATISTICS))) {
                int numWorkers = Integer.parseInt($(WORKER_NUMBER));
                html.div().$style("margin:20px 2px;")._(" ")._();
                // resource applied info
                Hamlet.TBODY<TABLE<Hamlet>> resourceAppliedInfo = html.h2("Resource Applied Info:").table("#resourceAppliedInfo").thead("ui-widget-header").tr().th("ui-state-default", "Role").th("ui-state-default", "Number").th("ui-state-default", "CPU Memory(GB)").th("ui-state-default", "CPU Cores")._()._().tbody();
                if (numWorkers > 0) {
                    resourceAppliedInfo._().tbody("ui-widget-content").tr().$style("text-align:center;").td("worker").td(String.valueOf(numWorkers)).td($(WORKER_MEMORY)).td($(WORKER_VCORES))._();
                }
                resourceAppliedInfo._()._();
                html.div().$style("margin:20px 2px;")._(" ")._();
                // worker containers resource usage statistics info
                if (numWorkers > 0) {
                    Hamlet.TBODY<TABLE<Hamlet>> workerCPUUsage = html.h2("Worker Containers CPU Usage Info:").table("#workerCPUUsageInfo").thead("ui-widget-header").tr().th("ui-state-default", "ContainerID").th("ui-state-default", "CPU memory average usages(GB)").th("ui-state-default", "CPU memory max usages(GB)").th("ui-state-default", "CPU utilization average usages(%)").th("ui-state-default", "CPU utilization max usages(%)")._()._().tbody();
                    for (int i = 0; i < numWorkers; i++) {
                        Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = workerCPUUsage._().tbody("ui-widget-content").tr().$style("text-align:center;").td($("WORKER_CONTAINER_ID" + i)).td($("worker" + CONTAINER_CPU_STATISTICS_MEM + USAGE_AVG + i)).td();
                        String memWarn = $("worker" + CONTAINER_CPU_USAGE_WARN_MEM + i);
                        if (memWarn != null && !memWarn.isEmpty() && Boolean.valueOf(memWarn)) {
                            td.$style("color:red").b(String.format("%s\t( Current cpu memory used is much less than applied. Please adjust !! )", $("worker" + CONTAINER_CPU_STATISTICS_MEM + USAGE_MAX + i)));
                        } else {
                            td._($("worker" + CONTAINER_CPU_STATISTICS_MEM + USAGE_MAX + i));
                        }
                        td._().td($("worker" + CONTAINER_CPU_STATISTICS_UTIL + USAGE_AVG + i)).td($("worker" + CONTAINER_CPU_STATISTICS_UTIL + USAGE_MAX + i))._();
                    }
                    workerCPUUsage._()._();
                    html.div().$style("margin:20px 2px;")._(" ")._();
                }
            }
            html.div().$style("margin:20px 2px;")._(" ")._();
            if (Boolean.parseBoolean($(CONTAINER_CPU_METRICS_ENABLE))) {
                int numWorkers = Integer.parseInt($(WORKER_NUMBER));
                for (int i = 0; i < numWorkers; i++) {
                    if (!$("workerCpuMemMetrics" + i).equals("") && $("workerCpuMemMetrics" + i) != null) {
                        html.div().$style("margin:20px 2px;font-weight:bold;font-size:12px")._(String.format($(CONTAINER_ID + i)) + " metrics:")._();
                    }
                    html.script().$src("/static/xlWebApp/jquery-3.1.1.min.js")._();
                    html.script().$src("/static/xlWebApp/highstock.js")._();
                    html.script().$src("/static/xlWebApp/exporting.js")._();
                    String containerCpuMemID = "workercontainerCpuMem" + i;
                    String containerCpuUtilID = "workercontainerCpuUtil" + i;
                    String containerClass = "container" + i;
                    String seriesCpuMemOptions = "[{\n" + "            name: 'cpu mem used',\n" + "            data: " + $("workerCpuMemMetrics" + i) + "\n" + "        }]";
                    String seriesCpuUtilOptions = "[{\n" + "            name: 'cpu util',\n" + "            data: " + $("workerCpuUtilMetrics" + i) + "\n" + "        }]";
                    if ($("workerCpuUtilMetrics" + i) != null && !$("workerCpuUtilMetrics" + i).equals("")) {
                        html.div().div().$id(containerCpuMemID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._().div().$id(containerCpuUtilID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._()._();
                    } else {
                        html.div().div().$id(containerCpuMemID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._()._();
                    }
                    String css = "." + containerClass + "{\n" + "    display:inline-block;\n" + "}";
                    html.style().$type("text/css")._(css)._();
                    String scriptHead = "Highcharts.setOptions({\n" + "    global: {\n" + "        useUTC: false\n" + "    }\n" + "});\n" + "// Create the chart\n";
                    String scriptBody = "Highcharts.stockChart(" + containerCpuMemID + ", {\n" + "    chart: {\n" + "        width: 550\n" + "    },\n" + "\n" + "    rangeSelector: {\n" + "        buttons: [{\n" + "            count: 1,\n" + "            type: 'minute',\n" + "            text: '1M'\n" + "        }, {\n" + "            count: 5,\n" + "            type: 'minute',\n" + "            text: '5M'\n" + "        }, {\n" + "            type: 'all',\n" + "            text: 'All'\n" + "        }],\n" + "        inputEnabled: false,\n" + "        selected: 0\n" + "    },\n" + "\n" + "    title: {\n" + "        text: 'cpu memory used( GB )'\n" + "    },\n" + "\n" + "    credits: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    exporting: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    series: " + seriesCpuMemOptions + "\n" + "});\n";
                    if ($("workerCpuUtilMetrics" + i) != null && !$("workerCpuUtilMetrics" + i).equals("")) {
                        scriptBody += "Highcharts.stockChart(" + containerCpuUtilID + ", {\n" + "    chart: {\n" + "        width: 550\n" + "    },\n" + "\n" + "    rangeSelector: {\n" + "        buttons: [{\n" + "            count: 1,\n" + "            type: 'minute',\n" + "            text: '1M'\n" + "        }, {\n" + "            count: 5,\n" + "            type: 'minute',\n" + "            text: '5M'\n" + "        }, {\n" + "            type: 'all',\n" + "            text: 'All'\n" + "        }],\n" + "        inputEnabled: false,\n" + "        selected: 0\n" + "    },\n" + "\n" + "    title: {\n" + "        text: 'cpu utilization( % )'\n" + "    },\n" + "\n" + "    credits: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    exporting: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    series: " + seriesCpuUtilOptions + "\n" + "});\n";
                    }
                    html.script().$type("text/javascript")._(scriptHead + scriptBody)._();
                }
            }
        } else {
            html.div().$style("font-size:20px;")._("Job History Log getting error !")._();
        }
    }
}
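
The render methods above build their HTML with YARN's Hamlet fluent API, which is hard to read when flattened onto single lines. Here is a minimal sketch of the convention, assuming the same Hamlet classes the blocks above use: each element opener nests one level deeper, and each _() call closes the most recently opened element.

// Hypothetical table, shown only to illustrate the Hamlet convention.
protected void render(Block html) {
    html.h2("Example").table("#example")
        .tr().th("Key").th("Value")._() // _() closes the <tr>
        .tr().td("jobs").td("42")._()   // th()/td() with text close themselves
        ._();                           // final _() closes the <table>
}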

Source: HsJobBlock.java (Apache License 2.0, from Qihoo360)

public class HsJobBlock extends HtmlBlock implements AMParams {

    final AppContext appContext;

    @Inject
    HsJobBlock(AppContext appctx) {
        appContext = appctx;
    }

    @Override
    protected void render(Block html) {
        int numContainers = Integer.parseInt($(CONTAINER_NUMBER));
        if (numContainers > 0) {
            Hamlet.TBODY<Hamlet.TABLE<Hamlet>> tbody = html.h2("All Containers:").table("#Containers").thead("ui-widget-header").tr().th("ui-state-default", "Container ID").th("ui-state-default", "Container Host").th("ui-state-default", "Container Role").th("ui-state-default", "Container Status").th("ui-state-default", "Start Time").th("ui-state-default", "Finish Time").th("ui-state-default", "Reporter Progress")._()._().tbody();
            for (int i = 0; i < numContainers; i++) {
                Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = tbody._().tbody("ui-widget-content").tr().$style("text-align:center;").td();
                td.span().$title(String.format($(CONTAINER_ID + i)))._().a($(CONTAINER_LOG_ADDRESS + i), String.format($(CONTAINER_ID + i)));
                String containerMachine = $(CONTAINER_HTTP_ADDRESS + i);
                if ($(CONTAINER_REPORTER_PROGRESS + i).equals("progress log format error")) {
                    td._().td(containerMachine.split(":")[0]).td($(CONTAINER_ROLE + i)).td($(CONTAINER_STATUS + i)).td($(CONTAINER_START_TIME + i)).td($(CONTAINER_FINISH_TIME + i)).td($(CONTAINER_REPORTER_PROGRESS + i))._();
                } else if ($(CONTAINER_REPORTER_PROGRESS + i).equals("0.00%")) {
                    td._().td(containerMachine.split(":")[0]).td($(CONTAINER_ROLE + i)).td($(CONTAINER_STATUS + i)).td($(CONTAINER_START_TIME + i)).td($(CONTAINER_FINISH_TIME + i)).td("N/A")._();
                } else {
                    td._().td(containerMachine.split(":")[0]).td($(CONTAINER_ROLE + i)).td($(CONTAINER_STATUS + i)).td($(CONTAINER_START_TIME + i)).td($(CONTAINER_FINISH_TIME + i)).td().div().$class("ui-progressbar ui-widget ui-widget-content ui-corner-all").$title($(CONTAINER_REPORTER_PROGRESS + i)).div().$class("ui-progressbar-value ui-widget-header ui-corner-left").$style("width:" + $(CONTAINER_REPORTER_PROGRESS + i))._()._()._()._();
                }
            }
            if ($(BOARD_INFO_FLAG).equals("true")) {
                tbody._()._().div().$style("margin:40px 2px;")._(" ")._().h2("View Board:").table("#Board").thead("ui-widget-header").tr().th("ui-state-default", "Board Info")._()._().tbody("ui-widget-content").tr().$style("text-align:center;").td($(BOARD_INFO))._()._()._();
            } else {
                tbody._()._();
            }
            int timestampSize = Integer.parseInt($(TIMESTAMP_TOTAL));
            int outputSize = Integer.parseInt($(OUTPUT_TOTAL));
            if (timestampSize > 0) {
                html.div().$style("margin:20px 2px;")._(" ")._();
                Hamlet.TBODY<TABLE<Hamlet>> tbodySave = html.h2("Saved Model").table("#savedmodel").thead("ui-widget-header").tr().th("ui-state-default", "Saved timeStamp").th("ui-state-default", "Saved path")._()._().tbody();
                for (int i = 0; i < timestampSize; i++) {
                    String timeStamp = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date(Long.parseLong($(TIMESTAMP_LIST + i))));
                    Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = tbodySave._().tbody("ui-widget-content").tr().$style("text-align:center;").td(timeStamp).td();
                    String pathStr = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss").format(new Date(Long.parseLong($(TIMESTAMP_LIST + i))));
                    for (int j = 0; j < outputSize; j++) {
                        td.p()._($(OUTPUT_PATH + j) + pathStr)._();
                    }
                    td._()._();
                }
                tbodySave._()._();
            }
            if (Boolean.parseBoolean($(CONTAINER_CPU_STATISTICS))) {
                int numWorkers = Integer.parseInt($(WORKER_NUMBER));
                int numPS = Integer.parseInt($(PS_NUMBER));
                html.div().$style("margin:20px 2px;")._(" ")._();
                // resource applied info
                Hamlet.TBODY<TABLE<Hamlet>> resourceAppliedInfo = html.h2("Resource Applied Info:").table("#resourceAppliedInfo").thead("ui-widget-header").tr().th("ui-state-default", "Role").th("ui-state-default", "Number").th("ui-state-default", "CPU Memory(GB)").th("ui-state-default", "CPU Cores")._()._().tbody();
                if (numWorkers > 0) {
                    resourceAppliedInfo._().tbody("ui-widget-content").tr().$style("text-align:center;").td("worker").td(String.valueOf(numWorkers)).td($(WORKER_MEMORY)).td($(WORKER_VCORES))._();
                }
                if ($(CHIEF_WORKER_MEMORY) != null && !$(CHIEF_WORKER_MEMORY).trim().isEmpty()) {
                    resourceAppliedInfo._().tbody("ui-widget-content").tr().$style("text-align:center;").td("worker/chief").td("-").td($(CHIEF_WORKER_MEMORY)).td("-")._();
                }
                if ($(EVALUATOR_WORKER_MEMORY) != null && !$(EVALUATOR_WORKER_MEMORY).trim().isEmpty()) {
                    resourceAppliedInfo._().tbody("ui-widget-content").tr().$style("text-align:center;").td("worker/evaluator").td("-").td($(EVALUATOR_WORKER_MEMORY)).td("-")._();
                }
                if (numPS > 0) {
                    resourceAppliedInfo._().tbody("ui-widget-content").tr().$style("text-align:center;").td("ps").td(String.valueOf(numPS)).td($(PS_MEMORY)).td($(PS_VCORES))._();
                }
                resourceAppliedInfo._()._();
                // worker/ps containers resource usage statistics info
                if (numWorkers > 0) {
                    Hamlet.TBODY<TABLE<Hamlet>> workerCPUUsage = html.h2("Worker Containers CPU Usage Info:").table("#workerCPUUsageInfo").thead("ui-widget-header").tr().th("ui-state-default", "ContainerID").th("ui-state-default", "CPU memory average usages(GB)").th("ui-state-default", "CPU memory max usages(GB)").th("ui-state-default", "CPU utilization average usages(%)").th("ui-state-default", "CPU utilization max usages(%)")._()._().tbody();
                    for (int i = 0; i < numWorkers; i++) {
                        Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = workerCPUUsage._().tbody("ui-widget-content").tr().$style("text-align:center;").td($("WORKER_CONTAINER_ID" + i)).td($("worker" + CONTAINER_CPU_STATISTICS_MEM + USAGE_AVG + i)).td();
                        String memWarn = $("worker" + CONTAINER_CPU_USAGE_WARN_MEM + i);
                        if (memWarn != null && !memWarn.isEmpty() && Boolean.valueOf(memWarn)) {
                            td.$style("color:red").b(String.format("%s\t( Current cpu memory used is much less than applied. Please adjust !! )", $("worker" + CONTAINER_CPU_STATISTICS_MEM + USAGE_MAX + i)));
                        } else {
                            td._($("worker" + CONTAINER_CPU_STATISTICS_MEM + USAGE_MAX + i));
                        }
                        td._().td($("worker" + CONTAINER_CPU_STATISTICS_UTIL + USAGE_AVG + i)).td($("worker" + CONTAINER_CPU_STATISTICS_UTIL + USAGE_MAX + i))._();
                    }
                    workerCPUUsage._()._();
                    html.div().$style("margin:20px 2px;")._(" ")._();
                }
                if (numPS > 0) {
                    html.div().$style("margin:20px 2px;")._(" ")._();
                    Hamlet.TBODY<TABLE<Hamlet>> psCPUUsage = html.h2("PS Containers CPU Usage Info:").table("#psCPUUsageInfo").thead("ui-widget-header").tr().th("ui-state-default", "ContainerID").th("ui-state-default", "CPU memory average usages(GB)").th("ui-state-default", "CPU memory max usages(GB)").th("ui-state-default", "CPU utilization average usages(%)").th("ui-state-default", "CPU utilization max usages(%)")._()._().tbody();
                    for (int i = 0; i < numPS; i++) {
                        Hamlet.TD<Hamlet.TR<Hamlet.TBODY<TABLE<Hamlet>>>> td = psCPUUsage._().tbody("ui-widget-content").tr().$style("text-align:center;").td($("PS_CONTAINER_ID" + i)).td($("ps" + CONTAINER_CPU_STATISTICS_MEM + USAGE_AVG + i)).td();
                        String memWarn = $("ps" + CONTAINER_CPU_USAGE_WARN_MEM + i);
                        if (memWarn != null && !memWarn.isEmpty() && Boolean.valueOf(memWarn)) {
                            td.$style("color:red").b(String.format("%s\t( Current cpu memory used is much less than applied. Please adjust !! )", $("ps" + CONTAINER_CPU_STATISTICS_MEM + USAGE_MAX + i)));
                        } else {
                            td._($("ps" + CONTAINER_CPU_STATISTICS_MEM + USAGE_MAX + i));
                        }
                        td._().td($("ps" + CONTAINER_CPU_STATISTICS_UTIL + USAGE_AVG + i)).td($("ps" + CONTAINER_CPU_STATISTICS_UTIL + USAGE_MAX + i))._();
                    }
                    psCPUUsage._()._();
                    html.div().$style("margin:20px 2px;")._(" ")._();
                }
            }
            html.div().$style("margin:20px 2px;")._(" ")._();
            if (Boolean.parseBoolean($(CONTAINER_CPU_METRICS_ENABLE))) {
                int numPS = Integer.parseInt($(PS_NUMBER));
                for (int i = 0; i < numPS; i++) {
                    if (!$("psCpuMemMetrics" + i).equals("") && $("psCpuMemMetrics" + i) != null) {
                        html.div().$style("margin:20px 2px;font-weight:bold;font-size:12px")._(String.format($("PS_CONTAINER_ID" + i)) + " metrics:")._();
                    }
                    html.script().$src("/static/xlWebApp/jquery-3.1.1.min.js")._();
                    html.script().$src("/static/xlWebApp/highstock.js")._();
                    html.script().$src("/static/xlWebApp/exporting.js")._();
                    String containerCpuMemID = "psContainerCpuMem" + i;
                    String containerCpuUtilID = "psContainerCpuUtil" + i;
                    String containerClass = "container" + i;
                    String seriesCpuMemOptions = "[{\n" + "            name: 'cpu mem used',\n" + "            data: " + $("psCpuMemMetrics" + i) + "\n" + "        }]";
                    String seriesCpuUtilOptions = "[{\n" + "            name: 'cpu util',\n" + "            data: " + $("psCpuUtilMetrics" + i) + "\n" + "        }]";
                    if ($("psCpuUtilMetrics" + i) != null && !$("psCpuUtilMetrics" + i).equals("")) {
                        html.div().div().$id(containerCpuMemID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._().div().$id(containerCpuUtilID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._()._();
                    } else {
                        html.div().div().$id(containerCpuMemID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._()._();
                    }
                    String css = "." + containerClass + "{\n" + "    display:inline-block;\n" + "}";
                    html.style().$type("text/css")._(css)._();
                    String scriptHead = "Highcharts.setOptions({\n" + "    global: {\n" + "        useUTC: false\n" + "    }\n" + "});\n" + "// Create the chart\n";
                    String scriptBody = "Highcharts.stockChart(" + containerCpuMemID + ", {\n" + "    chart: {\n" + "        width: 550\n" + "    },\n" + "\n" + "    rangeSelector: {\n" + "        buttons: [{\n" + "            count: 1,\n" + "            type: 'minute',\n" + "            text: '1M'\n" + "        }, {\n" + "            count: 5,\n" + "            type: 'minute',\n" + "            text: '5M'\n" + "        }, {\n" + "            type: 'all',\n" + "            text: 'All'\n" + "        }],\n" + "        inputEnabled: false,\n" + "        selected: 0\n" + "    },\n" + "\n" + "    title: {\n" + "        text: 'cpu memory used( GB )'\n" + "    },\n" + "\n" + "    credits: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    exporting: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    series: " + seriesCpuMemOptions + "\n" + "});\n";
                    if ($("psCpuUtilMetrics" + i) != null && !$("psCpuUtilMetrics" + i).equals("")) {
                        scriptBody += "Highcharts.stockChart(" + containerCpuUtilID + ", {\n" + "    chart: {\n" + "        width: 550\n" + "    },\n" + "\n" + "    rangeSelector: {\n" + "        buttons: [{\n" + "            count: 1,\n" + "            type: 'minute',\n" + "            text: '1M'\n" + "        }, {\n" + "            count: 5,\n" + "            type: 'minute',\n" + "            text: '5M'\n" + "        }, {\n" + "            type: 'all',\n" + "            text: 'All'\n" + "        }],\n" + "        inputEnabled: false,\n" + "        selected: 0\n" + "    },\n" + "\n" + "    title: {\n" + "        text: 'cpu utilization( % )'\n" + "    },\n" + "\n" + "    credits: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    exporting: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    series: " + seriesCpuUtilOptions + "\n" + "});\n";
                    }
                    html.script().$type("text/javascript")._(scriptHead + scriptBody)._();
                }
                int numWorkers = Integer.parseInt($(WORKER_NUMBER));
                for (int i = 0; i < numWorkers; i++) {
                    if (!$("workerCpuMemMetrics" + i).equals("") && $("workerCpuMemMetrics" + i) != null) {
                        html.div().$style("margin:20px 2px;font-weight:bold;font-size:12px")._(String.format($("WORKER_CONTAINER_ID" + i)) + " metrics:")._();
                    }
                    html.script().$src("/static/xlWebApp/jquery-3.1.1.min.js")._();
                    html.script().$src("/static/xlWebApp/highstock.js")._();
                    html.script().$src("/static/xlWebApp/exporting.js")._();
                    String containerCpuMemID = "workerContainerCpuMem" + i;
                    String containerCpuUtilID = "workerContainerCpuUtil" + i;
                    String containerClass = "container" + i;
                    String seriesCpuMemOptions = "[{\n" + "            name: 'cpu mem used',\n" + "            data: " + $("workerCpuMemMetrics" + i) + "\n" + "        }]";
                    String seriesCpuUtilOptions = "[{\n" + "            name: 'cpu util',\n" + "            data: " + $("workerCpuUtilMetrics" + i) + "\n" + "        }]";
                    if ($("workerCpuUtilMetrics" + i) != null && !$("workerCpuUtilMetrics" + i).equals("")) {
                        html.div().div().$id(containerCpuMemID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._().div().$id(containerCpuUtilID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._()._();
                    } else {
                        html.div().div().$id(containerCpuMemID).$class(containerClass).$style("height: 400px; min-width: 310px; display:inline-block")._()._();
                    }
                    String css = "." + containerClass + "{\n" + "    display:inline-block;\n" + "}";
                    html.style().$type("text/css")._(css)._();
                    String scriptHead = "Highcharts.setOptions({\n" + "    global: {\n" + "        useUTC: false\n" + "    }\n" + "});\n" + "// Create the chart\n";
                    String scriptBody = "Highcharts.stockChart(" + containerCpuMemID + ", {\n" + "    chart: {\n" + "        width: 550\n" + "    },\n" + "\n" + "    rangeSelector: {\n" + "        buttons: [{\n" + "            count: 1,\n" + "            type: 'minute',\n" + "            text: '1M'\n" + "        }, {\n" + "            count: 5,\n" + "            type: 'minute',\n" + "            text: '5M'\n" + "        }, {\n" + "            type: 'all',\n" + "            text: 'All'\n" + "        }],\n" + "        inputEnabled: false,\n" + "        selected: 0\n" + "    },\n" + "\n" + "    title: {\n" + "        text: 'cpu memory used( GB )'\n" + "    },\n" + "\n" + "    credits: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    exporting: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    series: " + seriesCpuMemOptions + "\n" + "});\n";
                    if ($("workerCpuUtilMetrics" + i) != null && !$("workerCpuUtilMetrics" + i).equals("")) {
                        scriptBody += "Highcharts.stockChart(" + containerCpuUtilID + ", {\n" + "    chart: {\n" + "        width: 550\n" + "    },\n" + "\n" + "    rangeSelector: {\n" + "        buttons: [{\n" + "            count: 1,\n" + "            type: 'minute',\n" + "            text: '1M'\n" + "        }, {\n" + "            count: 5,\n" + "            type: 'minute',\n" + "            text: '5M'\n" + "        }, {\n" + "            type: 'all',\n" + "            text: 'All'\n" + "        }],\n" + "        inputEnabled: false,\n" + "        selected: 0\n" + "    },\n" + "\n" + "    title: {\n" + "        text: 'cpu utilization( % )'\n" + "    },\n" + "\n" + "    credits: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    exporting: {\n" + "        enabled: false\n" + "    },\n" + "\n" + "    series: " + seriesCpuUtilOptions + "\n" + "});\n";
                    }
                    html.script().$type("text/javascript")._(scriptHead + scriptBody)._();
                }
            }
        } else {
            html.div().$style("font-size:20px;")._("Job History Log getting error !")._();
        }
    }
}

Source: HsJobBlock.java (Apache License 2.0, from NJUJYB)

/**
 * Render a block of HTML for a given job.
 */
public class HsJobBlock extends HtmlBlock {

    final AppContext appContext;

    @Inject
    HsJobBlock(AppContext appctx) {
        appContext = appctx;
    }

    /*
   * (non-Javadoc)
   * @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
   */
    @Override
    protected void render(Block html) {
        String jid = $(JOB_ID);
        if (jid.isEmpty()) {
            html.p()._("Sorry, can't do anything without a JobID.")._();
            return;
        }
        JobId jobID = MRApps.toJobID(jid);
        Job j = appContext.getJob(jobID);
        if (j == null) {
            html.p()._("Sorry, ", jid, " not found.")._();
            return;
        }
        List<AMInfo> amInfos = j.getAMInfos();
        JobInfo job = new JobInfo(j);
        ResponseInfo infoBlock = info("Job Overview")._("Job Name:", job.getName())._("User Name:", job.getUserName())._("Queue:", job.getQueueName())._("State:", job.getState())._("Uberized:", job.isUber())._("Submitted:", new Date(job.getSubmitTime()))._("Started:", new Date(job.getStartTime()))._("Finished:", new Date(job.getFinishTime()))._("Elapsed:", StringUtils.formatTime(Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));
        String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
        // todo - switch to use JobInfo
        List<String> diagnostics = j.getDiagnostics();
        if (diagnostics != null && !diagnostics.isEmpty()) {
            StringBuffer b = new StringBuffer();
            for (String diag : diagnostics) {
                b.append(diag);
            }
            infoBlock._("Diagnostics:", b.toString());
        }
        if (job.getNumMaps() > 0) {
            infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
        }
        if (job.getNumReduces() > 0) {
            infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
            infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
            infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
        }
        for (ConfEntryInfo entry : job.getAcls()) {
            infoBlock._("ACL " + entry.getName() + ":", entry.getValue());
        }
        DIV<Hamlet> div = html._(InfoBlock.class).div(_INFO_WRAP);
        // MRAppMasters Table
        TABLE<DIV<Hamlet>> table = div.table("#job");
        table.tr().th(amString)._().tr().th(_TH, "Attempt Number").th(_TH, "Start Time").th(_TH, "Node").th(_TH, "Logs")._();
        boolean odd = false;
        for (AMInfo amInfo : amInfos) {
            AMAttemptInfo attempt = new AMAttemptInfo(amInfo, job.getId(), job.getUserName(), "", "");
            table.tr((odd = !odd) ? _ODD : _EVEN).td(String.valueOf(attempt.getAttemptId())).td(new Date(attempt.getStartTime()).toString()).td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress())._().td().a(".logslink", url(attempt.getShortLogsLink()), "logs")._()._();
        }
        table._();
        div._();
        html.div(_INFO_WRAP).table("#job").tr().th(_TH, "Task Type").th(_TH, "Total").th(_TH, "Complete")._().tr(_ODD).th().a(url("tasks", jid, "m"), "Map")._().td(String.valueOf(String.valueOf(job.getMapsTotal()))).td(String.valueOf(String.valueOf(job.getMapsCompleted())))._().tr(_EVEN).th().a(url("tasks", jid, "r"), "Reduce")._().td(String.valueOf(String.valueOf(job.getReducesTotal()))).td(String.valueOf(String.valueOf(job.getReducesCompleted())))._()._().table("#job").tr().th(_TH, "Attempt Type").th(_TH, "Failed").th(_TH, "Killed").th(_TH, "Successful")._().tr(_ODD).th("Maps").td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), String.valueOf(job.getFailedMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), String.valueOf(job.getKilledMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(job.getSuccessfulMapAttempts()))._()._().tr(_EVEN).th("Reduces").td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), String.valueOf(job.getFailedReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), String.valueOf(job.getKilledReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(job.getSuccessfulReduceAttempts()))._()._()._()._();
    }
}

Source: JobsBlock.java (Apache License 2.0, from NJUJYB)

public class JobsBlock extends HtmlBlock {

    final AppContext appContext;

    @Inject
    JobsBlock(AppContext appCtx) {
        appContext = appCtx;
    }

    @Override
    protected void render(Block html) {
        TBODY<TABLE<Hamlet>> tbody = html.h2("Active Jobs").table("#jobs").thead().tr().th(".id", "Job ID").th(".name", "Name").th(".state", "State").th("Map Progress").th("Maps Total").th("Maps Completed").th("Reduce Progress").th("Reduces Total").th("Reduces Completed")._()._().tbody();
        for (Job j : appContext.getAllJobs().values()) {
            JobInfo job = new JobInfo(j, false);
            tbody.tr().td().span().$title(String.valueOf(job.getId()))._().a(url("job", job.getId()), job.getId())._().td(job.getName()).td(job.getState()).td().span().$title(job.getMapProgressPercent())._().div(_PROGRESSBAR).$title(// tooltip
            join(job.getMapProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", job.getMapProgressPercent(), '%'))._()._()._().td(String.valueOf(job.getMapsTotal())).td(String.valueOf(job.getMapsCompleted())).td().span().$title(job.getReduceProgressPercent())._().div(_PROGRESSBAR).$title(// tooltip
            join(job.getReduceProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", job.getReduceProgressPercent(), '%'))._()._()._().td(String.valueOf(job.getReducesTotal())).td(String.valueOf(job.getReducesCompleted()))._();
        }
        tbody._()._();
    }
}

Source: JobBlock.java (Apache License 2.0, from NJUJYB)

public class JobBlock extends HtmlBlock {

    final AppContext appContext;

    @Inject
    JobBlock(AppContext appctx) {
        appContext = appctx;
    }

    @Override
    protected void render(Block html) {
        String jid = $(JOB_ID);
        if (jid.isEmpty()) {
            html.p()._("Sorry, can't do anything without a JobID.")._();
            return;
        }
        JobId jobID = MRApps.toJobID(jid);
        Job job = appContext.getJob(jobID);
        if (job == null) {
            html.p()._("Sorry, ", jid, " not found.")._();
            return;
        }
        List<AMInfo> amInfos = job.getAMInfos();
        String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
        JobInfo jinfo = new JobInfo(job, true);
        info("Job Overview")._("Job Name:", jinfo.getName())._("State:", jinfo.getState())._("Uberized:", jinfo.isUberized())._("Started:", new Date(jinfo.getStartTime()))._("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
        DIV<Hamlet> div = html._(InfoBlock.class).div(_INFO_WRAP);
        // MRAppMasters Table
        TABLE<DIV<Hamlet>> table = div.table("#job");
        table.tr().th(amString)._().tr().th(_TH, "Attempt Number").th(_TH, "Start Time").th(_TH, "Node").th(_TH, "Logs")._();
        for (AMInfo amInfo : amInfos) {
            AMAttemptInfo attempt = new AMAttemptInfo(amInfo, jinfo.getId(), jinfo.getUserName());
            table.tr().td(String.valueOf(attempt.getAttemptId())).td(new Date(attempt.getStartTime()).toString()).td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress())._().td().a(".logslink", url(attempt.getLogsLink()), "logs")._()._();
        }
        table._();
        div._();
        html.div(_INFO_WRAP).table("#job").tr().th(_TH, "Task Type").th(_TH, "Progress").th(_TH, "Total").th(_TH, "Pending").th(_TH, "Running").th(_TH, "Complete")._().tr(_ODD).th("Map").td().div(_PROGRESSBAR).$replacedle(// tooltip
        join(jinfo.getMapProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._().td().a(url("tasks", jid, "m", "ALL"), String.valueOf(jinfo.getMapsTotal()))._().td().a(url("tasks", jid, "m", "PENDING"), String.valueOf(jinfo.getMapsPending()))._().td().a(url("tasks", jid, "m", "RUNNING"), String.valueOf(jinfo.getMapsRunning()))._().td().a(url("tasks", jid, "m", "COMPLETED"), String.valueOf(jinfo.getMapsCompleted()))._()._().tr(_EVEN).th("Reduce").td().div(_PROGRESSBAR).$replacedle(// tooltip
        join(jinfo.getReduceProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._().td().a(url("tasks", jid, "r", "ALL"), String.valueOf(jinfo.getReducesTotal()))._().td().a(url("tasks", jid, "r", "PENDING"), String.valueOf(jinfo.getReducesPending()))._().td().a(url("tasks", jid, "r", "RUNNING"), String.valueOf(jinfo.getReducesRunning()))._().td().a(url("tasks", jid, "r", "COMPLETED"), String.valueOf(jinfo.getReducesCompleted()))._()._()._().table("#job").tr().th(_TH, "Attempt Type").th(_TH, "New").th(_TH, "Running").th(_TH, "Failed").th(_TH, "Killed").th(_TH, "Successful")._().tr(_ODD).th("Maps").td().a(url("attempts", jid, "m", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledMapAttempts()))._().td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulMapAttempts()))._()._().tr(_EVEN).th("Reduces").td().a(url("attempts", jid, "r", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledReduceAttempts()))._().td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulReduceAttempts()))._()._()._()._();
    }
}

Source: ConfBlock.java (Apache License 2.0, from NJUJYB)

/**
 * Render the configuration for this job.
 */
public class ConfBlock extends HtmlBlock {

    final AppContext appContext;

    @Inject
    ConfBlock(AppContext appctx) {
        appContext = appctx;
    }

    /*
   * (non-Javadoc)
   * @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
   */
    @Override
    protected void render(Block html) {
        String jid = $(JOB_ID);
        if (jid.isEmpty()) {
            html.p()._("Sorry, can't do anything without a JobID.")._();
            return;
        }
        JobId jobID = MRApps.toJobID(jid);
        Job job = appContext.getJob(jobID);
        if (job == null) {
            html.p()._("Sorry, ", jid, " not found.")._();
            return;
        }
        Path confPath = job.getConfFile();
        try {
            ConfInfo info = new ConfInfo(job);
            html.div().h3(confPath.toString())._();
            TBODY<TABLE<Hamlet>> tbody = html.table("#conf").thead().tr().th(_TH, "key").th(_TH, "value").th(_TH, "source chain")._()._().tbody();
            for (ConfEntryInfo entry : info.getProperties()) {
                StringBuffer buffer = new StringBuffer();
                String[] sources = entry.getSource();
                // Skip the last entry, because it is always the same HDFS file, and
                // output them in reverse order so most recent is output first
                boolean first = true;
                for (int i = (sources.length - 2); i >= 0; i--) {
                    if (!first) {
                        // \u2B05 is an arrow <--
                        buffer.append(" \u2B05 ");
                    }
                    first = false;
                    buffer.append(sources[i]);
                }
                tbody.tr().td(entry.getName()).td(entry.getValue()).td(buffer.toString())._();
            }
            tbody._().tfoot().tr().th().input("search_init").$type(InputType.text).$name("key").$value("key")._()._().th().input("search_init").$type(InputType.text).$name("value").$value("value")._()._().th().input("search_init").$type(InputType.text).$name("source chain").$value("source chain")._()._()._()._()._();
        } catch (IOException e) {
            LOG.error("Error while reading " + confPath, e);
            html.p()._("Sorry got an error while reading conf file. ", confPath);
        }
    }
}

Source: HsJobsBlock.java (Apache License 2.0, from NJUJYB)

/**
 * Render all of the jobs that the history server is aware of.
 */
public class HsJobsBlock extends HtmlBlock {

    final AppContext appContext;

    final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");

    @Inject
    HsJobsBlock(AppContext appCtx) {
        appContext = appCtx;
    }

    /*
   * (non-Javadoc)
   * @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
   */
    @Override
    protected void render(Block html) {
        TBODY<TABLE<Hamlet>> tbody = html.h2("Retired Jobs").table("#jobs").thead().tr().th("Submit Time").th("Start Time").th("Finish Time").th(".id", "Job ID").th(".name", "Name").th("User").th("Queue").th(".state", "State").th("Maps Total").th("Maps Completed").th("Reduces Total").th("Reduces Completed")._()._().tbody();
        LOG.info("Getting list of all Jobs.");
        // Write all the data into a JavaScript array of arrays for JQuery
        // DataTables to display
        StringBuilder jobsTableData = new StringBuilder("[\n");
        for (Job j : appContext.getAllJobs().values()) {
            JobInfo job = new JobInfo(j);
            jobsTableData.append("[\"").append(dateFormat.format(new Date(job.getSubmitTime()))).append("\",\"").append(dateFormat.format(new Date(job.getStartTime()))).append("\",\"").append(dateFormat.format(new Date(job.getFinishTime()))).append("\",\"").append("<a href='").append(url("job", job.getId())).append("'>").append(job.getId()).append("</a>\",\"").append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(job.getName()))).append("\",\"").append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(job.getUserName()))).append("\",\"").append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(job.getQueueName()))).append("\",\"").append(job.getState()).append("\",\"").append(String.valueOf(job.getMapsTotal())).append("\",\"").append(String.valueOf(job.getMapsCompleted())).append("\",\"").append(String.valueOf(job.getReducesTotal())).append("\",\"").append(String.valueOf(job.getReducesCompleted())).append("\"],\n");
        }
        // Remove the last comma and close off the array of arrays
        if (jobsTableData.charAt(jobsTableData.length() - 2) == ',') {
            jobsTableData.delete(jobsTableData.length() - 2, jobsTableData.length() - 1);
        }
        jobsTableData.append("]");
        html.script().$type("text/javascript")._("var jobsTableData=" + jobsTableData)._();
        tbody._().tfoot().tr().th().input("search_init").$type(InputType.text).$name("submit_time").$value("Submit Time")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Start Time")._()._().th().input("search_init").$type(InputType.text).$name("finish_time").$value("Finish Time")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Job ID")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Name")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("User")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Queue")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("State")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Maps Total")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Maps Completed")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Reduces Total")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Reduces Completed")._()._()._()._()._();
    }
}

Source: HsJobsBlock.java (Apache License 2.0, from naver)

/**
 * Render all of the jobs that the history server is aware of.
 */
public class HsJobsBlock extends HtmlBlock {

    final AppContext appContext;

    final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");

    @Inject
    HsJobsBlock(AppContext appCtx) {
        appContext = appCtx;
    }

    /*
   * (non-Javadoc)
   * @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
   */
    @Override
    protected void render(Block html) {
        TBODY<TABLE<Hamlet>> tbody = html.h2("Retired Jobs").table("#jobs").thead().tr().th("Submit Time").th("Start Time").th("Finish Time").th(".id", "Job ID").th(".name", "Name").th("User").th("Queue").th(".state", "State").th("Maps Total").th("Maps Completed").th("Reduces Total").th("Reduces Completed")._()._().tbody();
        LOG.info("Getting list of all Jobs.");
        // Write all the data into a JavaScript array of arrays for JQuery
        // DataTables to display
        StringBuilder jobsTableData = new StringBuilder("[\n");
        for (Job j : appContext.getAllJobs().values()) {
            JobInfo job = new JobInfo(j);
            jobsTableData.append("[\"").append(dateFormat.format(new Date(job.getSubmitTime()))).append("\",\"").append(dateFormat.format(new Date(job.getStartTime()))).append("\",\"").append(dateFormat.format(new Date(job.getFinishTime()))).append("\",\"").append("<a target='_blank' href='").append(url("job", job.getId())).append("'>").append(job.getId()).append("</a>\",\"").append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(job.getName()))).append("\",\"").append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(job.getUserName()))).append("\",\"").append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(job.getQueueName()))).append("\",\"").append(job.getState()).append("\",\"").append(String.valueOf(job.getMapsTotal())).append("\",\"").append(String.valueOf(job.getMapsCompleted())).append("\",\"").append(String.valueOf(job.getReducesTotal())).append("\",\"").append(String.valueOf(job.getReducesCompleted())).append("\"],\n");
        }
        // Remove the last comma and close off the array of arrays
        if (jobsTableData.charAt(jobsTableData.length() - 2) == ',') {
            jobsTableData.delete(jobsTableData.length() - 2, jobsTableData.length() - 1);
        }
        jobsTableData.append("]");
        html.script().$type("text/javascript")._("var jobsTableData=" + jobsTableData)._();
        tbody._().tfoot().tr().th().input("search_init").$type(InputType.text).$name("submit_time").$value("Submit Time")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Start Time")._()._().th().input("search_init").$type(InputType.text).$name("finish_time").$value("Finish Time")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Job ID")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Name")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("User")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Queue")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("State")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Maps Total")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Maps Completed")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Reduces Total")._()._().th().input("search_init").$type(InputType.text).$name("start_time").$value("Reduces Completed")._()._()._()._()._();
    }
}
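
Both HsJobsBlock variants escape job-supplied strings twice before embedding them: once for HTML and once more for the surrounding JavaScript string literal. A small sketch of why, assuming the Commons Lang 2.x StringEscapeUtils used above:

import org.apache.commons.lang.StringEscapeUtils;

// The value ends up inside a JS string literal inside an HTML page,
// so both contexts need escaping.
String name = "O'Brien <admin>";
String cell = StringEscapeUtils.escapeJavaScript(
        StringEscapeUtils.escapeHtml(name));
// escapeHtml:       O'Brien &lt;admin&gt;    (HTML-safe)
// escapeJavaScript: O\'Brien &lt;admin&gt;   (also safe inside '...')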

Source: TestAMWebServicesTasks.java (Apache License 2.0, from NJUJYB)

/**
 * Test the app master web service REST API for getting tasks, a specific task,
 * and task counters.
 *
 * /ws/v1/mapreduce/jobs/{jobid}/tasks
 * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}
 * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/counters
 */
public class TestAMWebServicesTasks extends JerseyTest {

    private static Configuration conf = new Configuration();

    private static AppContext appContext;

    private Injector injector = Guice.createInjector(new ServletModule() {

        @Override
        protected void configureServlets() {
            appContext = new MockAppContext(0, 1, 2, 1);
            bind(JAXBContextResolver.class);
            bind(AMWebServices.class);
            bind(GenericExceptionHandler.class);
            bind(AppContext.class).toInstance(appContext);
            bind(Configuration.class).toInstance(conf);
            serve("/*").with(GuiceContainer.class);
        }
    });

    public class GuiceServletConfig extends GuiceServletContextListener {

        @Override
        protected Injector getInjector() {
            return injector;
        }
    }

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
    }

    public TestAMWebServicesTasks() {
        super(new WebAppDescriptor.Builder("org.apache.hadoop.mapreduce.v2.app.webapp").contextListenerClass(GuiceServletConfig.class).filterClass(com.google.inject.servlet.GuiceFilter.class).contextPath("jersey-guice-filter").servletPath("/").build());
    }

    @Test
    public void testTasks() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject tasks = json.getJSONObject("tasks");
            JSONArray arr = tasks.getJSONArray("task");
            replacedertEquals("incorrect number of elements", 2, arr.length());
            verifyAMTask(arr, jobsMap.get(id), null);
        }
    }

    @Test
    public void testTasksDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject tasks = json.getJSONObject("tasks");
            JSONArray arr = tasks.getJSONArray("task");
            replacedertEquals("incorrect number of elements", 2, arr.length());
            verifyAMTask(arr, jobsMap.get(id), null);
        }
    }

    @Test
    public void testTasksSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject tasks = json.getJSONObject("tasks");
            JSONArray arr = tasks.getJSONArray("task");
            replacedertEquals("incorrect number of elements", 2, arr.length());
            verifyAMTask(arr, jobsMap.get(id), null);
        }
    }

    @Test
    public void testTasksXML() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").accept(MediaType.APPLICATION_XML).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String xml = response.getEnreplacedy(String.clreplaced);
            DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
            DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(xml));
            Doreplacedent dom = db.parse(is);
            NodeList tasks = dom.getElementsByTagName("tasks");
            replacedertEquals("incorrect number of elements", 1, tasks.getLength());
            NodeList task = dom.getElementsByTagName("task");
            verifyAMTaskXML(task, jobsMap.get(id));
        }
    }

    @Test
    public void testTasksQueryMap() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String type = "m";
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").queryParam("type", type).accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject tasks = json.getJSONObject("tasks");
            JSONArray arr = tasks.getJSONArray("task");
            replacedertEquals("incorrect number of elements", 1, arr.length());
            verifyAMTask(arr, jobsMap.get(id), type);
        }
    }

    @Test
    public void testTasksQueryReduce() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String type = "r";
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").queryParam("type", type).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject tasks = json.getJSONObject("tasks");
            JSONArray arr = tasks.getJSONArray("task");
            assertEquals("incorrect number of elements", 1, arr.length());
            verifyAMTask(arr, jobsMap.get(id), type);
        }
    }

    @Test
    public void testTasksQueryInvalid() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            // tasktype must be exactly either "m" or "r"
            String tasktype = "reduce";
            try {
                r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").queryParam("type", tasktype).accept(MediaType.APPLICATION_JSON).get(JSONObject.clreplaced);
                fail("should have thrown exception on invalid uri");
            } catch (UniformInterfaceException ue) {
                ClientResponse response = ue.getResponse();
                replacedertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                JSONObject exception = msg.getJSONObject("RemoteException");
                replacedertEquals("incorrect number of elements", 3, exception.length());
                String message = exception.getString("message");
                String type = exception.getString("exception");
                String clreplacedname = exception.getString("javaClreplacedName");
                WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: tasktype must be either m or r", message);
                WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException", type);
                WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.BadRequestException", clreplacedname);
            }
        }
    }

    @Test
    public void testTaskId() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("task");
                verifyAMSingleTask(info, task);
            }
        }
    }

    @Test
    public void testTaskIdSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("task");
                verifyAMSingleTask(info, task);
            }
        }
    }

    @Test
    public void testTaskIdDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("task");
                verifyAMSingleTask(info, task);
            }
        }
    }

    @Test
    public void testTaskIdBogus() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String tid = "bogustaskid";
            try {
                r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.clreplaced);
                fail("should have thrown exception on invalid uri");
            } catch (UniformInterfaceException ue) {
                ClientResponse response = ue.getResponse();
                replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                JSONObject exception = msg.getJSONObject("RemoteException");
                replacedertEquals("incorrect number of elements", 3, exception.length());
                String message = exception.getString("message");
                String type = exception.getString("exception");
                String clreplacedname = exception.getString("javaClreplacedName");
                WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: TaskId string : " + "bogustaskid is not properly formed", message);
                WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
                WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
            }
        }
    }

    @Test
    public void testTaskIdNonExist() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String tid = "task_0_0000_m_000000";
            try {
                r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.clreplaced);
                fail("should have thrown exception on invalid uri");
            } catch (UniformInterfaceException ue) {
                ClientResponse response = ue.getResponse();
                replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                JSONObject exception = msg.getJSONObject("RemoteException");
                replacedertEquals("incorrect number of elements", 3, exception.length());
                String message = exception.getString("message");
                String type = exception.getString("exception");
                String clreplacedname = exception.getString("javaClreplacedName");
                WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: task not found with id task_0_0000_m_000000", message);
                WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
                WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
            }
        }
    }

    @Test
    public void testTaskIdInvalid() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String tid = "task_0_0000_d_000000";
            try {
                r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.clreplaced);
                fail("should have thrown exception on invalid uri");
            } catch (UniformInterfaceException ue) {
                ClientResponse response = ue.getResponse();
                replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                JSONObject exception = msg.getJSONObject("RemoteException");
                replacedertEquals("incorrect number of elements", 3, exception.length());
                String message = exception.getString("message");
                String type = exception.getString("exception");
                String clreplacedname = exception.getString("javaClreplacedName");
                WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: Bad TaskType identifier. TaskId string : " + "task_0_0000_d_000000 is not properly formed.", message);
                WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
                WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
            }
        }
    }

    @Test
    public void testTaskIdInvalid2() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String tid = "task_0_m_000000";
            try {
                r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.clreplaced);
                fail("should have thrown exception on invalid uri");
            } catch (UniformInterfaceException ue) {
                ClientResponse response = ue.getResponse();
                replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                JSONObject exception = msg.getJSONObject("RemoteException");
                replacedertEquals("incorrect number of elements", 3, exception.length());
                String message = exception.getString("message");
                String type = exception.getString("exception");
                String clreplacedname = exception.getString("javaClreplacedName");
                WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: TaskId string : " + "task_0_m_000000 is not properly formed", message);
                WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
                WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
            }
        }
    }

    @Test
    public void testTaskIdInvalid3() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            String tid = "task_0_0000_m";
            try {
                r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.clreplaced);
                fail("should have thrown exception on invalid uri");
            } catch (UniformInterfaceException ue) {
                ClientResponse response = ue.getResponse();
                replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                JSONObject exception = msg.getJSONObject("RemoteException");
                replacedertEquals("incorrect number of elements", 3, exception.length());
                String message = exception.getString("message");
                String type = exception.getString("exception");
                String clreplacedname = exception.getString("javaClreplacedName");
                WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: TaskId string : " + "task_0_0000_m is not properly formed", message);
                WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
                WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
            }
        }
    }

    @Test
    public void testTaskIdXML() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
                String xml = response.getEntity(String.class);
                DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
                DocumentBuilder db = dbf.newDocumentBuilder();
                InputSource is = new InputSource();
                is.setCharacterStream(new StringReader(xml));
                Document dom = db.parse(is);
                NodeList nodes = dom.getElementsByTagName("task");
                for (int i = 0; i < nodes.getLength(); i++) {
                    Element element = (Element) nodes.item(i);
                    verifyAMSingleTaskXML(element, task);
                }
            }
        }
    }

    public void verifyAMSingleTask(JSONObject info, Task task) throws JSONException {
        assertEquals("incorrect number of elements", 9, info.length());
        verifyTaskGeneric(task, info.getString("id"), info.getString("state"), info.getString("type"), info.getString("successfulAttempt"), info.getLong("startTime"), info.getLong("finishTime"), info.getLong("elapsedTime"), (float) info.getDouble("progress"), info.getString("status"));
    }

    public void verifyAMTask(JSONArray arr, Job job, String type) throws JSONException {
        for (Task task : job.getTasks().values()) {
            TaskId id = task.getID();
            String tid = MRApps.toString(id);
            Boolean found = false;
            if (type != null && task.getType() == MRApps.taskType(type)) {
                for (int i = 0; i < arr.length(); i++) {
                    JSONObject info = arr.getJSONObject(i);
                    if (tid.matches(info.getString("id"))) {
                        found = true;
                        verifyAMSingleTask(info, task);
                    }
                }
                replacedertTrue("task with id: " + tid + " not in web service output", found);
            }
        }
    }

    public void verifyTaskGeneric(Task task, String id, String state, String type, String successfulAttempt, long startTime, long finishTime, long elapsedTime, float progress, String status) {
        TaskId taskid = task.getID();
        String tid = MRApps.toString(taskid);
        TaskReport report = task.getReport();
        WebServicesTestUtils.checkStringMatch("id", tid, id);
        WebServicesTestUtils.checkStringMatch("type", task.getType().toString(), type);
        WebServicesTestUtils.checkStringMatch("state", report.getTaskState().toString(), state);
        // not easily checked without duplicating logic, just make sure it's here
        assertNotNull("successfulAttempt null", successfulAttempt);
        assertEquals("startTime wrong", report.getStartTime(), startTime);
        assertEquals("finishTime wrong", report.getFinishTime(), finishTime);
        assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
        assertEquals("progress wrong", report.getProgress() * 100, progress, 1e-3f);
        assertEquals("status wrong", report.getStatus(), status);
    }

    public void verifyAMSingleTaskXML(Element element, Task task) {
        verifyTaskGeneric(task, WebServicesTestUtils.getXmlString(element, "id"), WebServicesTestUtils.getXmlString(element, "state"), WebServicesTestUtils.getXmlString(element, "type"), WebServicesTestUtils.getXmlString(element, "successfulAttempt"), WebServicesTestUtils.getXmlLong(element, "startTime"), WebServicesTestUtils.getXmlLong(element, "finishTime"), WebServicesTestUtils.getXmlLong(element, "elapsedTime"), WebServicesTestUtils.getXmlFloat(element, "progress"), WebServicesTestUtils.getXmlString(element, "status"));
    }

    public void verifyAMTaskXML(NodeList nodes, Job job) {
        assertEquals("incorrect number of elements", 2, nodes.getLength());
        for (Task task : job.getTasks().values()) {
            TaskId id = task.getID();
            String tid = MRApps.toString(id);
            Boolean found = false;
            for (int i = 0; i < nodes.getLength(); i++) {
                Element element = (Element) nodes.item(i);
                if (tid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
                    found = true;
                    verifyAMSingleTaskXML(element, task);
                }
            }
            replacedertTrue("task with id: " + tid + " not in web service output", found);
        }
    }

    @Test
    public void testTaskIdCounters() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("jobTaskCounters");
                verifyAMJobTaskCounters(info, task);
            }
        }
    }

    @Test
    public void testTaskIdCountersSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("jobTaskCounters");
                verifyAMJobTaskCounters(info, task);
            }
        }
    }

    @Test
    public void testTaskIdCountersDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("jobTaskCounters");
                verifyAMJobTaskCounters(info, task);
            }
        }
    }

    @Test
    public void testJobTaskCountersXML() throws Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
                String xml = response.getEntity(String.class);
                DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
                DocumentBuilder db = dbf.newDocumentBuilder();
                InputSource is = new InputSource();
                is.setCharacterStream(new StringReader(xml));
                Document dom = db.parse(is);
                NodeList info = dom.getElementsByTagName("jobTaskCounters");
                verifyAMTaskCountersXML(info, task);
            }
        }
    }

    public void verifyAMJobTaskCounters(JSONObject info, Task task) throws JSONException {
        assertEquals("incorrect number of elements", 2, info.length());
        WebServicesTestUtils.checkStringMatch("id", MRApps.toString(task.getID()), info.getString("id"));
        // just do simple verification of the fields - not that the data
        // in the fields is correct
        JSONArray counterGroups = info.getJSONArray("taskCounterGroup");
        for (int i = 0; i < counterGroups.length(); i++) {
            JSONObject counterGroup = counterGroups.getJSONObject(i);
            String name = counterGroup.getString("counterGroupName");
            replacedertTrue("name not set", (name != null && !name.isEmpty()));
            JSONArray counters = counterGroup.getJSONArray("counter");
            for (int j = 0; j < counters.length(); j++) {
                JSONObject counter = counters.getJSONObject(j);
                String counterName = counter.getString("name");
                replacedertTrue("name not set", (counterName != null && !counterName.isEmpty()));
                long value = counter.getLong("value");
                replacedertTrue("value  >= 0", value >= 0);
            }
        }
    }

    public void verifyAMTaskCountersXML(NodeList nodes, Task task) {
        for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            WebServicesTestUtils.checkStringMatch("id", MRApps.toString(task.getID()), WebServicesTestUtils.getXmlString(element, "id"));
            // just do simple verification of the fields - not that the data
            // in the fields is correct
            NodeList groups = element.getElementsByTagName("taskCounterGroup");
            for (int j = 0; j < groups.getLength(); j++) {
                Element counters = (Element) groups.item(j);
                assertNotNull("should have counters in the web service info", counters);
                String name = WebServicesTestUtils.getXmlString(counters, "counterGroupName");
                assertTrue("name not set", (name != null && !name.isEmpty()));
                NodeList counterArr = counters.getElementsByTagName("counter");
                for (int z = 0; z < counterArr.getLength(); z++) {
                    Element counter = (Element) counterArr.item(z);
                    String counterName = WebServicesTestUtils.getXmlString(counter, "name");
                    replacedertTrue("counter name not set", (counterName != null && !counterName.isEmpty()));
                    long value = WebServicesTestUtils.getXmlLong(counter, "value");
                    replacedertTrue("value not >= 0", value >= 0);
                }
            }
        }
    }
}
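
The tests above all follow the same pattern: stand up the MapReduce AM web services against a mock app context and query them through the Jersey 1.x client. As a rough standalone sketch (not part of the test source above), the same client API can be pointed at a live application master; the proxy URL and the application and job ids below are placeholders, not values taken from the source:

import javax.ws.rs.core.MediaType;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public class AmTasksClientSketch {

    public static void main(String[] args) {
        Client client = Client.create();
        // Placeholder AM web address - substitute the real proxy host:port and application id.
        WebResource r = client.resource("http://rmhost:8088/proxy/application_0_0000");
        // Same resource path the tests exercise: /ws/v1/mapreduce/jobs/{jobid}/tasks
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_0_0000").path("tasks").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        System.out.println(response.getEntity(String.class));
    }
}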

17 Source : TestAMWebServicesJobs.java
with Apache License 2.0
from NJUJYB

/**
 * Test the app master web service Rest API for getting jobs, a specific job,
 * and job counters.
 *
 * /ws/v1/mapreduce/jobs
 * /ws/v1/mapreduce/jobs/{jobid}
 * /ws/v1/mapreduce/jobs/{jobid}/counters
 * /ws/v1/mapreduce/jobs/{jobid}/jobattempts
 */
public class TestAMWebServicesJobs extends JerseyTest {

    private static Configuration conf = new Configuration();

    private static AppContext appContext;

    private Injector injector = Guice.createInjector(new ServletModule() {

        @Override
        protected void configureServlets() {
            appContext = new MockAppContext(0, 1, 2, 1);
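            // MockAppContext(appid, numJobs, numTasks, numAttempts): every test
            // below runs against a single mock job with two tasks and one attempt each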
            bind(JAXBContextResolver.class);
            bind(AMWebServices.class);
            bind(GenericExceptionHandler.class);
            bind(AppContext.class).toInstance(appContext);
            bind(Configuration.class).toInstance(conf);
            serve("/*").with(GuiceContainer.class);
        }
    });

    public class GuiceServletConfig extends GuiceServletContextListener {

        @Override
        protected Injector getInjector() {
            return injector;
        }
    }

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
    }

    public TestAMWebServicesJobs() {
        super(new WebAppDescriptor.Builder("org.apache.hadoop.mapreduce.v2.app.webapp").contextListenerClass(GuiceServletConfig.class).filterClass(com.google.inject.servlet.GuiceFilter.class).contextPath("jersey-guice-filter").servletPath("/").build());
    }

    @Test
    public void testJobs() throws JSONException, Exception {
        WebResource r = resource();
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject jobs = json.getJSONObject("jobs");
        JSONArray arr = jobs.getJSONArray("job");
        JSONObject info = arr.getJSONObject(0);
        Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
        verifyAMJob(info, job);
    }

    @Test
    public void testJobsSlash() throws JSONException, Exception {
        WebResource r = resource();
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject jobs = json.getJSONObject("jobs");
        JSONArray arr = jobs.getJSONArray("job");
        JSONObject info = arr.getJSONObject(0);
        Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
        verifyAMJob(info, job);
    }

    @Test
    public void testJobsDefault() throws JSONException, Exception {
        WebResource r = resource();
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject jobs = json.getJSONObject("jobs");
        JSONArray arr = jobs.getJSONArray("job");
        JSONObject info = arr.getJSONObject(0);
        Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
        verifyAMJob(info, job);
    }

    @Test
    public void testJobsXML() throws Exception {
        WebResource r = resource();
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList jobs = dom.getElementsByTagName("jobs");
        assertEquals("incorrect number of elements", 1, jobs.getLength());
        NodeList job = dom.getElementsByTagName("job");
        assertEquals("incorrect number of elements", 1, job.getLength());
        verifyAMJobXML(job, appContext);
    }

    @Test
    public void testJobId() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("job");
            verifyAMJob(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobIdSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("job");
            verifyAMJob(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobIdDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("job");
            verifyAMJob(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobIdNonExist() throws JSONException, Exception {
        WebResource r = resource();
        try {
            r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_0_1234").get(JSONObject.clreplaced);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
            ClientResponse response = ue.getResponse();
            replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
            JSONObject exception = msg.getJSONObject("RemoteException");
            replacedertEquals("incorrect number of elements", 3, exception.length());
            String message = exception.getString("message");
            String type = exception.getString("exception");
            String clreplacedname = exception.getString("javaClreplacedName");
            WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: job, job_0_1234, is not found", message);
            WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
            WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
        }
    }

    @Test
    public void testJobIdInvalid() throws JSONException, Exception {
        WebResource r = resource();
        try {
            r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo").accept(MediaType.APPLICATION_JSON).get(JSONObject.clreplaced);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
            ClientResponse response = ue.getResponse();
            replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
            JSONObject exception = msg.getJSONObject("RemoteException");
            replacedertEquals("incorrect number of elements", 3, exception.length());
            String message = exception.getString("message");
            String type = exception.getString("exception");
            String clreplacedname = exception.getString("javaClreplacedName");
            verifyJobIdInvalid(message, type, clreplacedname);
        }
    }

    // verify the exception output default is JSON
    @Test
    public void testJobIdInvalidDefault() throws JSONException, Exception {
        WebResource r = resource();
        try {
            r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo").get(JSONObject.clreplaced);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
            ClientResponse response = ue.getResponse();
            replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
            JSONObject exception = msg.getJSONObject("RemoteException");
            replacedertEquals("incorrect number of elements", 3, exception.length());
            String message = exception.getString("message");
            String type = exception.getString("exception");
            String clreplacedname = exception.getString("javaClreplacedName");
            verifyJobIdInvalid(message, type, clreplacedname);
        }
    }

    // test that the exception output works in XML
    @Test
    public void testJobIdInvalidXML() throws JSONException, Exception {
        WebResource r = resource();
        try {
            r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo").accept(MediaType.APPLICATION_XML).get(JSONObject.clreplaced);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
            ClientResponse response = ue.getResponse();
            replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
            replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String msg = response.getEnreplacedy(String.clreplaced);
            System.out.println(msg);
            DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
            DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(msg));
            Doreplacedent dom = db.parse(is);
            NodeList nodes = dom.getElementsByTagName("RemoteException");
            Element element = (Element) nodes.item(0);
            String message = WebServicesTestUtils.getXmlString(element, "message");
            String type = WebServicesTestUtils.getXmlString(element, "exception");
            String clreplacedname = WebServicesTestUtils.getXmlString(element, "javaClreplacedName");
            verifyJobIdInvalid(message, type, clreplacedname);
        }
    }

    private void verifyJobIdInvalid(String message, String type, String classname) {
        WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: JobId string : job_foo is not properly formed", message);
        WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
        WebServicesTestUtils.checkStringMatch("exception classname", "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }

    @Test
    public void testJobIdInvalidBogus() throws JSONException, Exception {
        WebResource r = resource();
        try {
            r.path("ws").path("v1").path("mapreduce").path("jobs").path("bogusfoo").get(JSONObject.clreplaced);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
            ClientResponse response = ue.getResponse();
            replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
            JSONObject exception = msg.getJSONObject("RemoteException");
            replacedertEquals("incorrect number of elements", 3, exception.length());
            String message = exception.getString("message");
            String type = exception.getString("exception");
            String clreplacedname = exception.getString("javaClreplacedName");
            WebServicesTestUtils.checkStringMatch("exception message", "java.lang.Exception: JobId string : bogusfoo is not properly formed", message);
            WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
            WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
        }
    }

    @Test
    public void testJobIdXML() throws Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String xml = response.getEntity(String.class);
            DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            DocumentBuilder db = dbf.newDocumentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(xml));
            Document dom = db.parse(is);
            NodeList job = dom.getElementsByTagName("job");
            verifyAMJobXML(job, appContext);
        }
    }

    public void verifyAMJob(JSONObject info, Job job) throws JSONException {
        assertEquals("incorrect number of elements", 30, info.length());
        // fields accessible to everyone
        verifyAMJobGeneric(job, info.getString("id"), info.getString("user"), info.getString("name"), info.getString("state"), info.getLong("startTime"), info.getLong("finishTime"), info.getLong("elapsedTime"), info.getInt("mapsTotal"), info.getInt("mapsCompleted"), info.getInt("reducesTotal"), info.getInt("reducesCompleted"), (float) info.getDouble("reduceProgress"), (float) info.getDouble("mapProgress"));
        String diagnostics = "";
        if (info.has("diagnostics")) {
            diagnostics = info.getString("diagnostics");
        }
        // restricted access fields - if security and acls set
        verifyAMJobGenericSecure(job, info.getInt("mapsPending"), info.getInt("mapsRunning"), info.getInt("reducesPending"), info.getInt("reducesRunning"), info.getBoolean("uberized"), diagnostics, info.getInt("newReduceAttempts"), info.getInt("runningReduceAttempts"), info.getInt("failedReduceAttempts"), info.getInt("killedReduceAttempts"), info.getInt("successfulReduceAttempts"), info.getInt("newMapAttempts"), info.getInt("runningMapAttempts"), info.getInt("failedMapAttempts"), info.getInt("killedMapAttempts"), info.getInt("successfulMapAttempts"));
        Map<JobACL, AccessControlList> allacls = job.getJobACLs();
        if (allacls != null) {
            for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
                String expectName = entry.getKey().getAclName();
                String expectValue = entry.getValue().getAclString();
                Boolean found = false;
                // make sure ws includes it
                if (info.has("acls")) {
                    JSONArray arr = info.getJSONArray("acls");
                    for (int i = 0; i < arr.length(); i++) {
                        JSONObject aclInfo = arr.getJSONObject(i);
                        if (expectName.matches(aclInfo.getString("name"))) {
                            found = true;
                            WebServicesTestUtils.checkStringMatch("value", expectValue, aclInfo.getString("value"));
                        }
                    }
                } else {
                    fail("should have acls in the web service info");
                }
                replacedertTrue("acl: " + expectName + " not found in webservice output", found);
            }
        }
    }

    public void verifyAMJobXML(NodeList nodes, AppContext appContext) {
        assertEquals("incorrect number of elements", 1, nodes.getLength());
        for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            Job job = appContext.getJob(MRApps.toJobID(WebServicesTestUtils.getXmlString(element, "id")));
            replacedertNotNull("Job not found - output incorrect", job);
            verifyAMJobGeneric(job, WebServicesTestUtils.getXmlString(element, "id"), WebServicesTestUtils.getXmlString(element, "user"), WebServicesTestUtils.getXmlString(element, "name"), WebServicesTestUtils.getXmlString(element, "state"), WebServicesTestUtils.getXmlLong(element, "startTime"), WebServicesTestUtils.getXmlLong(element, "finishTime"), WebServicesTestUtils.getXmlLong(element, "elapsedTime"), WebServicesTestUtils.getXmlInt(element, "mapsTotal"), WebServicesTestUtils.getXmlInt(element, "mapsCompleted"), WebServicesTestUtils.getXmlInt(element, "reducesTotal"), WebServicesTestUtils.getXmlInt(element, "reducesCompleted"), WebServicesTestUtils.getXmlFloat(element, "reduceProgress"), WebServicesTestUtils.getXmlFloat(element, "mapProgress"));
            // restricted access fields - if security and acls set
            verifyAMJobGenericSecure(job, WebServicesTestUtils.getXmlInt(element, "mapsPending"), WebServicesTestUtils.getXmlInt(element, "mapsRunning"), WebServicesTestUtils.getXmlInt(element, "reducesPending"), WebServicesTestUtils.getXmlInt(element, "reducesRunning"), WebServicesTestUtils.getXmlBoolean(element, "uberized"), WebServicesTestUtils.getXmlString(element, "diagnostics"), WebServicesTestUtils.getXmlInt(element, "newReduceAttempts"), WebServicesTestUtils.getXmlInt(element, "runningReduceAttempts"), WebServicesTestUtils.getXmlInt(element, "failedReduceAttempts"), WebServicesTestUtils.getXmlInt(element, "killedReduceAttempts"), WebServicesTestUtils.getXmlInt(element, "successfulReduceAttempts"), WebServicesTestUtils.getXmlInt(element, "newMapAttempts"), WebServicesTestUtils.getXmlInt(element, "runningMapAttempts"), WebServicesTestUtils.getXmlInt(element, "failedMapAttempts"), WebServicesTestUtils.getXmlInt(element, "killedMapAttempts"), WebServicesTestUtils.getXmlInt(element, "successfulMapAttempts"));
            Map<JobACL, AccessControlList> allacls = job.getJobACLs();
            if (allacls != null) {
                for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
                    String expectName = entry.getKey().getAclName();
                    String expectValue = entry.getValue().getAclString();
                    Boolean found = false;
                    // make sure ws includes it
                    NodeList id = element.getElementsByTagName("acls");
                    if (id != null) {
                        for (int j = 0; j < id.getLength(); j++) {
                            Element aclElem = (Element) id.item(j);
                            if (aclElem == null) {
                                fail("should have acls in the web service info");
                            }
                            if (expectName.matches(WebServicesTestUtils.getXmlString(aclElem, "name"))) {
                                found = true;
                                WebServicesTestUtils.checkStringMatch("value", expectValue, WebServicesTestUtils.getXmlString(aclElem, "value"));
                            }
                        }
                    } else {
                        fail("should have acls in the web service info");
                    }
                    replacedertTrue("acl: " + expectName + " not found in webservice output", found);
                }
            }
        }
    }

    public void verifyAMJobGeneric(Job job, String id, String user, String name, String state, long startTime, long finishTime, long elapsedTime, int mapsTotal, int mapsCompleted, int reducesTotal, int reducesCompleted, float reduceProgress, float mapProgress) {
        JobReport report = job.getReport();
        WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()), id);
        WebServicesTestUtils.checkStringMatch("user", job.getUserName().toString(), user);
        WebServicesTestUtils.checkStringMatch("name", job.getName(), name);
        WebServicesTestUtils.checkStringMatch("state", job.getState().toString(), state);
        replacedertEquals("startTime incorrect", report.getStartTime(), startTime);
        replacedertEquals("finishTime incorrect", report.getFinishTime(), finishTime);
        replacedertEquals("elapsedTime incorrect", Times.elapsed(report.getStartTime(), report.getFinishTime()), elapsedTime);
        replacedertEquals("mapsTotal incorrect", job.getTotalMaps(), mapsTotal);
        replacedertEquals("mapsCompleted incorrect", job.getCompletedMaps(), mapsCompleted);
        replacedertEquals("reducesTotal incorrect", job.getTotalReduces(), reducesTotal);
        replacedertEquals("reducesCompleted incorrect", job.getCompletedReduces(), reducesCompleted);
        replacedertEquals("mapProgress incorrect", report.getMapProgress() * 100, mapProgress, 0);
        replacedertEquals("reduceProgress incorrect", report.getReduceProgress() * 100, reduceProgress, 0);
    }

    public void verifyAMJobGenericSecure(Job job, int mapsPending, int mapsRunning, int reducesPending, int reducesRunning, Boolean uberized, String diagnostics, int newReduceAttempts, int runningReduceAttempts, int failedReduceAttempts, int killedReduceAttempts, int successfulReduceAttempts, int newMapAttempts, int runningMapAttempts, int failedMapAttempts, int killedMapAttempts, int successfulMapAttempts) {
        String diagString = "";
        List<String> diagList = job.getDiagnostics();
        if (diagList != null && !diagList.isEmpty()) {
            StringBuffer b = new StringBuffer();
            for (String diag : diagList) {
                b.append(diag);
            }
            diagString = b.toString();
        }
        WebServicesTestUtils.checkStringMatch("diagnostics", diagString, diagnostics);
        replacedertEquals("isUber incorrect", job.isUber(), uberized);
        // unfortunately the following fields are all calculated in JobInfo
        // so not easily accessible without doing all the calculations again.
        // For now just make sure they are present.
        replacedertTrue("mapsPending not >= 0", mapsPending >= 0);
        replacedertTrue("mapsRunning not >= 0", mapsRunning >= 0);
        replacedertTrue("reducesPending not >= 0", reducesPending >= 0);
        replacedertTrue("reducesRunning not >= 0", reducesRunning >= 0);
        replacedertTrue("newReduceAttempts not >= 0", newReduceAttempts >= 0);
        replacedertTrue("runningReduceAttempts not >= 0", runningReduceAttempts >= 0);
        replacedertTrue("failedReduceAttempts not >= 0", failedReduceAttempts >= 0);
        replacedertTrue("killedReduceAttempts not >= 0", killedReduceAttempts >= 0);
        replacedertTrue("successfulReduceAttempts not >= 0", successfulReduceAttempts >= 0);
        replacedertTrue("newMapAttempts not >= 0", newMapAttempts >= 0);
        replacedertTrue("runningMapAttempts not >= 0", runningMapAttempts >= 0);
        replacedertTrue("failedMapAttempts not >= 0", failedMapAttempts >= 0);
        replacedertTrue("killedMapAttempts not >= 0", killedMapAttempts >= 0);
        replacedertTrue("successfulMapAttempts not >= 0", successfulMapAttempts >= 0);
    }

    @Test
    public void testJobCounters() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobCounters");
            verifyAMJobCounters(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobCountersSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobCounters");
            verifyAMJobCounters(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobCountersDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobCounters");
            verifyAMJobCounters(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobCountersXML() throws Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("counters").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String xml = response.getEntity(String.class);
            DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            DocumentBuilder db = dbf.newDocumentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(xml));
            Document dom = db.parse(is);
            NodeList info = dom.getElementsByTagName("jobCounters");
            verifyAMJobCountersXML(info, jobsMap.get(id));
        }
    }

    public void verifyAMJobCounters(JSONObject info, Job job) throws JSONException {
        assertEquals("incorrect number of elements", 2, info.length());
        WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()), info.getString("id"));
        // just do simple verification of the fields - not that the data
        // in the fields is correct
        JSONArray counterGroups = info.getJSONArray("counterGroup");
        for (int i = 0; i < counterGroups.length(); i++) {
            JSONObject counterGroup = counterGroups.getJSONObject(i);
            String name = counterGroup.getString("counterGroupName");
            replacedertTrue("name not set", (name != null && !name.isEmpty()));
            JSONArray counters = counterGroup.getJSONArray("counter");
            for (int j = 0; j < counters.length(); j++) {
                JSONObject counter = counters.getJSONObject(j);
                String counterName = counter.getString("name");
                replacedertTrue("counter name not set", (counterName != null && !counterName.isEmpty()));
                long mapValue = counter.getLong("mapCounterValue");
                replacedertTrue("mapCounterValue  >= 0", mapValue >= 0);
                long reduceValue = counter.getLong("reduceCounterValue");
                replacedertTrue("reduceCounterValue  >= 0", reduceValue >= 0);
                long totalValue = counter.getLong("totalCounterValue");
                replacedertTrue("totalCounterValue  >= 0", totalValue >= 0);
            }
        }
    }

    public void verifyAMJobCountersXML(NodeList nodes, Job job) {
        for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            replacedertNotNull("Job not found - output incorrect", job);
            WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()), WebServicesTestUtils.getXmlString(element, "id"));
            // just do simple verification of fields - not data is correct
            // in the fields
            NodeList groups = element.getElementsByTagName("counterGroup");
            for (int j = 0; j < groups.getLength(); j++) {
                Element counters = (Element) groups.item(j);
                replacedertNotNull("should have counters in the web service info", counters);
                String name = WebServicesTestUtils.getXmlString(counters, "counterGroupName");
                replacedertTrue("name not set", (name != null && !name.isEmpty()));
                NodeList counterArr = counters.getElementsByTagName("counter");
                for (int z = 0; z < counterArr.getLength(); z++) {
                    Element counter = (Element) counterArr.item(z);
                    String counterName = WebServicesTestUtils.getXmlString(counter, "name");
                    replacedertTrue("counter name not set", (counterName != null && !counterName.isEmpty()));
                    long mapValue = WebServicesTestUtils.getXmlLong(counter, "mapCounterValue");
                    replacedertTrue("mapCounterValue not >= 0", mapValue >= 0);
                    long reduceValue = WebServicesTestUtils.getXmlLong(counter, "reduceCounterValue");
                    replacedertTrue("reduceCounterValue  >= 0", reduceValue >= 0);
                    long totalValue = WebServicesTestUtils.getXmlLong(counter, "totalCounterValue");
                    replacedertTrue("totalCounterValue  >= 0", totalValue >= 0);
                }
            }
        }
    }

    @Test
    public void testJobAttempts() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("jobattempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobAttempts");
            verifyJobAttempts(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobAttemptsSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("jobattempts/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobAttempts");
            verifyJobAttempts(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobAttemptsDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("jobattempts").get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobAttempts");
            verifyJobAttempts(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobAttemptsXML() throws Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("jobattempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String xml = response.getEnreplacedy(String.clreplaced);
            DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
            DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(xml));
            Doreplacedent dom = db.parse(is);
            NodeList attempts = dom.getElementsByTagName("jobAttempts");
            replacedertEquals("incorrect number of elements", 1, attempts.getLength());
            NodeList info = dom.getElementsByTagName("jobAttempt");
            verifyJobAttemptsXML(info, jobsMap.get(id));
        }
    }

    public void verifyJobAttempts(JSONObject info, Job job) throws JSONException {
        JSONArray attempts = info.getJSONArray("jobAttempt");
        assertEquals("incorrect number of elements", 2, attempts.length());
        for (int i = 0; i < attempts.length(); i++) {
            JSONObject attempt = attempts.getJSONObject(i);
            verifyJobAttemptsGeneric(job, attempt.getString("nodeHttpAddress"), attempt.getString("nodeId"), attempt.getInt("id"), attempt.getLong("startTime"), attempt.getString("containerId"), attempt.getString("logsLink"));
        }
    }

    public void verifyJobAttemptsXML(NodeList nodes, Job job) {
        assertEquals("incorrect number of elements", 2, nodes.getLength());
        for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            verifyJobAttemptsGeneric(job, WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"), WebServicesTestUtils.getXmlString(element, "nodeId"), WebServicesTestUtils.getXmlInt(element, "id"), WebServicesTestUtils.getXmlLong(element, "startTime"), WebServicesTestUtils.getXmlString(element, "containerId"), WebServicesTestUtils.getXmlString(element, "logsLink"));
        }
    }

    public void verifyJobAttemptsGeneric(Job job, String nodeHttpAddress, String nodeId, int id, long startTime, String containerId, String logsLink) {
        boolean attemptFound = false;
        for (AMInfo amInfo : job.getAMInfos()) {
            if (amInfo.getAppAttemptId().getAttemptId() == id) {
                attemptFound = true;
                String nmHost = amInfo.getNodeManagerHost();
                int nmHttpPort = amInfo.getNodeManagerHttpPort();
                int nmPort = amInfo.getNodeManagerPort();
                WebServicesTestUtils.checkStringMatch("nodeHttpAddress", nmHost + ":" + nmHttpPort, nodeHttpAddress);
                WebServicesTestUtils.checkStringMatch("nodeId", NodeId.newInstance(nmHost, nmPort).toString(), nodeId);
                replacedertTrue("startime not greater than 0", startTime > 0);
                WebServicesTestUtils.checkStringMatch("containerId", amInfo.getContainerId().toString(), containerId);
                String localLogsLink = ujoin("node", "containerlogs", containerId, job.getUserName());
                replacedertTrue("logsLink", logsLink.contains(localLogsLink));
            }
        }
        replacedertTrue("attempt: " + id + " was not found", attemptFound);
    }
}
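
The tests above exercise the endpoints through an in-process JerseyTest harness. As a rough standalone illustration (not part of the original test class), the same jobattempts resource could be queried with the Jersey 1.x client like this; the base URL and job id below are placeholders:

import javax.ws.rs.core.MediaType;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public class JobAttemptsClientSketch {

    public static void main(String[] args) {
        Client client = Client.create();
        // placeholder AM web address; in practice this is usually reached via the RM proxy
        WebResource r = client.resource("http://am-host:8080");
        ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_0_0000").path("jobattempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        System.out.println(response.getEntity(String.class));
    }
}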

17 Source : TestAMWebServicesJobConf.java
with Apache License 2.0
from NJUJYB

/**
 * Test the app master web service Rest API for getting the job conf. This
 * requires creating a temporary configuration file.
 *
 *   /ws/v1/mapreduce/jobs/{jobid}/conf
 */
public class TestAMWebServicesJobConf extends JerseyTest {

    private static Configuration conf = new Configuration();

    private static AppContext appContext;

    private static File testConfDir = new File("target", TestAMWebServicesJobConf.class.getSimpleName() + "confDir");

    private Injector injector = Guice.createInjector(new ServletModule() {

        @Override
        protected void configureServlets() {
            Path confPath = new Path(testConfDir.toString(), MRJobConfig.JOB_CONF_FILE);
            Configuration config = new Configuration();
            FileSystem localFs;
            try {
                localFs = FileSystem.getLocal(config);
                confPath = localFs.makeQualified(confPath);
                OutputStream out = localFs.create(confPath);
                try {
                    conf.writeXml(out);
                } finally {
                    out.close();
                }
                if (!localFs.exists(confPath)) {
                    fail("error creating config file: " + confPath);
                }
            } catch (IOException e) {
                fail("error creating config file: " + e.getMessage());
            }
            appContext = new MockAppContext(0, 2, 1, confPath);
            bind(JAXBContextResolver.class);
            bind(AMWebServices.class);
            bind(GenericExceptionHandler.class);
            bind(AppContext.class).toInstance(appContext);
            bind(Configuration.class).toInstance(conf);
            serve("/*").with(GuiceContainer.class);
        }
    });

    public class GuiceServletConfig extends GuiceServletContextListener {

        @Override
        protected Injector getInjector() {
            return injector;
        }
    }

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        testConfDir.mkdir();
    }

    @AfterClass
    static public void stop() {
        FileUtil.fullyDelete(testConfDir);
    }

    public TestAMWebServicesJobConf() {
        super(new WebAppDescriptor.Builder("org.apache.hadoop.mapreduce.v2.app.webapp").contextListenerClass(GuiceServletConfig.class).filterClass(com.google.inject.servlet.GuiceFilter.class).contextPath("jersey-guice-filter").servletPath("/").build());
    }

    @Test
    public void testJobConf() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("conf");
            verifyAMJobConf(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobConfSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("conf");
            verifyAMJobConf(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobConfDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf").get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
            replacedertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("conf");
            verifyAMJobConf(info, jobsMap.get(id));
        }
    }

    @Test
    public void testJobConfXML() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf").accept(MediaType.APPLICATION_XML).get(ClientResponse.clreplaced);
            replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String xml = response.getEnreplacedy(String.clreplaced);
            DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
            DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(xml));
            Doreplacedent dom = db.parse(is);
            NodeList info = dom.getElementsByTagName("conf");
            verifyAMJobConfXML(info, jobsMap.get(id));
        }
    }

    public void verifyAMJobConf(JSONObject info, Job job) throws JSONException {
        assertEquals("incorrect number of elements", 2, info.length());
        WebServicesTestUtils.checkStringMatch("path", job.getConfFile().toString(), info.getString("path"));
        // just do simple verification that the fields are present - not that
        // the data in the fields is correct
        JSONArray properties = info.getJSONArray("property");
        for (int i = 0; i < properties.length(); i++) {
            JSONObject prop = properties.getJSONObject(i);
            String name = prop.getString("name");
            String value = prop.getString("value");
            replacedertTrue("name not set", (name != null && !name.isEmpty()));
            replacedertTrue("value not set", (value != null && !value.isEmpty()));
        }
    }

    public void verifyAMJobConfXML(NodeList nodes, Job job) {
        assertEquals("incorrect number of elements", 1, nodes.getLength());
        for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            WebServicesTestUtils.checkStringMatch("path", job.getConfFile().toString(), WebServicesTestUtils.getXmlString(element, "path"));
            // just do simple verification that the fields are present - not that
            // the data in the fields is correct
            NodeList properties = element.getElementsByTagName("property");
            for (int j = 0; j < properties.getLength(); j++) {
                Element property = (Element) properties.item(j);
                assertNotNull("should have properties in the web service info", property);
                String name = WebServicesTestUtils.getXmlString(property, "name");
                String value = WebServicesTestUtils.getXmlString(property, "value");
                assertTrue("name not set", (name != null && !name.isEmpty()));
                assertTrue("value not set", (value != null && !value.isEmpty()));
            }
        }
    }
}
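
The configureServlets() block above hinges on serializing a Configuration to the job.xml location that MockAppContext later reads back. A minimal sketch of just that step, with an illustrative property and path (not taken from the test):

import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class WriteJobConfSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // illustrative property; anything set here shows up in the conf REST output
        conf.set("test.property", "test.value");
        FileSystem localFs = FileSystem.getLocal(conf);
        Path confPath = localFs.makeQualified(new Path("target/confDir", MRJobConfig.JOB_CONF_FILE));
        OutputStream out = localFs.create(confPath);
        try {
            // write the configuration as the job.xml the web service serves back
            conf.writeXml(out);
        } finally {
            out.close();
        }
    }
}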

17 Source : TestAMWebServicesAttempts.java
with Apache License 2.0
from NJUJYB

/**
 * Test the app master web service Rest API for getting task attempts, a
 * specific task attempt, and task attempt counters
 *
 * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts
 * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
 * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters
 */
public class TestAMWebServicesAttempts extends JerseyTest {

    private static Configuration conf = new Configuration();

    private static AppContext appContext;

    private Injector injector = Guice.createInjector(new ServletModule() {

        @Override
        protected void configureServlets() {
            appContext = new MockAppContext(0, 1, 2, 1);
            bind(JAXBContextResolver.class);
            bind(AMWebServices.class);
            bind(GenericExceptionHandler.class);
            bind(AppContext.class).toInstance(appContext);
            bind(Configuration.class).toInstance(conf);
            serve("/*").with(GuiceContainer.class);
        }
    });

    public class GuiceServletConfig extends GuiceServletContextListener {

        @Override
        protected Injector getInjector() {
            return injector;
        }
    }

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
    }

    public TestAMWebServicesAttempts() {
        super(new WebAppDescriptor.Builder("org.apache.hadoop.mapreduce.v2.app.webapp").contextListenerClass(GuiceServletConfig.class).filterClass(com.google.inject.servlet.GuiceFilter.class).contextPath("jersey-guice-filter").servletPath("/").build());
    }

    @Test
    public void testTaskAttempts() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                verifyAMTaskAttempts(json, task);
            }
        }
    }

    @Test
    public void testTaskAttemptsSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                verifyAMTaskAttempts(json, task);
            }
        }
    }

    @Test
    public void testTaskAttemptsDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").get(ClientResponse.clreplaced);
                replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                verifyAMTaskAttempts(json, task);
            }
        }
    }

    @Test
    public void testTaskAttemptsXML() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.clreplaced);
                replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
                String xml = response.getEnreplacedy(String.clreplaced);
                DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
                DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
                InputSource is = new InputSource();
                is.setCharacterStream(new StringReader(xml));
                Doreplacedent dom = db.parse(is);
                NodeList attempts = dom.getElementsByTagName("taskAttempts");
                replacedertEquals("incorrect number of elements", 1, attempts.getLength());
                NodeList nodes = dom.getElementsByTagName("taskAttempt");
                verifyAMTaskAttemptsXML(nodes, task);
            }
        }
    }

    @Test
    public void testTaskAttemptId() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                for (TaskAttempt att : task.getAttempts().values()) {
                    TaskAttemptId attemptid = att.getID();
                    String attid = MRApps.toString(attemptid);
                    ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
                    replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                    JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                    replacedertEquals("incorrect number of elements", 1, json.length());
                    JSONObject info = json.getJSONObject("taskAttempt");
                    verifyAMTaskAttempt(info, att, task.getType());
                }
            }
        }
    }

    @Test
    public void testTaskAttemptIdSlash() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                for (TaskAttempt att : task.getAttempts().values()) {
                    TaskAttemptId attemptid = att.getID();
                    String attid = MRApps.toString(attemptid);
                    ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
                    replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                    JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                    replacedertEquals("incorrect number of elements", 1, json.length());
                    JSONObject info = json.getJSONObject("taskAttempt");
                    verifyAMTaskAttempt(info, att, task.getType());
                }
            }
        }
    }

    @Test
    public void testTaskAttemptIdDefault() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                for (TaskAttempt att : task.getAttempts().values()) {
                    TaskAttemptId attemptid = att.getID();
                    String attid = MRApps.toString(attemptid);
                    ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).get(ClientResponse.clreplaced);
                    replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                    JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                    replacedertEquals("incorrect number of elements", 1, json.length());
                    JSONObject info = json.getJSONObject("taskAttempt");
                    verifyAMTaskAttempt(info, att, task.getType());
                }
            }
        }
    }

    @Test
    public void testTaskAttemptIdXML() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                for (TaskAttempt att : task.getAttempts().values()) {
                    TaskAttemptId attemptid = att.getID();
                    String attid = MRApps.toString(attemptid);
                    ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_XML).get(ClientResponse.clreplaced);
                    replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
                    String xml = response.getEnreplacedy(String.clreplaced);
                    DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
                    DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
                    InputSource is = new InputSource();
                    is.setCharacterStream(new StringReader(xml));
                    Doreplacedent dom = db.parse(is);
                    NodeList nodes = dom.getElementsByTagName("taskAttempt");
                    for (int i = 0; i < nodes.getLength(); i++) {
                        Element element = (Element) nodes.item(i);
                        verifyAMTaskAttemptXML(element, att, task.getType());
                    }
                }
            }
        }
    }

    @Test
    public void testTaskAttemptIdBogus() throws JSONException, Exception {
        testTaskAttemptIdErrorGeneric("bogusid", "java.lang.Exception: TaskAttemptId string : bogusid is not properly formed");
    }

    @Test
    public void testTaskAttemptIdNonExist() throws JSONException, Exception {
        testTaskAttemptIdErrorGeneric("attempt_0_12345_m_000000_0", "java.lang.Exception: Error getting info on task attempt id attempt_0_12345_m_000000_0");
    }

    @Test
    public void testTaskAttemptIdInvalid() throws JSONException, Exception {
        testTaskAttemptIdErrorGeneric("attempt_0_12345_d_000000_0", "java.lang.Exception: Bad TaskType identifier. TaskAttemptId string : attempt_0_12345_d_000000_0 is not properly formed.");
    }

    @Test
    public void testTaskAttemptIdInvalid2() throws JSONException, Exception {
        testTaskAttemptIdErrorGeneric("attempt_12345_m_000000_0", "java.lang.Exception: TaskAttemptId string : attempt_12345_m_000000_0 is not properly formed");
    }

    @Test
    public void testTaskAttemptIdInvalid3() throws JSONException, Exception {
        testTaskAttemptIdErrorGeneric("attempt_0_12345_m_000000", "java.lang.Exception: TaskAttemptId string : attempt_0_12345_m_000000 is not properly formed");
    }

    private void testTaskAttemptIdErrorGeneric(String attid, String error) throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                try {
                    r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_JSON).get(JSONObject.clreplaced);
                    fail("should have thrown exception on invalid uri");
                } catch (UniformInterfaceException ue) {
                    ClientResponse response = ue.getResponse();
                    replacedertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
                    replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                    JSONObject msg = response.getEnreplacedy(JSONObject.clreplaced);
                    JSONObject exception = msg.getJSONObject("RemoteException");
                    replacedertEquals("incorrect number of elements", 3, exception.length());
                    String message = exception.getString("message");
                    String type = exception.getString("exception");
                    String clreplacedname = exception.getString("javaClreplacedName");
                    WebServicesTestUtils.checkStringMatch("exception message", error, message);
                    WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", type);
                    WebServicesTestUtils.checkStringMatch("exception clreplacedname", "org.apache.hadoop.yarn.webapp.NotFoundException", clreplacedname);
                }
            }
        }
    }

    public void verifyAMTaskAttemptXML(Element element, TaskAttempt att, TaskType ttype) {
        verifyTaskAttemptGeneric(att, ttype, WebServicesTestUtils.getXmlString(element, "id"), WebServicesTestUtils.getXmlString(element, "state"), WebServicesTestUtils.getXmlString(element, "type"), WebServicesTestUtils.getXmlString(element, "rack"), WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"), WebServicesTestUtils.getXmlString(element, "diagnostics"), WebServicesTestUtils.getXmlString(element, "assignedContainerId"), WebServicesTestUtils.getXmlLong(element, "startTime"), WebServicesTestUtils.getXmlLong(element, "finishTime"), WebServicesTestUtils.getXmlLong(element, "elapsedTime"), WebServicesTestUtils.getXmlFloat(element, "progress"));
        if (ttype == TaskType.REDUCE) {
            verifyReduceTaskAttemptGeneric(att, WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"), WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"), WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"), WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"), WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
        }
    }

    public void verifyAMTaskAttempt(JSONObject info, TaskAttempt att, TaskType ttype) throws JSONException {
        if (ttype == TaskType.REDUCE) {
            replacedertEquals("incorrect number of elements", 17, info.length());
        } else {
            replacedertEquals("incorrect number of elements", 12, info.length());
        }
        verifyTaskAttemptGeneric(att, ttype, info.getString("id"), info.getString("state"), info.getString("type"), info.getString("rack"), info.getString("nodeHttpAddress"), info.getString("diagnostics"), info.getString("replacedignedContainerId"), info.getLong("startTime"), info.getLong("finishTime"), info.getLong("elapsedTime"), (float) info.getDouble("progress"));
        if (ttype == TaskType.REDUCE) {
            verifyReduceTaskAttemptGeneric(att, info.getLong("shuffleFinishTime"), info.getLong("mergeFinishTime"), info.getLong("elapsedShuffleTime"), info.getLong("elapsedMergeTime"), info.getLong("elapsedReduceTime"));
        }
    }

    public void verifyAMTaskAttempts(JSONObject json, Task task) throws JSONException {
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject attempts = json.getJSONObject("taskAttempts");
        assertEquals("incorrect number of elements", 1, attempts.length());
        JSONArray arr = attempts.getJSONArray("taskAttempt");
        for (TaskAttempt att : task.getAttempts().values()) {
            TaskAttemptId id = att.getID();
            String attid = MRApps.toString(id);
            boolean found = false;
            for (int i = 0; i < arr.length(); i++) {
                JSONObject info = arr.getJSONObject(i);
                if (attid.matches(info.getString("id"))) {
                    found = true;
                    verifyAMTaskAttempt(info, att, task.getType());
                }
            }
            replacedertTrue("task attempt with id: " + attid + " not in web service output", found);
        }
    }

    public void verifyAMTaskAttemptsXML(NodeList nodes, Task task) {
        assertEquals("incorrect number of elements", 1, nodes.getLength());
        for (TaskAttempt att : task.getAttempts().values()) {
            TaskAttemptId id = att.getID();
            String attid = MRApps.toString(id);
            boolean found = false;
            for (int i = 0; i < nodes.getLength(); i++) {
                Element element = (Element) nodes.item(i);
                if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
                    found = true;
                    verifyAMTaskAttemptXML(element, att, task.getType());
                }
            }
            replacedertTrue("task with id: " + attid + " not in web service output", found);
        }
    }

    public void verifyTaskAttemptGeneric(TaskAttempt ta, TaskType ttype, String id, String state, String type, String rack, String nodeHttpAddress, String diagnostics, String assignedContainerId, long startTime, long finishTime, long elapsedTime, float progress) {
        TaskAttemptId attid = ta.getID();
        String attemptId = MRApps.toString(attid);
        WebServicesTestUtils.checkStringMatch("id", attemptId, id);
        WebServicesTestUtils.checkStringMatch("type", ttype.toString(), type);
        WebServicesTestUtils.checkStringMatch("state", ta.getState().toString(), state);
        WebServicesTestUtils.checkStringMatch("rack", ta.getNodeRackName(), rack);
        WebServicesTestUtils.checkStringMatch("nodeHttpAddress", ta.getNodeHttpAddress(), nodeHttpAddress);
        String expectDiag = "";
        List<String> diagnosticsList = ta.getDiagnostics();
        if (diagnosticsList != null && !diagnosticsList.isEmpty()) {
            StringBuilder b = new StringBuilder();
            for (String diag : diagnosticsList) {
                b.append(diag);
            }
            expectDiag = b.toString();
        }
        WebServicesTestUtils.checkStringMatch("diagnostics", expectDiag, diagnostics);
        WebServicesTestUtils.checkStringMatch("replacedignedContainerId", ConverterUtils.toString(ta.getreplacedignedContainerID()), replacedignedContainerId);
        replacedertEquals("startTime wrong", ta.getLaunchTime(), startTime);
        replacedertEquals("finishTime wrong", ta.getFinishTime(), finishTime);
        replacedertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
        replacedertEquals("progress wrong", ta.getProgress() * 100, progress, 1e-3f);
    }

    public void verifyReduceTaskAttemptGeneric(TaskAttempt ta, long shuffleFinishTime, long mergeFinishTime, long elapsedShuffleTime, long elapsedMergeTime, long elapsedReduceTime) {
        replacedertEquals("shuffleFinishTime wrong", ta.getShuffleFinishTime(), shuffleFinishTime);
        replacedertEquals("mergeFinishTime wrong", ta.getSortFinishTime(), mergeFinishTime);
        replacedertEquals("elapsedShuffleTime wrong", ta.getShuffleFinishTime() - ta.getLaunchTime(), elapsedShuffleTime);
        replacedertEquals("elapsedMergeTime wrong", ta.getSortFinishTime() - ta.getShuffleFinishTime(), elapsedMergeTime);
        replacedertEquals("elapsedReduceTime wrong", ta.getFinishTime() - ta.getSortFinishTime(), elapsedReduceTime);
    }

    @Test
    public void testTaskAttemptIdCounters() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                for (TaskAttempt att : task.getAttempts().values()) {
                    TaskAttemptId attemptid = att.getID();
                    String attid = MRApps.toString(attemptid);
                    ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.clreplaced);
                    replacedertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
                    JSONObject json = response.getEnreplacedy(JSONObject.clreplaced);
                    replacedertEquals("incorrect number of elements", 1, json.length());
                    JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
                    verifyAMJobTaskAttemptCounters(info, att);
                }
            }
        }
    }

    @Test
    public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
        WebResource r = resource();
        Map<JobId, Job> jobsMap = appContext.getAllJobs();
        for (JobId id : jobsMap.keySet()) {
            String jobId = MRApps.toString(id);
            for (Task task : jobsMap.get(id).getTasks().values()) {
                String tid = MRApps.toString(task.getID());
                for (TaskAttempt att : task.getAttempts().values()) {
                    TaskAttemptId attemptid = att.getID();
                    String attid = MRApps.toString(attemptid);
                    ClientResponse response = r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).path("counters").accept(MediaType.APPLICATION_XML).get(ClientResponse.clreplaced);
                    replacedertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
                    String xml = response.getEnreplacedy(String.clreplaced);
                    DoreplacedentBuilderFactory dbf = DoreplacedentBuilderFactory.newInstance();
                    DoreplacedentBuilder db = dbf.newDoreplacedentBuilder();
                    InputSource is = new InputSource();
                    is.setCharacterStream(new StringReader(xml));
                    Doreplacedent dom = db.parse(is);
                    NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");
                    verifyAMTaskCountersXML(nodes, att);
                }
            }
        }
    }

    public void verifyAMJobTaskAttemptCounters(JSONObject info, TaskAttempt att) throws JSONException {
        assertEquals("incorrect number of elements", 2, info.length());
        WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()), info.getString("id"));
        // just do simple verification that the fields are present - not that
        // the data in the fields is correct
        JSONArray counterGroups = info.getJSONArray("taskAttemptCounterGroup");
        for (int i = 0; i < counterGroups.length(); i++) {
            JSONObject counterGroup = counterGroups.getJSONObject(i);
            String name = counterGroup.getString("counterGroupName");
            replacedertTrue("name not set", (name != null && !name.isEmpty()));
            JSONArray counters = counterGroup.getJSONArray("counter");
            for (int j = 0; j < counters.length(); j++) {
                JSONObject counter = counters.getJSONObject(j);
                String counterName = counter.getString("name");
                replacedertTrue("name not set", (counterName != null && !counterName.isEmpty()));
                long value = counter.getLong("value");
                replacedertTrue("value  >= 0", value >= 0);
            }
        }
    }

    public void verifyAMTaskCountersXML(NodeList nodes, TaskAttempt att) {
        for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()), WebServicesTestUtils.getXmlString(element, "id"));
            // just do simple verification that the fields are present - not that
            // the data in the fields is correct
            NodeList groups = element.getElementsByTagName("taskAttemptCounterGroup");
            for (int j = 0; j < groups.getLength(); j++) {
                Element counters = (Element) groups.item(j);
                replacedertNotNull("should have counters in the web service info", counters);
                String name = WebServicesTestUtils.getXmlString(counters, "counterGroupName");
                replacedertTrue("name not set", (name != null && !name.isEmpty()));
                NodeList counterArr = counters.getElementsByTagName("counter");
                for (int z = 0; z < counterArr.getLength(); z++) {
                    Element counter = (Element) counterArr.item(z);
                    String counterName = WebServicesTestUtils.getXmlString(counter, "name");
                    replacedertTrue("counter name not set", (counterName != null && !counterName.isEmpty()));
                    long value = WebServicesTestUtils.getXmlLong(counter, "value");
                    replacedertTrue("value not >= 0", value >= 0);
                }
            }
        }
    }
}

17 Source : AMWebServices.java
with Apache License 2.0
from NJUJYB

/**
 * convert a job id string to an actual job and handle all the error checking.
 */
public static Job getJobFromJobIdString(String jid, AppContext appCtx) throws NotFoundException {
    JobId jobId;
    Job job;
    try {
        jobId = MRApps.toJobID(jid);
    } catch (YarnRuntimeException e) {
        // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
        // anymore but keeping it for now just in case other stuff starts failing.
        // Also, the webservice should ideally return BadRequest (HTTP:400) when
        // the id is malformed instead of NotFound (HTTP:404). The webserver on
        // top of which AMWebServices is built seems to automatically do that for
        // unhandled exceptions
        throw new NotFoundException(e.getMessage());
    } catch (IllegalArgumentException e) {
        throw new NotFoundException(e.getMessage());
    }
    if (jobId == null) {
        throw new NotFoundException("job, " + jid + ", is not found");
    }
    job = appCtx.getJob(jobId);
    if (job == null) {
        throw new NotFoundException("job, " + jid + ", is not found");
    }
    return job;
}
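
Since the helper is public static, callers outside AMWebServices can reuse it. A minimal hypothetical usage sketch (the job id below is a placeholder; appCtx is assumed to be an injected AppContext):

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebServices;

public class JobLookupSketch {

    // throws org.apache.hadoop.yarn.webapp.NotFoundException for malformed or unknown ids
    static Job lookup(AppContext appCtx) {
        return AMWebServices.getJobFromJobIdString("job_1326821518301_0005", appCtx);
    }
}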

17 Source : StartEndTimesBase.java
with Apache License 2.0
from NJUJYB

abstract class StartEndTimesBase implements TaskRuntimeEstimator {

    static final float MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE = 0.05F;

    static final int MINIMUM_COMPLETE_NUMBER_TO_SPECULATE = 1;

    protected AppContext context = null;

    protected final Map<TaskAttemptId, Long> startTimes = new ConcurrentHashMap<TaskAttemptId, Long>();

    // XXXX This class design assumes that the contents of AppContext.getAllJobs
    // never changes.  Is that right?
    //
    // This assumption comes into play in several places, mostly in data
    // structures that can grow without limit if an AppContext gets new Jobs
    // when the old ones run out.  Also, these mapper statistics blocks won't
    // cover the Jobs we don't know about.
    protected final Map<Job, DataStatistics> mapperStatistics = new HashMap<Job, DataStatistics>();

    protected final Map<Job, DataStatistics> reducerStatistics = new HashMap<Job, DataStatistics>();

    private final Map<Job, Float> slowTaskRelativeThresholds = new HashMap<Job, Float>();

    protected final Set<Task> doneTasks = new HashSet<Task>();

    @Override
    public void enrollAttempt(TaskAttemptStatus status, long timestamp) {
        startTimes.put(status.id, timestamp);
    }

    @Override
    public long attemptEnrolledTime(TaskAttemptId attemptID) {
        Long result = startTimes.get(attemptID);
        return result == null ? Long.MAX_VALUE : result;
    }

    @Override
    public void contextualize(Configuration conf, AppContext context) {
        this.context = context;
        Map<JobId, Job> allJobs = context.getAllJobs();
        for (Map.Entry<JobId, Job> entry : allJobs.entrySet()) {
            final Job job = entry.getValue();
            mapperStatistics.put(job, new DataStatistics());
            reducerStatistics.put(job, new DataStatistics());
            slowTaskRelativeThresholds.put(job, conf.getFloat(MRJobConfig.SPECULATIVE_SLOWTASK_THRESHOLD, 1.0f));
        }
    }

    protected DataStatistics dataStatisticsForTask(TaskId taskID) {
        JobId jobID = taskID.getJobId();
        Job job = context.getJob(jobID);
        if (job == null) {
            return null;
        }
        Task task = job.getTask(taskID);
        if (task == null) {
            return null;
        }
        return task.getType() == TaskType.MAP ? mapperStatistics.get(job) : task.getType() == TaskType.REDUCE ? reducerStatistics.get(job) : null;
    }

    @Override
    public long thresholdRuntime(TaskId taskID) {
        JobId jobID = taskID.getJobId();
        Job job = context.getJob(jobID);
        TaskType type = taskID.getTaskType();
        DataStatistics statistics = dataStatisticsForTask(taskID);
        int completedTasksOfType = type == TaskType.MAP ? job.getCompletedMaps() : job.getCompletedReduces();
        int totalTasksOfType = type == TaskType.MAP ? job.getTotalMaps() : job.getTotalReduces();
        if (completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE || (((float) completedTasksOfType) / totalTasksOfType) < MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE) {
            return Long.MAX_VALUE;
        }
        long result = statistics == null ? Long.MAX_VALUE : (long) statistics.outlier(slowTaskRelativeThresholds.get(job));
        return result;
    }

    @Override
    public long estimatedNewAttemptRuntime(TaskId id) {
        DataStatistics statistics = dataStatisticsForTask(id);
        if (statistics == null) {
            return -1L;
        }
        return (long) statistics.mean();
    }

    @Override
    public void updateAttempt(TaskAttemptStatus status, long timestamp) {
        TaskAttemptId attemptID = status.id;
        TaskId taskID = attemptID.getTaskId();
        JobId jobID = taskID.getJobId();
        Job job = context.getJob(jobID);
        if (job == null) {
            return;
        }
        Task task = job.getTask(taskID);
        if (task == null) {
            return;
        }
        Long boxedStart = startTimes.get(attemptID);
        long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
        TaskAttempt taskAttempt = task.getAttempt(attemptID);
        if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
            boolean isNew = false;
            // is this a new success?
            synchronized (doneTasks) {
                if (!doneTasks.contains(task)) {
                    doneTasks.add(task);
                    isNew = true;
                }
            }
            // It's a new completion
            // Note that if a task completes twice [because of a previous speculation
            // and a race, or a success followed by loss of the machine with the
            // local data] we only count the first one.
            if (isNew) {
                long finish = timestamp;
                if (start > 1L && finish > 1L && start <= finish) {
                    long duration = finish - start;
                    DataStatistics statistics = dataStatisticsForTask(taskID);
                    if (statistics != null) {
                        statistics.add(duration);
                    }
                }
            }
        }
    }
}
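
StartEndTimesBase is abstract; a concrete estimator supplies estimatedRuntime and runtimeEstimateVariance on top of the statistics gathered in updateAttempt(). An illustrative subclass (not from Hadoop; it must live in the org.apache.hadoop.mapreduce.v2.app.speculate package because StartEndTimesBase is package-private) might look like:

package org.apache.hadoop.mapreduce.v2.app.speculate;

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;

class MeanRuntimeEstimatorSketch extends StartEndTimesBase {

    // estimate an attempt's runtime as the mean duration of completed
    // same-type attempts in its job, as accumulated by updateAttempt()
    @Override
    public long estimatedRuntime(TaskAttemptId id) {
        DataStatistics statistics = dataStatisticsForTask(id.getTaskId());
        return statistics == null ? -1L : (long) statistics.mean();
    }

    @Override
    public long runtimeEstimateVariance(TaskAttemptId id) {
        DataStatistics statistics = dataStatisticsForTask(id.getTaskId());
        return statistics == null ? -1L : (long) statistics.var();
    }
}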

17 Source : NullTaskRuntimesEngine.java
with Apache License 2.0
from NJUJYB

@Override
public void contextualize(Configuration conf, AppContext context) {
// no code
}

17 Source : ExponentiallySmoothedTaskRuntimeEstimator.java
with Apache License 2.0
from NJUJYB

@Override
public void contextualize(Configuration conf, AppContext context) {
    super.contextualize(conf, context);
    lambda = conf.getLong(MRJobConfig.MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS, MRJobConfig.DEFAULT_MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS);
    smoothedValue = conf.getBoolean(MRJobConfig.MR_AM_TASK_ESTIMATOR_EXPONENTIAL_RATE_ENABLE, true) ? SmoothedValue.RATE : SmoothedValue.TIME_PER_UNIT_PROGRESS;
}
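
The AM's speculator picks the estimator class from the configuration and then calls contextualize(). A hedged sketch of that wiring done by hand (appContext is assumed to be the AM's AppContext; the lambda value is illustrative):

public static TaskRuntimeEstimator makeEstimator(Configuration conf, AppContext appContext) throws Exception {
    // select the estimator implementation and its smoothing window
    conf.setClass(MRJobConfig.MR_AM_TASK_ESTIMATOR, ExponentiallySmoothedTaskRuntimeEstimator.class, TaskRuntimeEstimator.class);
    conf.setLong(MRJobConfig.MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS, 60 * 1000L);
    Class<? extends TaskRuntimeEstimator> estimatorClass = conf.getClass(MRJobConfig.MR_AM_TASK_ESTIMATOR, LegacyTaskRuntimeEstimator.class, TaskRuntimeEstimator.class);
    TaskRuntimeEstimator estimator = estimatorClass.getConstructor().newInstance();
    estimator.contextualize(conf, appContext);
    return estimator;
}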

16 Source : TestHsWebServices.java
with Apache License 2.0
from NJUJYB

public void verifyHSInfo(JSONObject info, AppContext ctx) throws JSONException {
    assertEquals("incorrect number of elements", 4, info.length());
    verifyHsInfoGeneric(info.getString("hadoopVersionBuiltOn"), info.getString("hadoopBuildVersion"), info.getString("hadoopVersion"), info.getLong("startedOn"));
}

16 Source : TestAMWebServices.java
with Apache License 2.0
from NJUJYB

public void verifyAMInfo(JSONObject info, AppContext ctx) throws JSONException {
    assertEquals("incorrect number of elements", 5, info.length());
    verifyAMInfoGeneric(ctx, info.getString("appId"), info.getString("user"), info.getString("name"), info.getLong("startedOn"), info.getLong("elapsedTime"));
}

16 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testTasksView() {
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = getTaskParams(appContext);
    WebAppTests.testPage(TasksPage.class, AppContext.class, appContext, params);
}

16 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testCountersView() {
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = getJobParams(appContext);
    WebAppTests.testPage(CountersPage.class, AppContext.class, appContext, params);
}

16 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testJobView() {
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = getJobParams(appContext);
    WebAppTests.testPage(JobPage.class, AppContext.class, appContext, params);
}

16 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testTaskCountersView() {
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = getTaskParams(appContext);
    WebAppTests.testPage(CountersPage.class, AppContext.class, appContext, params);
}
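
The getTaskParams(...) helper used by the page tests above is not shown on this page. A hypothetical stand-in with the same shape, pulling the first job and task out of the context and mapping them to the request parameters the pages read:

public static Map<String, String> getTaskParams(AppContext appContext) {
    // grab the first job and its first task from the (mock) context
    JobId jobId = appContext.getAllJobs().entrySet().iterator().next().getKey();
    Entry<TaskId, Task> e = appContext.getJob(jobId).getTasks().entrySet().iterator().next();
    Map<String, String> params = new HashMap<String, String>();
    params.put(AMParams.JOB_ID, MRApps.toString(jobId));
    params.put(AMParams.TASK_ID, MRApps.toString(e.getKey()));
    params.put(AMParams.TASK_TYPE, MRApps.taskSymbol(e.getValue().getType()));
    return params;
}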

16 Source : App.java
with Apache License 2.0
from NJUJYB

@RequestScoped
public class App {

    final AppContext context;

    private Job job;

    private Task task;

    @Inject
    App(AppContext ctx) {
        context = ctx;
    }

    void setJob(Job job) {
        this.job = job;
    }

    public Job getJob() {
        return job;
    }

    void setTask(Task task) {
        this.task = task;
    }

    public Task getTask() {
        return task;
    }
}

16 Source : AMWebServices.java
with Apache License 2.0
from NJUJYB

@Path("/ws/v1/mapreduce")
public class AMWebServices {

    private final AppContext appCtx;

    private final App app;

    @Context
    private HttpServletResponse response;

    @Inject
    public AMWebServices(final App app, final AppContext context) {
        this.appCtx = context;
        this.app = app;
    }

    Boolean hasAccess(Job job, HttpServletRequest request) {
        String remoteUser = request.getRemoteUser();
        UserGroupInformation callerUGI = null;
        if (remoteUser != null) {
            callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
        }
        if (callerUGI != null && !job.checkAccess(callerUGI, JobACL.VIEW_JOB)) {
            return false;
        }
        return true;
    }

    private void init() {
        // clear content type
        response.setContentType(null);
    }

    /**
     * convert a job id string to an actual job and handle all the error checking.
     */
    public static Job getJobFromJobIdString(String jid, AppContext appCtx) throws NotFoundException {
        JobId jobId;
        Job job;
        try {
            jobId = MRApps.toJobID(jid);
        } catch (YarnRuntimeException e) {
            // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
            // anymore but keeping it for now just in case other stuff starts failing.
            // Also, the webservice should ideally return BadRequest (HTTP:400) when
            // the id is malformed instead of NotFound (HTTP:404). The webserver on
            // top of which AMWebServices is built seems to automatically do that for
            // unhandled exceptions
            throw new NotFoundException(e.getMessage());
        } catch (IllegalArgumentException e) {
            throw new NotFoundException(e.getMessage());
        }
        if (jobId == null) {
            throw new NotFoundException("job, " + jid + ", is not found");
        }
        job = appCtx.getJob(jobId);
        if (job == null) {
            throw new NotFoundException("job, " + jid + ", is not found");
        }
        return job;
    }

    /**
     * convert a task id string to an actual task and handle all the error
     * checking.
     */
    public static Task getTaskFromTaskIdString(String tid, Job job) throws NotFoundException {
        TaskId taskID;
        Task task;
        try {
            taskID = MRApps.toTaskID(tid);
        } catch (YarnRuntimeException e) {
            // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
            // anymore but keeping it for now just in case other stuff starts failing.
            // Also, the webservice should ideally return BadRequest (HTTP:400) when
            // the id is malformed instead of NotFound (HTTP:404). The webserver on
            // top of which AMWebServices is built seems to automatically do that for
            // unhandled exceptions
            throw new NotFoundException(e.getMessage());
        } catch (NumberFormatException ne) {
            throw new NotFoundException(ne.getMessage());
        } catch (IllegalArgumentException e) {
            throw new NotFoundException(e.getMessage());
        }
        if (taskID == null) {
            throw new NotFoundException("taskid " + tid + " not found or invalid");
        }
        task = job.getTask(taskID);
        if (task == null) {
            throw new NotFoundException("task not found with id " + tid);
        }
        return task;
    }

    /**
     * convert a task attempt id string to an actual task attempt and handle all
     * the error checking.
     */
    public static TaskAttempt getTaskAttemptFromTaskAttemptString(String attId, Task task) throws NotFoundException {
        TaskAttemptId attemptId;
        TaskAttempt ta;
        try {
            attemptId = MRApps.toTaskAttemptID(attId);
        } catch (YarnRuntimeException e) {
            // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here
            // anymore but keeping it for now just in case other stuff starts failing.
            // Also, the webservice should ideally return BadRequest (HTTP:400) when
            // the id is malformed instead of NotFound (HTTP:404). The webserver on
            // top of which AMWebServices is built seems to automatically do that for
            // unhandled exceptions
            throw new NotFoundException(e.getMessage());
        } catch (NumberFormatException ne) {
            throw new NotFoundException(ne.getMessage());
        } catch (IllegalArgumentException e) {
            throw new NotFoundException(e.getMessage());
        }
        if (attemptId == null) {
            throw new NotFoundException("task attempt id " + attId + " not found or invalid");
        }
        ta = task.getAttempt(attemptId);
        if (ta == null) {
            throw new NotFoundException("Error getting info on task attempt id " + attId);
        }
        return ta;
    }

    /**
     * check for job access.
     *
     * @param job
     *          the job that is being accessed
     */
    void checkAccess(Job job, HttpServletRequest request) {
        if (!hasAccess(job, request)) {
            throw new WebApplicationException(Status.UNAUTHORIZED);
        }
    }

    @GET
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public AppInfo get() {
        return getAppInfo();
    }

    @GET
    @Path("/info")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public AppInfo getAppInfo() {
        init();
        return new AppInfo(this.app, this.app.context);
    }

    @GET
    @Path("/blacklistednodes")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public BlacklistedNodesInfo getBlacklistedNodes() {
        init();
        return new BlacklistedNodesInfo(this.app.context);
    }

    @GET
    @Path("/jobs")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public JobsInfo getJobs(@Context HttpServletRequest hsr) {
        init();
        JobsInfo allJobs = new JobsInfo();
        for (Job job : appCtx.getAllJobs().values()) {
            // getAllJobs only gives you a partial job; we want the full one
            Job fullJob = appCtx.getJob(job.getID());
            if (fullJob == null) {
                continue;
            }
            allJobs.add(new JobInfo(fullJob, hasAccess(fullJob, hsr)));
        }
        return allJobs;
    }

    @GET
    @Path("/jobs/{jobid}")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public JobInfo getJob(@Context HttpServletRequest hsr, @PathParam("jobid") String jid) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        return new JobInfo(job, hasAccess(job, hsr));
    }

    @GET
    @Path("/jobs/{jobid}/jobattempts")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        AMAttemptsInfo amAttempts = new AMAttemptsInfo();
        for (AMInfo amInfo : job.getAMInfos()) {
            AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString(job.getID()), job.getUserName());
            amAttempts.add(attempt);
        }
        return amAttempts;
    }

    @GET
    @Path("/jobs/{jobid}/counters")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr, @PathParam("jobid") String jid) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        return new JobCounterInfo(this.appCtx, job);
    }

    @GET
    @Path("/jobs/{jobid}/conf")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public ConfInfo getJobConf(@Context HttpServletRequest hsr, @PathParam("jobid") String jid) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        ConfInfo info;
        try {
            info = new ConfInfo(job);
        } catch (IOException e) {
            throw new NotFoundException("unable to load configuration for job: " + jid);
        }
        return info;
    }

    @GET
    @Path("/jobs/{jobid}/tasks")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public TasksInfo getJobTasks(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @QueryParam("type") String type) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        TasksInfo allTasks = new TasksInfo();
        for (Task task : job.getTasks().values()) {
            TaskType ttype = null;
            if (type != null && !type.isEmpty()) {
                try {
                    ttype = MRApps.taskType(type);
                } catch (YarnRuntimeException e) {
                    throw new BadRequestException("tasktype must be either m or r");
                }
            }
            if (ttype != null && task.getType() != ttype) {
                continue;
            }
            allTasks.add(new TaskInfo(task));
        }
        return allTasks;
    }

    @GET
    @Path("/jobs/{jobid}/tasks/{taskid}")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public TaskInfo getJobTask(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        Task task = getTaskFromTaskIdString(tid, job);
        return new TaskInfo(task);
    }

    @GET
    @Path("/jobs/{jobid}/tasks/{taskid}/counters")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public JobTaskCounterInfo getSingleTaskCounters(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        Task task = getTaskFromTaskIdString(tid, job);
        return new JobTaskCounterInfo(task);
    }

    @GET
    @Path("/jobs/{jobid}/tasks/{taskid}/attempts")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
        init();
        TaskAttemptsInfo attempts = new TaskAttemptsInfo();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        Task task = getTaskFromTaskIdString(tid, job);
        for (TaskAttempt ta : task.getAttempts().values()) {
            if (ta != null) {
                if (task.getType() == TaskType.REDUCE) {
                    attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
                } else {
                    attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
                }
            }
        }
        return attempts;
    }

    @GET
    @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        Task task = getTaskFromTaskIdString(tid, job);
        TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
        if (task.getType() == TaskType.REDUCE) {
            return new ReduceTaskAttemptInfo(ta, task.getType());
        } else {
            return new TaskAttemptInfo(ta, task.getType(), true);
        }
    }

    @GET
    @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters")
    @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
    public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
        init();
        Job job = getJobFromJobIdString(jid, appCtx);
        checkAccess(job, hsr);
        Task task = getTaskFromTaskIdString(tid, job);
        TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
        return new JobTaskAttemptCounterInfo(ta);
    }
}
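
The three lookup helpers above are meant to compose: resolve the job first, then the task inside it, then the attempt inside that. As a minimal standalone sketch (not part of the original sources, and assuming the test-only MockAppContext from the hadoop-mapreduce-client-app test jar is on the classpath), the same round trip the web services perform on their path parameters looks roughly like this:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public class LookupHelperSketch {
    public static void main(String[] args) {
        // One application with one job, one task and one attempt.
        AppContext appCtx = new MockAppContext(0, 1, 1, 1);
        Job job = appCtx.getAllJobs().values().iterator().next();
        // Serialize the id the way a client would put it in the URL...
        String jid = MRApps.toString(job.getID());
        // ...and parse it back, as getJobFromJobIdString does.
        JobId parsed = MRApps.toJobID(jid);
        Task task = appCtx.getJob(parsed).getTasks().values().iterator().next();
        System.out.println("resolved " + jid + " to task " + task.getID());
    }
}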

16 Source : MRClientService.java
with Apache License 2.0
from NJUJYB

/**
 * This module is responsible for talking to the
 * jobclient (user facing).
 */
public class MRClientService extends AbstractService implements ClientService {

    static final Log LOG = LogFactory.getLog(MRClientService.class);

    private MRClientProtocol protocolHandler;

    private Server server;

    private WebApp webApp;

    private InetSocketAddress bindAddress;

    private AppContext appContext;

    public MRClientService(AppContext appContext) {
        super(MRClientService.class.getName());
        this.appContext = appContext;
        this.protocolHandler = new MRClientProtocolHandler();
    }

    protected void serviceStart() throws Exception {
        Configuration conf = getConfig();
        YarnRPC rpc = YarnRPC.create(conf);
        InetSocketAddress address = new InetSocketAddress(0);
        server = rpc.getServer(MRClientProtocol.class, protocolHandler, address, conf, appContext.getClientToAMTokenSecretManager(), conf.getInt(MRJobConfig.MR_AM_JOB_CLIENT_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT), MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE);
        // Enable service authorization?
        if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
            refreshServiceAcls(conf, new MRAMPolicyProvider());
        }
        server.start();
        this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(), server.getListenerAddress().getPort());
        LOG.info("Instantiated MRClientService at " + this.bindAddress);
        try {
            // Explicitly disabling SSL for map reduce task as we can't allow MR users
            // to gain access to keystore file for opening SSL listener. We can trust
            // RM/NM to issue SSL certificates but definitely not MR-AM as it is
            // running in user-land.
            webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").withHttpPolicy(conf, Policy.HTTP_ONLY).start(new AMWebApp());
        } catch (Exception e) {
            LOG.error("Webapps failed to start. Ignoring for now:", e);
        }
        super.serviceStart();
    }

    void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) {
        this.server.refreshServiceAcl(configuration, policyProvider);
    }

    @Override
    protected void serviceStop() throws Exception {
        if (server != null) {
            server.stop();
        }
        if (webApp != null) {
            webApp.stop();
        }
        super.serviceStop();
    }

    @Override
    public InetSocketAddress getBindAddress() {
        return bindAddress;
    }

    @Override
    public int getHttpPort() {
        return webApp.port();
    }

    class MRClientProtocolHandler implements MRClientProtocol {

        private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

        @Override
        public InetSocketAddress getConnectAddress() {
            return getBindAddress();
        }

        private Job verifyAndGetJob(JobId jobID, JobACL accessType) throws IOException {
            Job job = appContext.getJob(jobID);
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            if (!job.checkAccess(ugi, accessType)) {
                throw new AccessControlException("User " + ugi.getShortUserName() + " cannot perform operation " + accessType.name() + " on " + jobID);
            }
            return job;
        }

        private Task verifyAndGetTask(TaskId taskID, JobACL accessType) throws IOException {
            Task task = verifyAndGetJob(taskID.getJobId(), accessType).getTask(taskID);
            if (task == null) {
                throw new IOException("Unknown Task " + taskID);
            }
            return task;
        }

        private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, JobACL accessType) throws IOException {
            TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), accessType).getAttempt(attemptID);
            if (attempt == null) {
                throw new IOException("Unknown TaskAttempt " + attemptID);
            }
            return attempt;
        }

        @Override
        public GetCountersResponse getCounters(GetCountersRequest request) throws IOException {
            JobId jobId = request.getJobId();
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
            GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
            response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
            return response;
        }

        @Override
        public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException {
            JobId jobId = request.getJobId();
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
            GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class);
            if (job != null) {
                response.setJobReport(job.getReport());
            } else {
                response.setJobReport(null);
            }
            return response;
        }

        @Override
        public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class);
            response.setTaskAttemptReport(verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport());
            return response;
        }

        @Override
        public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws IOException {
            TaskId taskId = request.getTaskId();
            GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class);
            response.setTaskReport(verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport());
            return response;
        }

        @Override
        public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(GetTaskAttemptCompletionEventsRequest request) throws IOException {
            JobId jobId = request.getJobId();
            int fromEventId = request.getFromEventId();
            int maxEvents = request.getMaxEvents();
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
            GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
            response.addAllCompletionEvents(Arrays.asList(job.getTaskAttemptCompletionEvents(fromEventId, maxEvents)));
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public KillJobResponse killJob(KillJobRequest request) throws IOException {
            JobId jobId = request.getJobId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Kill job " + jobId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetJob(jobId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new JobDiagnosticsUpdateEvent(jobId, message));
            appContext.getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_KILL));
            KillJobResponse response = recordFactory.newRecordInstance(KillJobResponse.class);
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public KillTaskResponse killTask(KillTaskRequest request) throws IOException {
            TaskId taskId = request.getTaskId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Kill task " + taskId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new TaskEvent(taskId, TaskEventType.T_KILL));
            KillTaskResponse response = recordFactory.newRecordInstance(KillTaskResponse.class);
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Kill task attempt " + taskAttemptId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
            appContext.getEventHandler().handle(new TaskAttemptEvent(taskAttemptId, TaskAttemptEventType.TA_KILL));
            KillTaskAttemptResponse response = recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
            return response;
        }

        @Override
        public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
            response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getDiagnostics());
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Fail task attempt " + taskAttemptId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
            appContext.getEventHandler().handle(new TaskAttemptEvent(taskAttemptId, TaskAttemptEventType.TA_FAILMSG));
            FailTaskAttemptResponse response = recordFactory.newRecordInstance(FailTaskAttemptResponse.class);
            return response;
        }

        private final Object getTaskReportsLock = new Object();

        @Override
        public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws IOException {
            JobId jobId = request.getJobId();
            TaskType taskType = request.getTaskType();
            GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
            Collection<Task> tasks = job.getTasks(taskType).values();
            LOG.info("Getting task report for " + taskType + "   " + jobId + ". Report-size will be " + tasks.size());
            // Take lock to allow only one call, otherwise heap will blow up because
            // of counters in the report when there are multiple callers.
            synchronized (getTaskReportsLock) {
                for (Task task : tasks) {
                    response.addTaskReport(task.getReport());
                }
            }
            return response;
        }

        @Override
        public GetDelegationTokenResponse getDelegationToken(GetDelegationTokenRequest request) throws IOException {
            throw new IOException("MR AM not authorized to issue delegation" + " token");
        }

        @Override
        public RenewDelegationTokenResponse renewDelegationToken(RenewDelegationTokenRequest request) throws IOException {
            throw new IOException("MR AM not authorized to renew delegation" + " token");
        }

        @Override
        public CancelDelegationTokenResponse cancelDelegationToken(CancelDelegationTokenRequest request) throws IOException {
            throw new IOException("MR AM not authorized to cancel delegation" + " token");
        }
    }

    public WebApp getWebApp() {
        return webApp;
    }
}
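
On the client side, the counterpart of MRClientService is an MRClientProtocol proxy obtained over YarnRPC. A minimal sketch (not from the original sources; the AM bind address and the fetchReport helper name are assumptions here, with the address normally coming from the RM's application report):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;

public class MRClientProtocolSketch {
    public static JobReport fetchReport(InetSocketAddress amAddress, String jid) throws Exception {
        Configuration conf = new Configuration();
        // Proxy for the same protocol/handler pair the service registers in serviceStart().
        MRClientProtocol proxy = (MRClientProtocol) YarnRPC.create(conf)
            .getProxy(MRClientProtocol.class, amAddress, conf);
        GetJobReportRequest request = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(GetJobReportRequest.class);
        request.setJobId(MRApps.toJobID(jid));
        return proxy.getJobReport(request).getJobReport();
    }
}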

16 Source : MRClientService.java
with Apache License 2.0
from naver

/**
 * This module is responsible for talking to the
 * jobclient (user facing).
 */
public class MRClientService extends AbstractService implements ClientService {

    static final Log LOG = LogFactory.getLog(MRClientService.class);

    private MRClientProtocol protocolHandler;

    private Server server;

    private WebApp webApp;

    private InetSocketAddress bindAddress;

    private AppContext appContext;

    public MRClientService(AppContext appContext) {
        super(MRClientService.class.getName());
        this.appContext = appContext;
        this.protocolHandler = new MRClientProtocolHandler();
    }

    protected void serviceStart() throws Exception {
        Configuration conf = getConfig();
        YarnRPC rpc = YarnRPC.create(conf);
        InetSocketAddress address = new InetSocketAddress(0);
        server = rpc.getServer(MRClientProtocol.class, protocolHandler, address, conf, appContext.getClientToAMTokenSecretManager(), conf.getInt(MRJobConfig.MR_AM_JOB_CLIENT_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT), MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE);
        // Enable service authorization?
        if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
            refreshServiceAcls(conf, new MRAMPolicyProvider());
        }
        server.start();
        this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(), server.getListenerAddress().getPort());
        LOG.info("Instantiated MRClientService at " + this.bindAddress);
        try {
            // Explicitly disabling SSL for map reduce task as we can't allow MR users
            // to gain access to keystore file for opening SSL listener. We can trust
            // RM/NM to issue SSL certificates but definitely not MR-AM as it is
            // running in user-land.
            webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").withHttpPolicy(conf, Policy.HTTP_ONLY).start(new AMWebApp());
        } catch (Exception e) {
            LOG.error("Webapps failed to start. Ignoring for now:", e);
        }
        super.serviceStart();
    }

    void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) {
        this.server.refreshServiceAcl(configuration, policyProvider);
    }

    @Override
    protected void serviceStop() throws Exception {
        if (server != null) {
            server.stop();
        }
        if (webApp != null) {
            webApp.stop();
        }
        super.serviceStop();
    }

    @Override
    public InetSocketAddress getBindAddress() {
        return bindAddress;
    }

    @Override
    public int getHttpPort() {
        return webApp.port();
    }

    class MRClientProtocolHandler implements MRClientProtocol {

        private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

        @Override
        public InetSocketAddress getConnectAddress() {
            return getBindAddress();
        }

        private Job verifyAndGetJob(JobId jobID, JobACL accessType, boolean exceptionThrow) throws IOException {
            Job job = appContext.getJob(jobID);
            if (job == null && exceptionThrow) {
                throw new IOException("Unknown Job " + jobID);
            }
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            if (job != null && !job.checkAccess(ugi, accessType)) {
                throw new AccessControlException("User " + ugi.getShortUserName() + " cannot perform operation " + accessType.name() + " on " + jobID);
            }
            return job;
        }

        private Task verifyAndGetTask(TaskId taskID, JobACL accessType) throws IOException {
            Task task = verifyAndGetJob(taskID.getJobId(), accessType, true).getTask(taskID);
            if (task == null) {
                throw new IOException("Unknown Task " + taskID);
            }
            return task;
        }

        private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, JobACL accessType) throws IOException {
            TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), accessType).getAttempt(attemptID);
            if (attempt == null) {
                throw new IOException("Unknown TaskAttempt " + attemptID);
            }
            return attempt;
        }

        @Override
        public GetCountersResponse getCounters(GetCountersRequest request) throws IOException {
            JobId jobId = request.getJobId();
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
            GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
            response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
            return response;
        }

        @Override
        public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException {
            JobId jobId = request.getJobId();
            // false retains old behavior: an unknown job yields a null report instead of an exception
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, false);
            GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class);
            if (job != null) {
                response.setJobReport(job.getReport());
            } else {
                response.setJobReport(null);
            }
            return response;
        }

        @Override
        public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class);
            response.setTaskAttemptReport(verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport());
            return response;
        }

        @Override
        public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws IOException {
            TaskId taskId = request.getTaskId();
            GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class);
            response.setTaskReport(verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport());
            return response;
        }

        @Override
        public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(GetTaskAttemptCompletionEventsRequest request) throws IOException {
            JobId jobId = request.getJobId();
            int fromEventId = request.getFromEventId();
            int maxEvents = request.getMaxEvents();
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
            GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
            response.addAllCompletionEvents(Arrays.asList(job.getTaskAttemptCompletionEvents(fromEventId, maxEvents)));
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public KillJobResponse killJob(KillJobRequest request) throws IOException {
            JobId jobId = request.getJobId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Kill job " + jobId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetJob(jobId, JobACL.MODIFY_JOB, false);
            appContext.getEventHandler().handle(new JobDiagnosticsUpdateEvent(jobId, message));
            appContext.getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_KILL));
            KillJobResponse response = recordFactory.newRecordInstance(KillJobResponse.class);
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public KillTaskResponse killTask(KillTaskRequest request) throws IOException {
            TaskId taskId = request.getTaskId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Kill task " + taskId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetTask(taskId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new TaskEvent(taskId, TaskEventType.T_KILL));
            KillTaskResponse response = recordFactory.newRecordInstance(KillTaskResponse.class);
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Kill task attempt " + taskAttemptId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
            appContext.getEventHandler().handle(new TaskAttemptEvent(taskAttemptId, TaskAttemptEventType.TA_KILL));
            KillTaskAttemptResponse response = recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
            return response;
        }

        @Override
        public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
            response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getDiagnostics());
            return response;
        }

        @SuppressWarnings("unchecked")
        @Override
        public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws IOException {
            TaskAttemptId taskAttemptId = request.getTaskAttemptId();
            UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
            String message = "Fail task attempt " + taskAttemptId + " received from " + callerUGI + " at " + Server.getRemoteAddress();
            LOG.info(message);
            verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
            appContext.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
            appContext.getEventHandler().handle(new TaskAttemptEvent(taskAttemptId, TaskAttemptEventType.TA_FAILMSG));
            FailTaskAttemptResponse response = recordFactory.newRecordInstance(FailTaskAttemptResponse.class);
            return response;
        }

        private final Object getTaskReportsLock = new Object();

        @Override
        public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws IOException {
            JobId jobId = request.getJobId();
            TaskType taskType = request.getTaskType();
            GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
            Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
            Collection<Task> tasks = job.getTasks(taskType).values();
            LOG.info("Getting task report for " + taskType + "   " + jobId + ". Report-size will be " + tasks.size());
            // Take lock to allow only one call, otherwise heap will blow up because
            // of counters in the report when there are multiple callers.
            synchronized (getTaskReportsLock) {
                for (Task task : tasks) {
                    response.addTaskReport(task.getReport());
                }
            }
            return response;
        }

        @Override
        public GetDelegationTokenResponse getDelegationToken(GetDelegationTokenRequest request) throws IOException {
            throw new IOException("MR AM not authorized to issue delegation" + " token");
        }

        @Override
        public RenewDelegationTokenResponse renewDelegationToken(RenewDelegationTokenRequest request) throws IOException {
            throw new IOException("MR AM not authorized to renew delegation" + " token");
        }

        @Override
        public CancelDelegationTokenResponse cancelDelegationToken(CancelDelegationTokenRequest request) throws IOException {
            throw new IOException("MR AM not authorized to cancel delegation" + " token");
        }
    }

    public WebApp getWebApp() {
        return webApp;
    }
}
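
This naver variant differs from the NJUJYB listing above in verifyAndGetJob's extra exceptionThrow flag: getJobReport passes false, so an unknown job produces a null JobReport rather than an IOException, retaining the older client-facing behavior. Callers therefore have to tolerate null; as a sketch, reusing the hypothetical fetchReport helper from the previous example:

JobReport report = MRClientProtocolSketch.fetchReport(amAddress, jid);
if (report == null) {
    // Compatibility path: this AM does not know the job.
    System.err.println("no report available for " + jid);
}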

15 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testTaskView() {
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = getTaskParams(appContext);
    App app = new App(appContext);
    app.setJob(appContext.getAllJobs().values().iterator().next());
    app.setTask(app.getJob().getTasks().values().iterator().next());
    WebAppTests.testPage(TaskPage.class, App.class, app, params);
}
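
A companion sketch in the same style (hypothetical, not taken from TestAMWebApp itself) renders the job page rather than the task page; WebAppTests.testPage also has an overload that takes no params map:

@Test
public void testJobViewSketch() {
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    App app = new App(appContext);
    app.setJob(appContext.getAllJobs().values().iterator().next());
    WebAppTests.testPage(JobPage.class, App.class, app);
}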

15 Source : TestJobImpl.java
with Apache License 2.0
from NJUJYB

private static StubbedJob createRunningStubbedJob(Configuration conf, Dispatcher dispatcher, int numSplits, AppContext appContext) {
    StubbedJob job = createStubbedJob(conf, dispatcher, numSplits, appContext);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(job.getID()));
    assertJobState(job, JobStateInternal.RUNNING);
    return job;
}
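
A test typically drives the returned job further by feeding more events through handle() and asserting the resulting internal state. For instance (a sketch, not from the original test; conf, dispatcher and appContext are assumed to be set up elsewhere, and JobImpl moves from RUNNING to KILL_WAIT on JOB_KILL):

StubbedJob job = createRunningStubbedJob(conf, dispatcher, 2, appContext);
job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
assertJobState(job, JobStateInternal.KILL_WAIT);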

15 Source : LocalContainerLauncher.java
with Apache License 2.0
from NJUJYB

/**
 * Runs the container task locally in a thread.
 * Since all (sub)tasks share the same local directory, they must be executed
 * sequentially in order to avoid creating/deleting the same files/dirs.
 */
public class LocalContainerLauncher extends AbstractService implements ContainerLauncher {

    private static final File curDir = new File(".");

    private static final Log LOG = LogFactory.getLog(LocalContainerLauncher.class);

    private FileContext curFC = null;

    private final HashSet<File> localizedFiles;

    private final AppContext context;

    private final TaskUmbilicalProtocol umbilical;

    private ExecutorService taskRunner;

    private Thread eventHandler;

    private BlockingQueue<ContainerLauncherEvent> eventQueue = new LinkedBlockingQueue<ContainerLauncherEvent>();

    public LocalContainerLauncher(AppContext context, TaskUmbilicalProtocol umbilical) {
        super(LocalContainerLauncher.class.getName());
        this.context = context;
        this.umbilical = umbilical;
        // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
        // (TODO/FIXME:  pointless to use RPC to talk to self; should create
        // LocalTaskAttemptListener or similar:  implement umbilical protocol
        // but skip RPC stuff)
        try {
            curFC = FileContext.getFileContext(curDir.toURI());
        } catch (UnsupportedFileSystemException ufse) {
            LOG.error("Local filesystem " + curDir.toURI().toString() + " is unsupported?? (should never happen)");
        }
        // Save list of files/dirs that are supposed to be present so can delete
        // any extras created by one task before starting subsequent task.  Note
        // that there's no protection against deleted or renamed localization;
        // users who do that get what they deserve (and will have to disable
        // uberization in order to run correctly).
        File[] curLocalFiles = curDir.listFiles();
        localizedFiles = new HashSet<File>(curLocalFiles.length);
        for (int j = 0; j < curLocalFiles.length; ++j) {
            localizedFiles.add(curLocalFiles[j]);
        }
    // Relocalization note/future FIXME (per chrisdo, 20110315):  At moment,
    // full localization info is in AppSubmissionContext passed from client to
    // RM and then to NM for AM-container launch:  no difference between AM-
    // localization and MapTask- or ReduceTask-localization, so can assume all
    // OK.  Longer-term, will need to override uber-AM container-localization
    // request ("needed resources") with union of regular-AM-resources + task-
    // resources (and, if maps and reduces ever differ, then union of all three
    // types), OR will need localizer service/API that uber-AM can request
    // after running (e.g., "localizeForTask()" or "localizeForMapTask()").
    }

    public void serviceStart() throws Exception {
        // create a single thread for serial execution of tasks
        // make it a daemon thread so that the process can exit even if the task is
        // not interruptible
        taskRunner = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
        // create and start an event handling thread
        eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
        eventHandler.start();
        super.serviceStart();
    }

    public void serviceStop() throws Exception {
        if (eventHandler != null) {
            eventHandler.interrupt();
        }
        if (taskRunner != null) {
            taskRunner.shutdownNow();
        }
        super.serviceStop();
    }

    @Override
    public void handle(ContainerLauncherEvent event) {
        try {
            eventQueue.put(event);
        } catch (InterruptedException e) {
            // FIXME? YarnRuntimeException is "for runtime exceptions only"
            throw new YarnRuntimeException(e);
        }
    }

    /*
   * Uber-AM lifecycle/ordering ("normal" case):
   *
   * - [somebody] sends TA_ASSIGNED
   *   - handled by ContainerAssignedTransition (TaskAttemptImpl.java)
   *     - creates "remoteTask" for us == real Task
   *     - sends CONTAINER_REMOTE_LAUNCH
   *     - TA: UNASSIGNED -> ASSIGNED
   * - CONTAINER_REMOTE_LAUNCH handled by LocalContainerLauncher (us)
   *   - sucks "remoteTask" out of TaskAttemptImpl via getRemoteTask()
   *   - sends TA_CONTAINER_LAUNCHED
   *     [[ elsewhere...
   *       - TA_CONTAINER_LAUNCHED handled by LaunchedContainerTransition
   *         - registers "remoteTask" with TaskAttemptListener (== umbilical)
   *         - NUKES "remoteTask"
   *         - sends T_ATTEMPT_LAUNCHED (Task: SCHEDULED -> RUNNING)
   *         - TA: ASSIGNED -> RUNNING
   *     ]]
   *   - runs Task (runSubMap() or runSubReduce())
   *     - TA can safely send TA_UPDATE since in RUNNING state
   */
    private class EventHandler implements Runnable {

        // doneWithMaps and finishedSubMaps are accessed from only
        // one thread. Therefore, no need to make them volatile.
        private boolean doneWithMaps = false;

        private int finishedSubMaps = 0;

        private final Map<TaskAttemptId, Future<?>> futures = new ConcurrentHashMap<TaskAttemptId, Future<?>>();

        EventHandler() {
        }

        @SuppressWarnings("unchecked")
        @Override
        public void run() {
            ContainerLauncherEvent event = null;
            // Collect locations of map outputs to give to reduces
            final Map<TaskAttemptID, MapOutputFile> localMapFiles = new HashMap<TaskAttemptID, MapOutputFile>();
            // _must_ either run subtasks sequentially or accept expense of new JVMs
            // (i.e., fork()), else will get weird failures when maps try to create/
            // write same dirname or filename:  no chdir() in Java
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    // mostly via T_KILL? JOB_KILL?
                    LOG.error("Returning, interrupted : " + e);
                    break;
                }
                LOG.info("Processing the event " + event.toString());
                if (event.getType() == EventType.CONTAINER_REMOTE_LAUNCH) {
                    final ContainerRemoteLaunchEvent launchEv = (ContainerRemoteLaunchEvent) event;
                    // execute the task on a separate thread
                    Future<?> future = taskRunner.submit(new Runnable() {

                        public void run() {
                            runTask(launchEv, localMapFiles);
                        }
                    });
                    // remember the current attempt
                    futures.put(event.getTaskAttemptID(), future);
                } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
                    // cancel (and interrupt) the current running task associated with the
                    // event
                    TaskAttemptId taId = event.getTaskAttemptID();
                    Future<?> future = futures.remove(taId);
                    if (future != null) {
                        LOG.info("canceling the task attempt " + taId);
                        future.cancel(true);
                    }
                    // send "cleaned" event to task attempt to move us from
                    // SUCCESS_CONTAINER_CLEANUP to SUCCEEDED state (or
                    // {FAIL|KILL}_CONTAINER_CLEANUP to {FAIL|KILL}_TASK_CLEANUP)
                    context.getEventHandler().handle(new TaskAttemptEvent(taId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
                } else {
                    LOG.warn("Ignoring unexpected event " + event.toString());
                }
            }
        }

        @SuppressWarnings("unchecked")
        private void runTask(ContainerRemoteLaunchEvent launchEv, Map<TaskAttemptID, MapOutputFile> localMapFiles) {
            TaskAttemptId attemptID = launchEv.getTaskAttemptID();
            Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId());
            int numMapTasks = job.getTotalMaps();
            int numReduceTasks = job.getTotalReduces();
            // YARN (tracking) Task:
            org.apache.hadoop.mapreduce.v2.app.job.Task ytask = job.getTask(attemptID.getTaskId());
            // classic mapred Task:
            org.apache.hadoop.mapred.Task remoteTask = launchEv.getRemoteTask();
            // after "launching," send launched event to task attempt to move
            // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
            // do getRemoteTask() call first)
            // There is no port number because we are not really talking to a task
            // tracker.  The shuffle is just done through local files.  So the
            // port number is set to -1 in this case.
            context.getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(attemptID, -1));
            if (numMapTasks == 0) {
                doneWithMaps = true;
            }
            try {
                if (remoteTask.isMapOrReduce()) {
                    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
                    jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
                    if (remoteTask.isMapTask()) {
                        jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
                    } else {
                        jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
                    }
                    context.getEventHandler().handle(jce);
                }
                runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks, (numReduceTasks > 0), localMapFiles);
            } catch (RuntimeException re) {
                JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
                jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
                context.getEventHandler().handle(jce);
                // this is our signal that the subtask failed in some way, so
                // simulate a failed JVM/container and send a container-completed
                // event to task attempt (i.e., move state machine from RUNNING
                // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
                context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
            } catch (IOException ioe) {
                // if umbilical itself barfs (in error-handler of runSubMap()),
                // we're pretty much hosed, so do what YarnChild main() does
                // (i.e., exit clumsily--but can never happen, so no worries!)
                LOG.fatal("oopsie...  this can never happen: " + StringUtils.stringifyException(ioe));
                ExitUtil.terminate(-1);
            } finally {
                // remove my future
                if (futures.remove(attemptID) != null) {
                    LOG.info("removed attempt " + attemptID + " from the futures to keep track of");
                }
            }
        }

        private void runSubtask(org.apache.hadoop.mapred.Task task, final TaskType taskType, TaskAttemptId attemptID, final int numMapTasks, boolean renameOutputs, Map<TaskAttemptID, MapOutputFile> localMapFiles) throws RuntimeException, IOException {
            org.apache.hadoop.mapred.TaskAttemptID classicAttemptID = TypeConverter.fromYarn(attemptID);
            try {
                JobConf conf = new JobConf(getConfig());
                conf.set(JobContext.TASK_ID, task.getTaskID().toString());
                conf.set(JobContext.TASK_ATTEMPT_ID, classicAttemptID.toString());
                conf.setBoolean(JobContext.TASK_ISMAP, (taskType == TaskType.MAP));
                conf.setInt(JobContext.TASK_PARTITION, task.getPartition());
                conf.set(JobContext.ID, task.getJobID().toString());
                // Use the AM's local dir env to generate the intermediate step
                // output files
                String[] localSysDirs = StringUtils.getTrimmedStrings(System.getenv(Environment.LOCAL_DIRS.name()));
                conf.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
                LOG.info(MRConfig.LOCAL_DIR + " for uber task: " + conf.get(MRConfig.LOCAL_DIR));
                // mark this as an uberized subtask so it can set task counter
                // (longer-term/FIXME:  could redefine as job counter and send
                // "JobCounterEvent" to JobImpl on [successful] completion of subtask;
                // will need new Job state-machine transition and JobImpl jobCounters
                // map to handle)
                conf.setBoolean("mapreduce.task.uberized", true);
                // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
                // etc.), or just assume/hope the state machine(s) and uber-AM work
                // as expected?
                if (taskType == TaskType.MAP) {
                    if (doneWithMaps) {
                        LOG.error("CONTAINER_REMOTE_LAUNCH contains a map task (" + attemptID + "), but should be finished with maps");
                        throw new RuntimeException();
                    }
                    MapTask map = (MapTask) task;
                    map.setConf(conf);
                    map.run(conf, umbilical);
                    if (renameOutputs) {
                        MapOutputFile renamed = renameMapOutputForReduce(conf, attemptID, map.getMapOutputFile());
                        localMapFiles.put(classicAttemptID, renamed);
                    }
                    relocalize();
                    if (++finishedSubMaps == numMapTasks) {
                        doneWithMaps = true;
                    }
                } else /* TaskType.REDUCE */
                {
                    if (!doneWithMaps) {
                        // check if event-queue empty?  whole idea of counting maps vs.
                        // checking event queue is a tad wacky...but could enforce ordering
                        // (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?):
                        // doesn't send reduce event until maps all done]
                        LOG.error("CONTAINER_REMOTE_LAUNCH contains a reduce task (" + attemptID + "), but not yet finished with maps");
                        throw new RuntimeException();
                    }
                    // a.k.a. "mapreduce.jobtracker.address" in LocalJobRunner:
                    // set framework name to local to make task local
                    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
                    // bypreplaced shuffle
                    conf.set(MRConfig.MASTER_ADDRESS, "local");
                    ReduceTask reduce = (ReduceTask) task;
                    reduce.setLocalMapFiles(localMapFiles);
                    reduce.setConf(conf);
                    reduce.run(conf, umbilical);
                    relocalize();
                }
            } catch (FSError e) {
                LOG.fatal("FSError from child", e);
                // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
                if (!ShutdownHookManager.get().isShutdownInProgress()) {
                    umbilical.fsError(classicAttemptID, e.getMessage());
                }
                throw new RuntimeException();
            } catch (Exception exception) {
                LOG.warn("Exception running local (uberized) 'child' : " + StringUtils.stringifyException(exception));
                try {
                    if (task != null) {
                        // do cleanup for the task
                        task.taskCleanup(umbilical);
                    }
                } catch (Exception e) {
                    LOG.info("Exception cleaning up: " + StringUtils.stringifyException(e));
                }
                // Report back any failures, for diagnostic purposes
                umbilical.reportDiagnosticInfo(classicAttemptID, StringUtils.stringifyException(exception));
                throw new RuntimeException();
            } catch (Throwable throwable) {
                LOG.fatal("Error running local (uberized) 'child' : " + StringUtils.stringifyException(throwable));
                if (!ShutdownHookManager.get().isShutdownInProgress()) {
                    Throwable tCause = throwable.getCause();
                    String cause = (tCause == null) ? throwable.getMessage() : StringUtils.stringifyException(tCause);
                    umbilical.fatalError(classicAttemptID, cause);
                }
                throw new RuntimeException();
            }
        }

        /**
         * Also within the local filesystem, we need to restore the initial state
         * of the directory as much as possible.  Compare current contents against
         * the saved original state and nuke everything that doesn't belong, with
         * the exception of the renamed map outputs.
         *
         * Any jobs that go out of their way to rename or delete things from the
         * local directory are considered broken and deserve what they get...
         */
        private void relocalize() {
            File[] curLocalFiles = curDir.listFiles();
            for (int j = 0; j < curLocalFiles.length; ++j) {
                if (!localizedFiles.contains(curLocalFiles[j])) {
                    // found one that wasn't there before:  delete it
                    boolean deleted = false;
                    try {
                        if (curFC != null) {
                            // this is recursive, unlike File delete():
                            deleted = curFC.delete(new Path(curLocalFiles[j].getName()), true);
                        }
                    } catch (IOException e) {
                        deleted = false;
                    }
                    if (!deleted) {
                        LOG.warn("Unable to delete unexpected local file/dir " + curLocalFiles[j].getName() + ": insufficient permissions?");
                    }
                }
            }
        }
    }

    // end EventHandler
    /**
     * Within the _local_ filesystem (not HDFS), all activity takes place within
     * a subdir inside one of the LOCAL_DIRS
     * (${local.dir}/usercache/$user/appcache/$appId/$contId/),
     * and all sub-MapTasks create the same filename ("file.out").  Rename that
     * to something unique (e.g., "map_0.out") to avoid possible collisions.
     *
     * Longer-term, we'll modify [something] to use TaskAttemptID-based
     * filenames instead of "file.out". (All of this is entirely internal,
     * so there are no particular compatibility issues.)
     */
    @VisibleForTesting
    protected static MapOutputFile renameMapOutputForReduce(JobConf conf, TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
        FileSystem localFs = FileSystem.getLocal(conf);
        // move map output to reduce input
        Path mapOut = subMapOutputFile.getOutputFile();
        FileStatus mStatus = localFs.getFileStatus(mapOut);
        Path reduceIn = subMapOutputFile.getInputFileForWrite(TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
        Path mapOutIndex = subMapOutputFile.getOutputIndexFile();
        Path reduceInIndex = new Path(reduceIn.toString() + ".index");
        if (LOG.isDebugEnabled()) {
            LOG.debug("Renaming map output file for task attempt " + mapId.toString() + " from original location " + mapOut.toString() + " to destination " + reduceIn.toString());
        }
        if (!localFs.mkdirs(reduceIn.getParent())) {
            throw new IOException("Mkdirs failed to create " + reduceIn.getParent().toString());
        }
        if (!localFs.rename(mapOut, reduceIn))
            throw new IOException("Couldn't rename " + mapOut);
        if (!localFs.rename(mapOutIndex, reduceInIndex))
            throw new IOException("Couldn't rename " + mapOutIndex);
        return new RenamedMapOutputFile(reduceIn);
    }

    private static class RenamedMapOutputFile extends MapOutputFile {

        private Path path;

        public RenamedMapOutputFile(Path path) {
            this.path = path;
        }

        @Override
        public Path getOutputFile() throws IOException {
            return path;
        }

        @Override
        public Path getOutputFileForWrite(long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputFileForWriteInVolume(Path existing) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputIndexFile() throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputIndexFileForWrite(long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputIndexFileForWriteInVolume(Path existing) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillFile(int spillNumber) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillFileForWrite(int spillNumber, long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillIndexFile(int spillNumber) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillIndexFileForWrite(int spillNumber, long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getInputFile(int mapId) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getInputFileForWrite(TaskID mapId, long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void removeAll() throws IOException {
            throw new UnsupportedOperationException();
        }
    }
}
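
A minimal illustrative sketch, not taken from the listing above, of the collision-avoidance idea the javadoc describes: every sub-MapTask writes the same "file.out", so an uber-AM must move each output to a unique per-map name before the next subtask runs. The class name and path layout here are hypothetical; FileSystem.getLocal() and rename() are the real Hadoop calls used by renameMapOutputForReduce() itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameSketch {

    // Move a just-written map output to a unique name (e.g., "map_0.out")
    // so the next sub-MapTask can safely create its own "file.out".
    static Path renameForMap(Configuration conf, Path mapOut, int mapIndex) throws IOException {
        FileSystem localFs = FileSystem.getLocal(conf);
        Path unique = new Path(mapOut.getParent(), "map_" + mapIndex + ".out");
        if (!localFs.rename(mapOut, unique)) {
            throw new IOException("Couldn't rename " + mapOut);
        }
        return unique;
    }
}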

15 Source : LocalContainerLauncher.java
with Apache License 2.0
from naver

/**
 * Runs the container task locally in a thread.
 * Since all (sub)tasks share the same local directory, they must be executed
 * sequentially in order to avoid creating/deleting the same files/dirs.
 */
public class LocalContainerLauncher extends AbstractService implements ContainerLauncher {

    private static final File curDir = new File(".");

    private static final Log LOG = LogFactory.getLog(LocalContainerLauncher.class);

    private FileContext curFC = null;

    private final HashSet<File> localizedFiles;

    private final AppContext context;

    private final TaskUmbilicalProtocol umbilical;

    private ExecutorService taskRunner;

    private Thread eventHandler;

    private byte[] encryptedSpillKey = new byte[] { 0 };

    private BlockingQueue<ContainerLauncherEvent> eventQueue = new LinkedBlockingQueue<ContainerLauncherEvent>();

    public LocalContainerLauncher(AppContext context, TaskUmbilicalProtocol umbilical) {
        super(LocalContainerLauncher.class.getName());
        this.context = context;
        this.umbilical = umbilical;
        // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
        // (TODO/FIXME:  pointless to use RPC to talk to self; should create
        // LocalTaskAttemptListener or similar:  implement umbilical protocol
        // but skip RPC stuff)
        try {
            curFC = FileContext.getFileContext(curDir.toURI());
        } catch (UnsupportedFileSystemException ufse) {
            LOG.error("Local filesystem " + curDir.toURI().toString() + " is unsupported?? (should never happen)");
        }
        // Save list of files/dirs that are supposed to be present so can delete
        // any extras created by one task before starting subsequent task.  Note
        // that there's no protection against deleted or renamed localization;
        // users who do that get what they deserve (and will have to disable
        // uberization in order to run correctly).
        File[] curLocalFiles = curDir.listFiles();
        localizedFiles = new HashSet<File>(curLocalFiles.length);
        for (int j = 0; j < curLocalFiles.length; ++j) {
            localizedFiles.add(curLocalFiles[j]);
        }
    // Relocalization note/future FIXME (per chrisdo, 20110315):  At moment,
    // full localization info is in AppSubmissionContext passed from client to
    // RM and then to NM for AM-container launch:  no difference between AM-
    // localization and MapTask- or ReduceTask-localization, so can assume all
    // OK.  Longer-term, will need to override uber-AM container-localization
    // request ("needed resources") with union of regular-AM-resources + task-
    // resources (and, if maps and reduces ever differ, then union of all three
    // types), OR will need localizer service/API that uber-AM can request
    // after running (e.g., "localizeForTask()" or "localizeForMapTask()").
    }

    public void serviceStart() throws Exception {
        // create a single thread for serial execution of tasks
        // make it a daemon thread so that the process can exit even if the task is
        // not interruptible
        taskRunner = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
        // create and start an event handling thread
        eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
        eventHandler.start();
        super.serviceStart();
    }

    public void serviceStop() throws Exception {
        if (eventHandler != null) {
            eventHandler.interrupt();
        }
        if (taskRunner != null) {
            taskRunner.shutdownNow();
        }
        super.serviceStop();
    }

    @Override
    public void handle(ContainerLauncherEvent event) {
        try {
            eventQueue.put(event);
        } catch (InterruptedException e) {
            // FIXME? YarnRuntimeException is "for runtime exceptions only"
            throw new YarnRuntimeException(e);
        }
    }

    public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
        if (encryptedSpillKey != null) {
            this.encryptedSpillKey = encryptedSpillKey;
        }
    }

    /*
   * Uber-AM lifecycle/ordering ("normal" case):
   *
   * - [somebody] sends TA_ASSIGNED
   *   - handled by ContainerAssignedTransition (TaskAttemptImpl.java)
   *     - creates "remoteTask" for us == real Task
   *     - sends CONTAINER_REMOTE_LAUNCH
   *     - TA: UNASSIGNED -> ASSIGNED
   * - CONTAINER_REMOTE_LAUNCH handled by LocalContainerLauncher (us)
   *   - sucks "remoteTask" out of TaskAttemptImpl via getRemoteTask()
   *   - sends TA_CONTAINER_LAUNCHED
   *     [[ elsewhere...
   *       - TA_CONTAINER_LAUNCHED handled by LaunchedContainerTransition
   *         - registers "remoteTask" with TaskAttemptListener (== umbilical)
   *         - NUKES "remoteTask"
   *         - sends T_ATTEMPT_LAUNCHED (Task: SCHEDULED -> RUNNING)
   *         - TA: ASSIGNED -> RUNNING
   *     ]]
   *   - runs Task (runSubMap() or runSubReduce())
   *     - TA can safely send TA_UPDATE since in RUNNING state
   */
    private class EventHandler implements Runnable {

        // doneWithMaps and finishedSubMaps are accessed from only
        // one thread. Therefore, no need to make them volatile.
        private boolean doneWithMaps = false;

        private int finishedSubMaps = 0;

        private final Map<TaskAttemptId, Future<?>> futures = new ConcurrentHashMap<TaskAttemptId, Future<?>>();

        EventHandler() {
        }

        @SuppressWarnings("unchecked")
        @Override
        public void run() {
            ContainerLauncherEvent event = null;
            // Collect locations of map outputs to give to reduces
            final Map<TaskAttemptID, MapOutputFile> localMapFiles = new HashMap<TaskAttemptID, MapOutputFile>();
            // _must_ either run subtasks sequentially or accept expense of new JVMs
            // (i.e., fork()), else will get weird failures when maps try to create/
            // write same dirname or filename:  no chdir() in Java
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    // mostly via T_KILL? JOB_KILL?
                    LOG.error("Returning, interrupted : " + e);
                    break;
                }
                LOG.info("Processing the event " + event.toString());
                if (event.getType() == EventType.CONTAINER_REMOTE_LAUNCH) {
                    final ContainerRemoteLaunchEvent launchEv = (ContainerRemoteLaunchEvent) event;
                    // execute the task on a separate thread
                    Future<?> future = taskRunner.submit(new Runnable() {

                        public void run() {
                            runTask(launchEv, localMapFiles);
                        }
                    });
                    // remember the current attempt
                    futures.put(event.getTaskAttemptID(), future);
                } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
                    // cancel (and interrupt) the current running task associated with the
                    // event
                    TaskAttemptId taId = event.getTaskAttemptID();
                    Future<?> future = futures.remove(taId);
                    if (future != null) {
                        LOG.info("canceling the task attempt " + taId);
                        future.cancel(true);
                    }
                    // send "cleaned" event to task attempt to move us from
                    // SUCCESS_CONTAINER_CLEANUP to SUCCEEDED state (or
                    // {FAIL|KILL}_CONTAINER_CLEANUP to {FAIL|KILL}_TASK_CLEANUP)
                    context.getEventHandler().handle(new TaskAttemptEvent(taId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
                } else {
                    LOG.warn("Ignoring unexpected event " + event.toString());
                }
            }
        }

        @SuppressWarnings("unchecked")
        private void runTask(ContainerRemoteLaunchEvent launchEv, Map<TaskAttemptID, MapOutputFile> localMapFiles) {
            TaskAttemptId attemptID = launchEv.getTaskAttemptID();
            Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId());
            int numMapTasks = job.getTotalMaps();
            int numReduceTasks = job.getTotalReduces();
            // YARN (tracking) Task:
            org.apache.hadoop.mapreduce.v2.app.job.Task ytask = job.getTask(attemptID.getTaskId());
            // classic mapred Task:
            org.apache.hadoop.mapred.Task remoteTask = launchEv.getRemoteTask();
            // after "launching," send launched event to task attempt to move
            // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
            // do getRemoteTask() call first)
            // There is no port number because we are not really talking to a task
            // tracker.  The shuffle is just done through local files.  So the
            // port number is set to -1 in this case.
            context.getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(attemptID, -1));
            if (numMapTasks == 0) {
                doneWithMaps = true;
            }
            try {
                if (remoteTask.isMapOrReduce()) {
                    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
                    jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
                    if (remoteTask.isMapTask()) {
                        jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
                    } else {
                        jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
                    }
                    context.getEventHandler().handle(jce);
                }
                runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks, (numReduceTasks > 0), localMapFiles);
            } catch (RuntimeException re) {
                JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
                jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
                context.getEventHandler().handle(jce);
                // this is our signal that the subtask failed in some way, so
                // simulate a failed JVM/container and send a container-completed
                // event to task attempt (i.e., move state machine from RUNNING
                // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
                context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
            } catch (IOException ioe) {
                // if umbilical itself barfs (in error-handler of runSubMap()),
                // we're pretty much hosed, so do what YarnChild main() does
                // (i.e., exit clumsily--but can never happen, so no worries!)
                LOG.fatal("oopsie...  this can never happen: " + StringUtils.stringifyException(ioe));
                ExitUtil.terminate(-1);
            } finally {
                // remove my future
                if (futures.remove(attemptID) != null) {
                    LOG.info("removed attempt " + attemptID + " from the futures to keep track of");
                }
            }
        }

        private void runSubtask(org.apache.hadoop.mapred.Task task, final TaskType taskType, TaskAttemptId attemptID, final int numMapTasks, boolean renameOutputs, Map<TaskAttemptID, MapOutputFile> localMapFiles) throws RuntimeException, IOException {
            org.apache.hadoop.mapred.TaskAttemptID classicAttemptID = TypeConverter.fromYarn(attemptID);
            try {
                JobConf conf = new JobConf(getConfig());
                conf.set(JobContext.TASK_ID, task.getTaskID().toString());
                conf.set(JobContext.TASK_ATTEMPT_ID, classicAttemptID.toString());
                conf.setBoolean(JobContext.TASK_ISMAP, (taskType == TaskType.MAP));
                conf.setInt(JobContext.TASK_PARTITION, task.getPartition());
                conf.set(JobContext.ID, task.getJobID().toString());
                // Use the AM's local dir env to generate the intermediate step
                // output files
                String[] localSysDirs = StringUtils.getTrimmedStrings(System.getenv(Environment.LOCAL_DIRS.name()));
                conf.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
                LOG.info(MRConfig.LOCAL_DIR + " for uber task: " + conf.get(MRConfig.LOCAL_DIR));
                // mark this as an uberized subtask so it can set task counter
                // (longer-term/FIXME:  could redefine as job counter and send
                // "JobCounterEvent" to JobImpl on [successful] completion of subtask;
                // will need new Job state-machine transition and JobImpl jobCounters
                // map to handle)
                conf.setBoolean("mapreduce.task.uberized", true);
                // Check and handle Encrypted spill key
                task.setEncryptedSpillKey(encryptedSpillKey);
                YarnChild.setEncryptedSpillKeyIfRequired(task);
                // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
                // etc.), or just assume/hope the state machine(s) and uber-AM work
                // as expected?
                if (taskType == TaskType.MAP) {
                    if (doneWithMaps) {
                        LOG.error("CONTAINER_REMOTE_LAUNCH contains a map task (" + attemptID + "), but should be finished with maps");
                        throw new RuntimeException();
                    }
                    MapTask map = (MapTask) task;
                    map.setConf(conf);
                    map.run(conf, umbilical);
                    if (renameOutputs) {
                        MapOutputFile renamed = renameMapOutputForReduce(conf, attemptID, map.getMapOutputFile());
                        localMapFiles.put(classicAttemptID, renamed);
                    }
                    relocalize();
                    if (++finishedSubMaps == numMapTasks) {
                        doneWithMaps = true;
                    }
                } else /* TaskType.REDUCE */
                {
                    if (!doneWithMaps) {
                        // check if event-queue empty?  whole idea of counting maps vs.
                        // checking event queue is a tad wacky...but could enforce ordering
                        // (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?):
                        // doesn't send reduce event until maps all done]
                        LOG.error("CONTAINER_REMOTE_LAUNCH contains a reduce task (" + attemptID + "), but not yet finished with maps");
                        throw new RuntimeException();
                    }
                    // a.k.a. "mapreduce.jobtracker.address" in LocalJobRunner:
                    // set framework name to local to make task local
                    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
                    // bypass shuffle
                    conf.set(MRConfig.MASTER_ADDRESS, "local");
                    ReduceTask reduce = (ReduceTask) task;
                    reduce.setLocalMapFiles(localMapFiles);
                    reduce.setConf(conf);
                    reduce.run(conf, umbilical);
                    relocalize();
                }
            } catch (FSError e) {
                LOG.fatal("FSError from child", e);
                // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
                if (!ShutdownHookManager.get().isShutdownInProgress()) {
                    umbilical.fsError(classicAttemptID, e.getMessage());
                }
                throw new RuntimeException();
            } catch (Exception exception) {
                LOG.warn("Exception running local (uberized) 'child' : " + StringUtils.stringifyException(exception));
                try {
                    if (task != null) {
                        // do cleanup for the task
                        task.taskCleanup(umbilical);
                    }
                } catch (Exception e) {
                    LOG.info("Exception cleaning up: " + StringUtils.stringifyException(e));
                }
                // Report back any failures, for diagnostic purposes
                umbilical.reportDiagnosticInfo(classicAttemptID, StringUtils.stringifyException(exception));
                throw new RuntimeException();
            } catch (Throwable throwable) {
                LOG.fatal("Error running local (uberized) 'child' : " + StringUtils.stringifyException(throwable));
                if (!ShutdownHookManager.get().isShutdownInProgress()) {
                    Throwable tCause = throwable.getCause();
                    String cause = (tCause == null) ? throwable.getMessage() : StringUtils.stringifyException(tCause);
                    umbilical.fatalError(classicAttemptID, cause);
                }
                throw new RuntimeException();
            }
        }

        /**
         * Also within the local filesystem, we need to restore the initial state
         * of the directory as much as possible.  Compare current contents against
         * the saved original state and nuke everything that doesn't belong, with
         * the exception of the renamed map outputs.
         *
         * Any jobs that go out of their way to rename or delete things from the
         * local directory are considered broken and deserve what they get...
         */
        private void relocalize() {
            File[] curLocalFiles = curDir.listFiles();
            for (int j = 0; j < curLocalFiles.length; ++j) {
                if (!localizedFiles.contains(curLocalFiles[j])) {
                    // found one that wasn't there before:  delete it
                    boolean deleted = false;
                    try {
                        if (curFC != null) {
                            // this is recursive, unlike File delete():
                            deleted = curFC.delete(new Path(curLocalFiles[j].getName()), true);
                        }
                    } catch (IOException e) {
                        deleted = false;
                    }
                    if (!deleted) {
                        LOG.warn("Unable to delete unexpected local file/dir " + curLocalFiles[j].getName() + ": insufficient permissions?");
                    }
                }
            }
        }
    }

    // end EventHandler
    /**
     * Within the _local_ filesystem (not HDFS), all activity takes place within
     * a subdir inside one of the LOCAL_DIRS
     * (${local.dir}/usercache/$user/appcache/$appId/$contId/),
     * and all sub-MapTasks create the same filename ("file.out").  Rename that
     * to something unique (e.g., "map_0.out") to avoid possible collisions.
     *
     * Longer-term, we'll modify [something] to use TaskAttemptID-based
     * filenames instead of "file.out". (All of this is entirely internal,
     * so there are no particular compatibility issues.)
     */
    @VisibleForTesting
    protected static MapOutputFile renameMapOutputForReduce(JobConf conf, TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
        FileSystem localFs = FileSystem.getLocal(conf);
        // move map output to reduce input
        Path mapOut = subMapOutputFile.getOutputFile();
        FileStatus mStatus = localFs.getFileStatus(mapOut);
        Path reduceIn = subMapOutputFile.getInputFileForWrite(TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
        Path mapOutIndex = subMapOutputFile.getOutputIndexFile();
        Path reduceInIndex = new Path(reduceIn.toString() + ".index");
        if (LOG.isDebugEnabled()) {
            LOG.debug("Renaming map output file for task attempt " + mapId.toString() + " from original location " + mapOut.toString() + " to destination " + reduceIn.toString());
        }
        if (!localFs.mkdirs(reduceIn.getParent())) {
            throw new IOException("Mkdirs failed to create " + reduceIn.getParent().toString());
        }
        if (!localFs.rename(mapOut, reduceIn))
            throw new IOException("Couldn't rename " + mapOut);
        if (!localFs.rename(mapOutIndex, reduceInIndex))
            throw new IOException("Couldn't rename " + mapOutIndex);
        return new RenamedMapOutputFile(reduceIn);
    }

    private static class RenamedMapOutputFile extends MapOutputFile {

        private Path path;

        public RenamedMapOutputFile(Path path) {
            this.path = path;
        }

        @Override
        public Path getOutputFile() throws IOException {
            return path;
        }

        @Override
        public Path getOutputFileForWrite(long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputFileForWriteInVolume(Path existing) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputIndexFile() throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputIndexFileForWrite(long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getOutputIndexFileForWriteInVolume(Path existing) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillFile(int spillNumber) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillFileForWrite(int spillNumber, long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillIndexFile(int spillNumber) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getSpillIndexFileForWrite(int spillNumber, long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getInputFile(int mapId) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Path getInputFileForWrite(TaskID mapId, long size) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public void removeAll() throws IOException {
            throw new UnsupportedOperationException();
        }
    }
}
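
A hedged wiring sketch, not part of the listing above: how an uber-mode AM might stand up this launcher. It is placed in org.apache.hadoop.mapred because TaskUmbilicalProtocol is package-private (the TaskAttemptListenerImpl javadoc further down makes the same point); the class and variable names are illustrative.

package org.apache.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

public class UberLauncherWiring {

    static LocalContainerLauncher createAndStart(AppContext appContext, TaskUmbilicalProtocol umbilical, Configuration conf) {
        LocalContainerLauncher launcher = new LocalContainerLauncher(appContext, umbilical);
        // AbstractService lifecycle: init first, then start (spawns the uber-EventHandler thread)
        launcher.init(conf);
        launcher.start();
        // the caller is responsible for launcher.stop() on shutdown
        return launcher;
    }
}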

14 Source : TestHSWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testTasksView() {
    LOG.info("HsTasksPage");
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
    WebAppTests.testPage(HsTasksPage.class, AppContext.class, appContext, params);
}

14 Source : TestHSWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testJobCounterView() {
    LOG.info("JobCounterView");
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = TestAMWebApp.getJobParams(appContext);
    WebAppTests.testPage(HsCountersPage.class, AppContext.class, appContext, params);
}

14 Source : TestHSWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testJobCounterViewForKilledJob() {
    LOG.info("JobCounterViewForKilledJob");
    AppContext appContext = new MockAppContext(0, 1, 1, 1, true);
    Map<String, String> params = TestAMWebApp.getJobParams(appContext);
    WebAppTests.testPage(HsCountersPage.class, AppContext.class, appContext, params);
}

14 Source : TestHSWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testJobView() {
    LOG.info("HsJobPage");
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = TestAMWebApp.getJobParams(appContext);
    WebAppTests.testPage(HsJobPage.class, AppContext.class, appContext, params);
}

14 Source : TestHSWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testAttemptsView() {
    LOG.info("HsAttemptsPage");
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
    WebAppTests.testPage(HsAttemptsPage.class, AppContext.class, appContext, params);
}

14 Source : TestHSWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testTaskView() {
    LOG.info("HsTaskPage");
    AppContext appContext = new MockAppContext(0, 1, 1, 1);
    Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
    WebAppTests.testPage(HsTaskPage.class, AppContext.class, appContext, params);
}

14 Source : TestAMWebServices.java
with Apache License 2.0
from NJUJYB

public void verifyAMInfoGeneric(AppContext ctx, String id, String user, String name, long startedOn, long elapsedTime) {
    WebServicesTestUtils.checkStringMatch("id", ctx.getApplicationID().toString(), id);
    WebServicesTestUtils.checkStringMatch("user", ctx.getUser().toString(), user);
    WebServicesTestUtils.checkStringMatch("name", ctx.getApplicationName(), name);
    replacedertEquals("startedOn incorrect", ctx.getStartTime(), startedOn);
    replacedertTrue("elapsedTime not greater then 0", (elapsedTime > 0));
}
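
A minimal sketch, not from the listing above, of backing a check like verifyAMInfoGeneric with a Mockito-mocked AppContext instead of a MockAppContext; it only stubs the getters the check reads, and every value below is made up for illustration.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class MockedAmInfoContext {

    static AppContext stubbedContext() {
        AppContext ctx = mock(AppContext.class);
        // hypothetical cluster timestamp, app id, name, user, and start time
        when(ctx.getApplicationID()).thenReturn(ApplicationId.newInstance(1234L, 1));
        when(ctx.getApplicationName()).thenReturn("test-app");
        when(ctx.getUser()).thenReturn("testuser");
        when(ctx.getStartTime()).thenReturn(System.currentTimeMillis());
        return ctx;
    }
}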

14 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

@Test
public void testAppControllerIndex() {
    AppContext ctx = new MockAppContext(0, 1, 1, 1);
    Injector injector = WebAppTests.createMockInjector(AppContext.class, ctx);
    AppController controller = injector.getInstance(AppController.class);
    controller.index();
    assertEquals(ctx.getApplicationID().toString(), controller.get(APP_ID, ""));
}

14 Source : DefaultSpeculator.java
with Apache License 2.0
from NJUJYB

public class DefaultSpeculator extends AbstractService implements Speculator {

    private static final long ON_SCHEDULE = Long.MIN_VALUE;

    private static final long ALREADY_SPECULATING = Long.MIN_VALUE + 1;

    private static final long TOO_NEW = Long.MIN_VALUE + 2;

    private static final long PROGRESS_IS_GOOD = Long.MIN_VALUE + 3;

    private static final long NOT_RUNNING = Long.MIN_VALUE + 4;

    private static final long TOO_LATE_TO_SPECULATE = Long.MIN_VALUE + 5;

    private static final long SOONEST_RETRY_AFTER_NO_SPECULATE = 1000L * 1L;

    private static final long SOONEST_RETRY_AFTER_SPECULATE = 1000L * 15L;

    private static final double PROPORTION_RUNNING_TASKS_SPECULATABLE = 0.1;

    private static final double PROPORTION_TOTAL_TASKS_SPECULATABLE = 0.01;

    private static final int MINIMUM_ALLOWED_SPECULATIVE_TASKS = 10;

    private static final Log LOG = LogFactory.getLog(DefaultSpeculator.class);

    private final ConcurrentMap<TaskId, Boolean> runningTasks = new ConcurrentHashMap<TaskId, Boolean>();

    // Used to track any TaskAttempts that aren't heart-beating for a while, so
    // that we can aggressively speculate instead of waiting for task-timeout.
    private final ConcurrentMap<TaskAttemptId, TaskAttemptHistoryStatistics> runningTaskAttemptStatistics = new ConcurrentHashMap<TaskAttemptId, TaskAttemptHistoryStatistics>();

    // Regular heartbeat from tasks is every 3 secs. So if we don't get a
    // heartbeat in 9 secs (3 heartbeats), we simulate a heartbeat with no change
    // in progress.
    private static final long MAX_WAITING_TIME_FOR_HEARTBEAT = 9 * 1000;

    // These are the current needs, not the initial needs.  For each job, these
    // record the number of attempts that exist and that are actively
    // waiting for a container [as opposed to running or finished]
    private final ConcurrentMap<JobId, AtomicInteger> mapContainerNeeds = new ConcurrentHashMap<JobId, AtomicInteger>();

    private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds = new ConcurrentHashMap<JobId, AtomicInteger>();

    private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>();

    private final Configuration conf;

    private AppContext context;

    private Thread speculationBackgroundThread = null;

    private volatile boolean stopped = false;

    private BlockingQueue<SpeculatorEvent> eventQueue = new LinkedBlockingQueue<SpeculatorEvent>();

    private TaskRuntimeEstimator estimator;

    private BlockingQueue<Object> scanControl = new LinkedBlockingQueue<Object>();

    private final Clock clock;

    private final EventHandler<TaskEvent> eventHandler;

    public DefaultSpeculator(Configuration conf, AppContext context) {
        this(conf, context, context.getClock());
    }

    public DefaultSpeculator(Configuration conf, AppContext context, Clock clock) {
        this(conf, context, getEstimator(conf, context), clock);
    }

    static private TaskRuntimeEstimator getEstimator(Configuration conf, AppContext context) {
        TaskRuntimeEstimator estimator;
        try {
            // "yarn.mapreduce.job.task.runtime.estimator.clreplaced"
            Clreplaced<? extends TaskRuntimeEstimator> estimatorClreplaced = conf.getClreplaced(MRJobConfig.MR_AM_TASK_ESTIMATOR, LegacyTaskRuntimeEstimator.clreplaced, TaskRuntimeEstimator.clreplaced);
            Constructor<? extends TaskRuntimeEstimator> estimatorConstructor = estimatorClreplaced.getConstructor();
            estimator = estimatorConstructor.newInstance();
            estimator.contextualize(conf, context);
        } catch (InstantiationException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        } catch (IllegalAccessException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        } catch (InvocationTargetException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        } catch (NoSuchMethodException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        }
        return estimator;
    }

    // This constructor is designed to be called by other constructors.
    // However, it's public because we do use it in the test cases.
    // Normally we figure out our own estimator.
    public DefaultSpeculator(Configuration conf, AppContext context, TaskRuntimeEstimator estimator, Clock clock) {
        super(DefaultSpeculator.class.getName());
        this.conf = conf;
        this.context = context;
        this.estimator = estimator;
        this.clock = clock;
        this.eventHandler = context.getEventHandler();
    }

    /*   *************************************************************    */
    // This is the task-mongering that creates the two new threads -- one for
    // processing events from the event queue and one for periodically
    // looking for speculation opportunities
    @Override
    protected void serviceStart() throws Exception {
        Runnable speculationBackgroundCore = new Runnable() {

            @Override
            public void run() {
                while (!stopped && !Thread.currentThread().isInterrupted()) {
                    long backgroundRunStartTime = clock.getTime();
                    try {
                        int speculations = computeSpeculations();
                        long minimumRecomp = speculations > 0 ? SOONEST_RETRY_AFTER_SPECULATE : SOONEST_RETRY_AFTER_NO_SPECULATE;
                        long wait = Math.max(minimumRecomp, clock.getTime() - backgroundRunStartTime);
                        if (speculations > 0) {
                            LOG.info("We launched " + speculations + " speculations.  Sleeping " + wait + " milliseconds.");
                        }
                        Object pollResult = scanControl.poll(wait, TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        if (!stopped) {
                            LOG.error("Background thread returning, interrupted", e);
                        }
                        return;
                    }
                }
            }
        };
        speculationBackgroundThread = new Thread(speculationBackgroundCore, "DefaultSpeculator background processing");
        speculationBackgroundThread.start();
        super.serviceStart();
    }

    @Override
    protected void serviceStop() throws Exception {
        stopped = true;
        // this could be called before background thread is established
        if (speculationBackgroundThread != null) {
            speculationBackgroundThread.interrupt();
        }
        super.serviceStop();
    }

    @Override
    public void handleAttempt(TaskAttemptStatus status) {
        long timestamp = clock.getTime();
        statusUpdate(status, timestamp);
    }

    // This section is not part of the Speculator interface; it's used only for
    // testing
    public boolean eventQueueEmpty() {
        return eventQueue.isEmpty();
    }

    // This interface is intended to be used only for test cases.
    public void scanForSpeculations() {
        LOG.info("We got asked to run a debug speculation scan.");
        // debug
        System.out.println("We got asked to run a debug speculation scan.");
        System.out.println("There are " + scanControl.size() + " events stacked already.");
        scanControl.add(new Object());
        Thread.yield();
    }

    /*   *************************************************************    */
    // This section contains the code that gets run for a SpeculatorEvent
    private AtomicInteger containerNeed(TaskId taskID) {
        JobId jobID = taskID.getJobId();
        TaskType taskType = taskID.getTaskType();
        ConcurrentMap<JobId, AtomicInteger> relevantMap = taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
        AtomicInteger result = relevantMap.get(jobID);
        if (result == null) {
            relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
            result = relevantMap.get(jobID);
        }
        return result;
    }

    private synchronized void processSpeculatorEvent(SpeculatorEvent event) {
        switch(event.getType()) {
            case ATTEMPT_STATUS_UPDATE:
                statusUpdate(event.getReportedStatus(), event.getTimestamp());
                break;
            case TASK_CONTAINER_NEED_UPDATE:
                {
                    AtomicInteger need = containerNeed(event.getTaskID());
                    need.addAndGet(event.containersNeededChange());
                    break;
                }
            case ATTEMPT_START:
                {
                    LOG.info("ATTEMPT_START " + event.getTaskID());
                    estimator.enrollAttempt(event.getReportedStatus(), event.getTimestamp());
                    break;
                }
            case JOB_CREATE:
                {
                    LOG.info("JOB_CREATE " + event.getJobID());
                    estimator.contextualize(getConfig(), context);
                    break;
                }
        }
    }

    /**
     * Absorbs one TaskAttemptStatus
     *
     * @param reportedStatus the status report that we got from a task attempt
     *        that we want to fold into the speculation data for this job
     * @param timestamp the time this status corresponds to.  This matters
     *        because statuses contain progress.
     */
    protected void statusUpdate(TaskAttemptStatus reportedStatus, long timestamp) {
        String stateString = reportedStatus.taskState.toString();
        TaskAttemptId attemptID = reportedStatus.id;
        TaskId taskID = attemptID.getTaskId();
        Job job = context.getJob(taskID.getJobId());
        if (job == null) {
            return;
        }
        Task task = job.getTask(taskID);
        if (task == null) {
            return;
        }
        estimator.updateAttempt(reportedStatus, timestamp);
        if (stateString.equals(TaskAttemptState.RUNNING.name())) {
            runningTasks.putIfAbsent(taskID, Boolean.TRUE);
        } else {
            runningTasks.remove(taskID, Boolean.TRUE);
            if (!stateString.equals(TaskAttemptState.STARTING.name())) {
                runningTaskAttemptStatistics.remove(attemptID);
            }
        }
    }

    /*   *************************************************************    */
    // This is the code section that runs periodically and adds speculations for
    // those jobs that need them.
    // This can return a few magic values for tasks that shouldn't speculate:
    // returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
    // consider speculating this task
    // returns ALREADY_SPECULATING if that is true.  This has priority.
    // returns TOO_NEW if our companion task hasn't gotten any information
    // returns PROGRESS_IS_GOOD if the task is sailing through
    // returns NOT_RUNNING if the task is not running
    // 
    // All of these values are negative.  Any value that should be allowed to
    // speculate is 0 or positive.
    private long speculationValue(TaskId taskID, long now) {
        Job job = context.getJob(taskID.getJobId());
        Task task = job.getTask(taskID);
        Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
        long acceptableRuntime = Long.MIN_VALUE;
        long result = Long.MIN_VALUE;
        if (!mayHaveSpeculated.contains(taskID)) {
            acceptableRuntime = estimator.thresholdRuntime(taskID);
            if (acceptableRuntime == Long.MAX_VALUE) {
                return ON_SCHEDULE;
            }
        }
        TaskAttemptId runningTaskAttemptID = null;
        int numberRunningAttempts = 0;
        for (TaskAttempt taskAttempt : attempts.values()) {
            if (taskAttempt.getState() == TaskAttemptState.RUNNING || taskAttempt.getState() == TaskAttemptState.STARTING) {
                if (++numberRunningAttempts > 1) {
                    return ALREADY_SPECULATING;
                }
                runningTaskAttemptID = taskAttempt.getID();
                long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID);
                long taskAttemptStartTime = estimator.attemptEnrolledTime(runningTaskAttemptID);
                if (taskAttemptStartTime > now) {
                    // This background process ran before we could process the task
                    // attempt status change that chronicles the attempt start
                    return TOO_NEW;
                }
                long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
                long estimatedReplacementEndTime = now + estimator.estimatedNewAttemptRuntime(taskID);
                float progress = taskAttempt.getProgress();
                TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics.get(runningTaskAttemptID);
                if (data == null) {
                    runningTaskAttemptStatistics.put(runningTaskAttemptID, new TaskAttemptHistoryStatistics(estimatedRunTime, progress, now));
                } else {
                    if (estimatedRunTime == data.getEstimatedRunTime() && progress == data.getProgress()) {
                        // Previous stats are the same as the new stats
                        if (data.notHeartbeatedInAWhile(now)) {
                            // Stats have stagnated for a while, simulate heart-beat.
                            TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
                            taskAttemptStatus.id = runningTaskAttemptID;
                            taskAttemptStatus.progress = progress;
                            taskAttemptStatus.taskState = taskAttempt.getState();
                            // Now simulate the heart-beat
                            handleAttempt(taskAttemptStatus);
                        }
                    } else {
                        // Stats have changed - update our data structure
                        data.setEstimatedRunTime(estimatedRunTime);
                        data.setProgress(progress);
                        data.resetHeartBeatTime(now);
                    }
                }
                if (estimatedEndTime < now) {
                    return PROGRESS_IS_GOOD;
                }
                if (estimatedReplacementEndTime >= estimatedEndTime) {
                    return TOO_LATE_TO_SPECULATE;
                }
                result = estimatedEndTime - estimatedReplacementEndTime;
            }
        }
        // If we are here, there's at most one task attempt.
        if (numberRunningAttempts == 0) {
            return NOT_RUNNING;
        }
        if (acceptableRuntime == Long.MIN_VALUE) {
            acceptableRuntime = estimator.thresholdRuntime(taskID);
            if (acceptableRuntime == Long.MAX_VALUE) {
                return ON_SCHEDULE;
            }
        }
        return result;
    }

    // Add attempt to a given Task.
    protected void addSpeculativeAttempt(TaskId taskID) {
        LOG.info("DefaultSpeculator.addSpeculativeAttempt -- we are speculating " + taskID);
        eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_ADD_SPEC_ATTEMPT));
        mayHaveSpeculated.add(taskID);
    }

    @Override
    public void handle(SpeculatorEvent event) {
        processSpeculatorEvent(event);
    }

    private int maybeScheduleAMapSpeculation() {
        return maybeScheduleASpeculation(TaskType.MAP);
    }

    private int maybeScheduleAReduceSpeculation() {
        return maybeScheduleASpeculation(TaskType.REDUCE);
    }

    private int maybeScheduleASpeculation(TaskType type) {
        int successes = 0;
        long now = clock.getTime();
        ConcurrentMap<JobId, AtomicInteger> containerNeeds = type == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
        for (ConcurrentMap.Entry<JobId, AtomicInteger> jobEntry : containerNeeds.entrySet()) {
            // This race condition is okay.  If we skip a speculation attempt that we
            // should have tried because the event that lowers the number of
            // containers needed to zero hasn't come through, it will next time.
            // Also, if we miss the fact that the number of containers needed was
            // zero but increased due to a failure it's not too bad to launch one
            // container prematurely.
            if (jobEntry.getValue().get() > 0) {
                continue;
            }
            int numberSpeculationsAlready = 0;
            int numberRunningTasks = 0;
            // loop through the tasks of the kind
            Job job = context.getJob(jobEntry.getKey());
            Map<TaskId, Task> tasks = job.getTasks(type);
            int numberAllowedSpeculativeTasks = (int) Math.max(MINIMUM_ALLOWED_SPECULATIVE_TASKS, PROPORTION_TOTAL_TASKS_SPECULATABLE * tasks.size());
            TaskId bestTaskID = null;
            long bestSpeculationValue = -1L;
            // this loop is potentially pricey.
            // TODO track the tasks that are potentially worth looking at
            for (Map.Entry<TaskId, Task> taskEntry : tasks.entrySet()) {
                long mySpeculationValue = speculationValue(taskEntry.getKey(), now);
                if (mySpeculationValue == ALREADY_SPECULATING) {
                    ++numberSpeculationsAlready;
                }
                if (mySpeculationValue != NOT_RUNNING) {
                    ++numberRunningTasks;
                }
                if (mySpeculationValue > bestSpeculationValue) {
                    bestTaskID = taskEntry.getKey();
                    bestSpeculationValue = mySpeculationValue;
                }
            }
            numberAllowedSpeculativeTasks = (int) Math.max(numberAllowedSpeculativeTasks, PROPORTION_RUNNING_TASKS_SPECULATABLE * numberRunningTasks);
            // If we found a speculation target, fire it off
            if (bestTaskID != null && numberAllowedSpeculativeTasks > numberSpeculationsAlready) {
                addSpeculativeAttempt(bestTaskID);
                ++successes;
            }
        }
        return successes;
    }

    private int computeSpeculations() {
        // We'll try to issue one map and one reduce speculation per job per run
        return maybeScheduleAMapSpeculation() + maybeScheduleAReduceSpeculation();
    }

    static class TaskAttemptHistoryStatistics {

        private long estimatedRunTime;

        private float progress;

        private long lastHeartBeatTime;

        public TaskAttemptHistoryStatistics(long estimatedRunTime, float progress, long nonProgressStartTime) {
            this.estimatedRunTime = estimatedRunTime;
            this.progress = progress;
            resetHeartBeatTime(nonProgressStartTime);
        }

        public long getEstimatedRunTime() {
            return this.estimatedRunTime;
        }

        public float getProgress() {
            return this.progress;
        }

        public void setEstimatedRunTime(long estimatedRunTime) {
            this.estimatedRunTime = estimatedRunTime;
        }

        public void setProgress(float progress) {
            this.progress = progress;
        }

        public boolean notHeartbeatedInAWhile(long now) {
            if (now - lastHeartBeatTime <= MAX_WAITING_TIME_FOR_HEARTBEAT) {
                return false;
            } else {
                resetHeartBeatTime(now);
                return true;
            }
        }

        public void resetHeartBeatTime(long lastHeartBeatTime) {
            this.lastHeartBeatTime = lastHeartBeatTime;
        }
    }
}
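
A hedged sketch, not from the listing: constructing and starting the speculator from code that already holds a Configuration and an AppContext. The class and method names here are illustrative; the two-argument constructor and the AbstractService lifecycle are shown in the listing above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;

public class SpeculatorWiring {

    static DefaultSpeculator createAndStart(Configuration conf, AppContext context) {
        DefaultSpeculator speculator = new DefaultSpeculator(conf, context);
        speculator.init(conf);   // AbstractService lifecycle: init, then start
        speculator.start();      // spawns the "DefaultSpeculator background processing" thread
        return speculator;
    }
}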

14 Source : DefaultSpeculator.java
with Apache License 2.0
from NJUJYB

static private TaskRuntimeEstimator getEstimator(Configuration conf, AppContext context) {
    TaskRuntimeEstimator estimator;
    try {
        // "yarn.mapreduce.job.task.runtime.estimator.clreplaced"
        Clreplaced<? extends TaskRuntimeEstimator> estimatorClreplaced = conf.getClreplaced(MRJobConfig.MR_AM_TASK_ESTIMATOR, LegacyTaskRuntimeEstimator.clreplaced, TaskRuntimeEstimator.clreplaced);
        Constructor<? extends TaskRuntimeEstimator> estimatorConstructor = estimatorClreplaced.getConstructor();
        estimator = estimatorConstructor.newInstance();
        estimator.contextualize(conf, context);
    } catch (InstantiationException ex) {
        LOG.error("Can't make a speculation runtime estimator", ex);
        throw new YarnRuntimeException(ex);
    } catch (IllegalAccessException ex) {
        LOG.error("Can't make a speculation runtime estimator", ex);
        throw new YarnRuntimeException(ex);
    } catch (InvocationTargetException ex) {
        LOG.error("Can't make a speculation runtime estimator", ex);
        throw new YarnRuntimeException(ex);
    } catch (NoSuchMethodException ex) {
        LOG.error("Can't make a speculation runtime estimator", ex);
        throw new YarnRuntimeException(ex);
    }
    return estimator;
}
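
Since getEstimator() above resolves the estimator class from MRJobConfig.MR_AM_TASK_ESTIMATOR, a job can swap in a different shipped implementation through configuration. A short sketch, assuming ExponentiallySmoothedTaskRuntimeEstimator from the same speculate package; the wrapper class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.speculate.ExponentiallySmoothedTaskRuntimeEstimator;
import org.apache.hadoop.mapreduce.v2.app.speculate.TaskRuntimeEstimator;

public class EstimatorConfig {

    static Configuration withSmoothedEstimator(Configuration conf) {
        // getEstimator() will instantiate this class instead of the Legacy default
        conf.setClass(MRJobConfig.MR_AM_TASK_ESTIMATOR,
                      ExponentiallySmoothedTaskRuntimeEstimator.class,
                      TaskRuntimeEstimator.class);
        return conf;
    }
}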

14 Source : TaskAttemptListenerImpl.java
with Apache License 2.0
from NJUJYB

/**
 * This class is responsible for talking to the task umbilical.
 * It also converts all the old data structures
 * to yarn data structures.
 *
 * This class HAS to be in this package to access package private
 * methods/classes.
 */
@SuppressWarnings({ "unchecked" })
public class TaskAttemptListenerImpl extends CompositeService implements TaskUmbilicalProtocol, TaskAttemptListener {

    private static final JvmTask TASK_FOR_INVALID_JVM = new JvmTask(null, true);

    private static final Log LOG = LogFactory.getLog(TaskAttemptListenerImpl.class);

    private AppContext context;

    private Server server;

    protected TaskHeartbeatHandler taskHeartbeatHandler;

    private RMHeartbeatHandler rmHeartbeatHandler;

    private long commitWindowMs;

    private InetSocketAddress address;

    private ConcurrentMap<WrappedJvmID, org.apache.hadoop.mapred.Task> jvmIDToActiveAttemptMap = new ConcurrentHashMap<WrappedJvmID, org.apache.hadoop.mapred.Task>();

    private Set<WrappedJvmID> launchedJVMs = Collections.newSetFromMap(new ConcurrentHashMap<WrappedJvmID, Boolean>());

    private JobTokenSecretManager jobTokenSecretManager = null;

    public TaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler) {
        super(TaskAttemptListenerImpl.class.getName());
        this.context = context;
        this.jobTokenSecretManager = jobTokenSecretManager;
        this.rmHeartbeatHandler = rmHeartbeatHandler;
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        registerHeartbeatHandler(conf);
        commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS, MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
        super.serviceInit(conf);
    }

    @Override
    protected void serviceStart() throws Exception {
        startRpcServer();
        super.serviceStart();
    }

    protected void registerHeartbeatHandler(Configuration conf) {
        taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(), context.getClock(), conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT));
        addService(taskHeartbeatHandler);
    }

    protected void startRpcServer() {
        Configuration conf = getConfig();
        try {
            server = new RPC.Builder(conf).setProtocol(TaskUmbilicalProtocol.class).setInstance(this).setBindAddress("0.0.0.0").setPort(0).setNumHandlers(conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT)).setVerbose(false).setSecretManager(jobTokenSecretManager).build();
            // Enable service authorization?
            if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
                refreshServiceAcls(conf, new MRAMPolicyProvider());
            }
            server.start();
            this.address = NetUtils.createSocketAddrForHost(context.getNMHostname(), server.getListenerAddress().getPort());
        } catch (IOException e) {
            throw new YarnRuntimeException(e);
        }
    }

    void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) {
        this.server.refreshServiceAcl(configuration, policyProvider);
    }

    @Override
    protected void serviceStop() throws Exception {
        stopRpcServer();
        super.serviceStop();
    }

    protected void stopRpcServer() {
        if (server != null) {
            server.stop();
        }
    }

    @Override
    public InetSocketAddress getAddress() {
        return address;
    }

    /**
     * Child checking whether it can commit.
     *
     * <br/>
     * Commit is a two-phased protocol. First the attempt informs the
     * ApplicationMaster that it is
     * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
     * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)} This is
     * a legacy from the centralized commit protocol handling by the JobTracker.
     */
    @Override
    public boolean canCommit(TaskAttemptID taskAttemptID) throws IOException {
        LOG.info("Commit go/no-go request from " + taskAttemptID.toString());
        // An attempt is asking if it can commit its output. This can be decided
        // only by the task which is managing the multiple attempts. So redirect the
        // request there.
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        // tell task to retry later if AM has not heard from RM within the commit
        // window to help avoid double-committing in a split-brain situation
        long now = context.getClock().getTime();
        if (now - rmHeartbeatHandler.getLastHeartbeatTime() > commitWindowMs) {
            return false;
        }
        Job job = context.getJob(attemptID.getTaskId().getJobId());
        Task task = job.getTask(attemptID.getTaskId());
        return task.canCommit(attemptID);
    }
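
    // Hedged illustration, not part of TaskAttemptListenerImpl: the child-side
    // half of the two-phase commit described in the javadoc above, roughly as a
    // task drives it through its TaskUmbilicalProtocol proxy ("umbilical" and
    // "taskStatus" are assumed to be in scope in the child):
    //
    //   umbilical.commitPending(taskAttemptID, taskStatus); // phase 1: announce
    //   while (!umbilical.canCommit(taskAttemptID)) {       // phase 2: poll until
    //       Thread.sleep(1000);                             // the AM says go
    //   }
    //   // only now is it safe to commit the task's output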

    /**
     * TaskAttempt is reporting that it is in commit_pending and it is waiting for
     * the commit Response
     *
     * <br/>
     * Commit is a two-phased protocol. First the attempt informs the
     * ApplicationMaster that it is
     * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
     * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)} This is
     * a legacy from the centralized commit protocol handling by the JobTracker.
     */
    @Override
    public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus) throws IOException, InterruptedException {
        LOG.info("Commit-pending state update from " + taskAttemptID.toString());
        // An attempt is asking if it can commit its output. This can be decided
        // only by the task which is managing the multiple attempts. So redirect the
        // request there.
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        // Ignorable TaskStatus? - since a task will send a LastStatusUpdate
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_COMMIT_PENDING));
    }

    @Override
    public void done(TaskAttemptID taskAttemptID) throws IOException {
        LOG.info("Done acknowledgement from " + taskAttemptID.toString());
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
    }

    @Override
    public void fatalError(TaskAttemptID taskAttemptID, String msg) throws IOException {
        // This happens only in Child and in the Task.
        LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
        reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    }

    @Override
    public void fsError(TaskAttemptID taskAttemptID, String message) throws IOException {
        // This happens only in Child.
        LOG.fatal("Task: " + taskAttemptID + " - failed due to FSError: " + message);
        reportDiagnosticInfo(taskAttemptID, "FSError: " + message);
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    }

    @Override
    public void shuffleError(TaskAttemptID taskAttemptID, String message) throws IOException {
    // TODO: This isn't really used in any MR code. Ask for removal.
    }

    @Override
    public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobIdentifier, int startIndex, int maxEvents, TaskAttemptID taskAttemptID) throws IOException {
        LOG.info("MapCompletionEvents request from " + taskAttemptID.toString() + ". startIndex " + startIndex + " maxEvents " + maxEvents);
        // TODO: shouldReset is never used. See TT. Ask for Removal.
        boolean shouldReset = false;
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        TaskCompletionEvent[] events = context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents(startIndex, maxEvents);
        taskHeartbeatHandler.progressing(attemptID);
        return new MapTaskCompletionEventsUpdate(events, shouldReset);
    }

    @Override
    public boolean ping(TaskAttemptID taskAttemptID) throws IOException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Ping from " + taskAttemptID.toString());
        }
        return true;
    }

    @Override
    public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo) throws IOException {
        diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
        LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": " + diagnosticInfo);
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        // This is mainly used for cases where we want to propagate exception traces
        // of tasks that fail.
        // This call exists as a hadoop mapreduce legacy wherein all changes in
        // counters/progress/phase/output-size are reported through statusUpdate()
        // call but not diagnosticInformation.
        context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
    }

    @Override
    public boolean statusUpdate(TaskAttemptID taskAttemptID, TaskStatus taskStatus) throws IOException, InterruptedException {
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(yarnAttemptID);
        TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
        taskAttemptStatus.id = yarnAttemptID;
        // Task sends the updated progress to the TT.
        taskAttemptStatus.progress = taskStatus.getProgress();
        LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : " + taskStatus.getProgress());
        // Task sends the updated state-string to the TT.
        taskAttemptStatus.stateString = taskStatus.getStateString();
        // Task sends the updated phase to the TT.
        taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
        // Counters are updated by the task. Convert counters into new format as
        // that is the primary storage format inside the AM to avoid multiple
        // conversions and unnecessary heap usage.
        taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(taskStatus.getCounters());
        // Map Finish time set by the task (map only)
        if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
            taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
        }
        // Shuffle Finish time set by the task (reduce only).
        if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
            taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
        }
        // Sort finish time set by the task (reduce only).
        if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
            taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
        }
        // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
        // taskAttemptStatus.taskState =  TypeConverter.toYarn(taskStatus.getRunState());
        // set the fetch failures
        if (taskStatus.getFetchFailedMaps() != null && taskStatus.getFetchFailedMaps().size() > 0) {
            taskAttemptStatus.fetchFailedMaps = new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
            for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
                taskAttemptStatus.fetchFailedMaps.add(TypeConverter.toYarn(failedMapId));
            }
        }
        // Task sends the information about the nextRecordRange to the TT
        // TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
        // taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
        // taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
        // taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
        // // This was used by TT to do counter updates only once every minute. So this
        // // isn't ever changed by the Task itself.
        // taskStatus.getIncludeCounters();
        context.getEventHandler().handle(new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id, taskAttemptStatus));
        return true;
    }

    @Override
    public long getProtocolVersion(String arg0, long arg1) throws IOException {
        return TaskUmbilicalProtocol.versionID;
    }

    @Override
    public void reportNextRecordRange(TaskAttemptID taskAttemptID, Range range) throws IOException {
        // This is used when the feature of skipping records is enabled.
        // This call exists as a hadoop mapreduce legacy wherein all changes in
        // counters/progress/phase/output-size are reported through statusUpdate()
        // call but not the next record range information.
        throw new IOException("Not yet implemented.");
    }

    @Override
    public JvmTask getTask(JvmContext context) throws IOException {
        // A rough imitation of code from TaskTracker.
        JVMId jvmId = context.jvmId;
        LOG.info("JVM with ID : " + jvmId + " asked for a task");
        JvmTask jvmTask = null;
        // TODO: Is it an authorized container to get a task? Otherwise return null.
        // TODO: Child.java's firstTaskID isn't really firstTaskID. Ask for update
        // to jobId and task-type.
        WrappedJvmID wJvmID = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap, jvmId.getId());
        // Try to look up the task. We remove it directly as we don't give
        // multiple tasks to a JVM
        if (!jvmIDToActiveAttemptMap.containsKey(wJvmID)) {
            LOG.info("JVM with ID: " + jvmId + " is invalid and will be killed.");
            jvmTask = TASK_FOR_INVALID_JVM;
        } else {
            if (!launchedJVMs.contains(wJvmID)) {
                jvmTask = null;
                LOG.info("JVM with ID: " + jvmId + " asking for task before AM launch registered. Given null task");
            } else {
                // Remove the task as it is no longer needed, freeing up the memory.
                // Also, we have already told the JVM to process a task, so it is no
                // longer pending, and further requests should ask it to exit.
                org.apache.hadoop.mapred.Task task = jvmIDToActiveAttemptMap.remove(wJvmID);
                launchedJVMs.remove(wJvmID);
                LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
                jvmTask = new JvmTask(task, false);
            }
        }
        return jvmTask;
    }

    @Override
    public void registerPendingTask(org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {
        // Create the mapping so that it is easy to look up
        // when the jvm comes back to ask for Task.
        // A JVM not present in this map is an illegal task/JVM.
        jvmIDToActiveAttemptMap.put(jvmID, task);
    }

    @Override
    public void registerLaunchedTask(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID, WrappedJvmID jvmId) {
        // The AM considers the task to be launched (Has asked the NM to launch it)
        // The JVM will only be given a task after this registration.
        launchedJVMs.add(jvmId);
        taskHeartbeatHandler.register(attemptID);
    }

    @Override
    public void unregister(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID, WrappedJvmID jvmID) {
        // Unregistration also comes from the same TaskAttempt which does the
        // registration. Events are ordered at TaskAttempt, so unregistration will
        // always come after registration.
        // Remove from launchedJVMs before jvmIDToActiveAttemptMap to avoid
        // synchronization issue with getTask(). getTask should be checking
        // jvmIDToActiveAttemptMap before it checks launchedJVMs.
        // remove the mappings if not already removed
        launchedJVMs.remove(jvmID);
        jvmIDToActiveAttemptMap.remove(jvmID);
        // unregister this attempt
        taskHeartbeatHandler.unregister(attemptID);
    }

    @Override
    public ProtocolSignature getProtocolSignature(String protocol, long clientVersion, int clientMethodsHash) throws IOException {
        return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion, clientMethodsHash);
    }
}
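
The listing above implements the ApplicationMaster side of the two-phase commit handshake described in its javadoc. For orientation, here is a minimal sketch of the task side (not taken from the voted examples; the class name and polling interval are illustrative assumptions): the attempt first reports commit-pending over the umbilical, then polls canCommit until the managing Task approves.

import java.io.IOException;

import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;

public class CommitHandshakeSketch {

    // Illustrative polling interval; the real task uses its own retry logic.
    private static final long POLL_INTERVAL_MS = 1000;

    public static void awaitCommitApproval(TaskUmbilicalProtocol umbilical, TaskAttemptID attemptId, TaskStatus status) throws IOException, InterruptedException {
        // Phase 1: report commit-pending to the ApplicationMaster.
        umbilical.commitPending(attemptId, status);
        // Phase 2: poll until the Task managing the attempts says go.
        while (!umbilical.canCommit(attemptId)) {
            Thread.sleep(POLL_INTERVAL_MS);
        }
    }
}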

14 Source : DefaultSpeculator.java
with Apache License 2.0
from naver

public class DefaultSpeculator extends AbstractService implements Speculator {

    private static final long ON_SCHEDULE = Long.MIN_VALUE;

    private static final long ALREADY_SPECULATING = Long.MIN_VALUE + 1;

    private static final long TOO_NEW = Long.MIN_VALUE + 2;

    private static final long PROGRESS_IS_GOOD = Long.MIN_VALUE + 3;

    private static final long NOT_RUNNING = Long.MIN_VALUE + 4;

    private static final long TOO_LATE_TO_SPECULATE = Long.MIN_VALUE + 5;

    private long soonestRetryAfterNoSpeculate;

    private long soonestRetryAfterSpeculate;

    private double proportionRunningTasksSpeculatable;

    private double proportionTotalTasksSpeculatable;

    private int minimumAllowedSpeculativeTasks;

    private static final Log LOG = LogFactory.getLog(DefaultSpeculator.class);

    private final ConcurrentMap<TaskId, Boolean> runningTasks = new ConcurrentHashMap<TaskId, Boolean>();

    // Used to track any TaskAttempts that aren't heart-beating for a while, so
    // that we can aggressively speculate instead of waiting for task-timeout.
    private final ConcurrentMap<TaskAttemptId, TaskAttemptHistoryStatistics> runningTaskAttemptStatistics = new ConcurrentHashMap<TaskAttemptId, TaskAttemptHistoryStatistics>();

    // Regular heartbeat from tasks is every 3 secs. So if we don't get a
    // heartbeat in 9 secs (3 heartbeats), we simulate a heartbeat with no change
    // in progress.
    private static final long MAX_WAITING_TIME_FOR_HEARTBEAT = 9 * 1000;

    // These are the current needs, not the initial needs.  For each job, these
    // record the number of attempts that exist and that are actively
    // waiting for a container [as opposed to running or finished]
    private final ConcurrentMap<JobId, AtomicInteger> mapContainerNeeds = new ConcurrentHashMap<JobId, AtomicInteger>();

    private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds = new ConcurrentHashMap<JobId, AtomicInteger>();

    private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>();

    private final Configuration conf;

    private AppContext context;

    private Thread speculationBackgroundThread = null;

    private volatile boolean stopped = false;

    private BlockingQueue<SpeculatorEvent> eventQueue = new LinkedBlockingQueue<SpeculatorEvent>();

    private TaskRuntimeEstimator estimator;

    private BlockingQueue<Object> scanControl = new LinkedBlockingQueue<Object>();

    private final Clock clock;

    private final EventHandler<TaskEvent> eventHandler;

    public DefaultSpeculator(Configuration conf, AppContext context) {
        this(conf, context, context.getClock());
    }

    public DefaultSpeculator(Configuration conf, AppContext context, Clock clock) {
        this(conf, context, getEstimator(conf, context), clock);
    }

    static private TaskRuntimeEstimator getEstimator(Configuration conf, AppContext context) {
        TaskRuntimeEstimator estimator;
        try {
            // "yarn.mapreduce.job.task.runtime.estimator.clreplaced"
            Clreplaced<? extends TaskRuntimeEstimator> estimatorClreplaced = conf.getClreplaced(MRJobConfig.MR_AM_TASK_ESTIMATOR, LegacyTaskRuntimeEstimator.clreplaced, TaskRuntimeEstimator.clreplaced);
            Constructor<? extends TaskRuntimeEstimator> estimatorConstructor = estimatorClreplaced.getConstructor();
            estimator = estimatorConstructor.newInstance();
            estimator.contextualize(conf, context);
        } catch (InstantiationException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        } catch (IllegalAccessException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        } catch (InvocationTargetException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        } catch (NoSuchMethodException ex) {
            LOG.error("Can't make a speculation runtime estimator", ex);
            throw new YarnRuntimeException(ex);
        }
        return estimator;
    }

    // This constructor is designed to be called by other constructors.
    // However, it's public because we do use it in the test cases.
    // Normally we figure out our own estimator.
    public DefaultSpeculator(Configuration conf, AppContext context, TaskRuntimeEstimator estimator, Clock clock) {
        super(DefaultSpeculator.class.getName());
        this.conf = conf;
        this.context = context;
        this.estimator = estimator;
        this.clock = clock;
        this.eventHandler = context.getEventHandler();
        this.soonestRetryAfterNoSpeculate = conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE);
        this.soonestRetryAfterSpeculate = conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE);
        this.proportionRunningTasksSpeculatable = conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS);
        this.proportionTotalTasksSpeculatable = conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS);
        this.minimumAllowedSpeculativeTasks = conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS);
    }

    /*   *************************************************************    */
    // This is the task-mongering that creates the two new threads -- one for
    // processing events from the event queue and one for periodically
    // looking for speculation opportunities
    @Override
    protected void serviceStart() throws Exception {
        Runnable speculationBackgroundCore = new Runnable() {

            @Override
            public void run() {
                while (!stopped && !Thread.currentThread().isInterrupted()) {
                    long backgroundRunStartTime = clock.getTime();
                    try {
                        int speculations = computeSpeculations();
                        long minimumRecomp = speculations > 0 ? soonestRetryAfterSpeculate : soonestRetryAfterNoSpeculate;
                        long wait = Math.max(minimumRecomp, clock.getTime() - backgroundRunStartTime);
                        if (speculations > 0) {
                            LOG.info("We launched " + speculations + " speculations.  Sleeping " + wait + " milliseconds.");
                        }
                        Object pollResult = scanControl.poll(wait, TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        if (!stopped) {
                            LOG.error("Background thread returning, interrupted", e);
                        }
                        return;
                    }
                }
            }
        };
        speculationBackgroundThread = new Thread(speculationBackgroundCore, "DefaultSpeculator background processing");
        speculationBackgroundThread.start();
        super.serviceStart();
    }

    @Override
    protected void serviceStop() throws Exception {
        stopped = true;
        // this could be called before background thread is established
        if (speculationBackgroundThread != null) {
            speculationBackgroundThread.interrupt();
        }
        super.serviceStop();
    }

    @Override
    public void handleAttempt(TaskAttemptStatus status) {
        long timestamp = clock.getTime();
        statusUpdate(status, timestamp);
    }

    // This section is not part of the Speculator interface; it's used only for
    // testing
    public boolean eventQueueEmpty() {
        return eventQueue.isEmpty();
    }

    // This interface is intended to be used only for test cases.
    public void scanForSpeculations() {
        LOG.info("We got asked to run a debug speculation scan.");
        // debug
        System.out.println("We got asked to run a debug speculation scan.");
        System.out.println("There are " + scanControl.size() + " events stacked already.");
        scanControl.add(new Object());
        Thread.yield();
    }

    /*   *************************************************************    */
    // This section contains the code that gets run for a SpeculatorEvent
    private AtomicInteger containerNeed(TaskId taskID) {
        JobId jobID = taskID.getJobId();
        TaskType taskType = taskID.getTaskType();
        ConcurrentMap<JobId, AtomicInteger> relevantMap = taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
        AtomicInteger result = relevantMap.get(jobID);
        if (result == null) {
            relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
            result = relevantMap.get(jobID);
        }
        return result;
    }

    private synchronized void processSpeculatorEvent(SpeculatorEvent event) {
        switch(event.getType()) {
            case ATTEMPT_STATUS_UPDATE:
                statusUpdate(event.getReportedStatus(), event.getTimestamp());
                break;
            case TASK_CONTAINER_NEED_UPDATE:
                {
                    AtomicInteger need = containerNeed(event.getTaskID());
                    need.addAndGet(event.containersNeededChange());
                    break;
                }
            case ATTEMPT_START:
                {
                    LOG.info("ATTEMPT_START " + event.getTaskID());
                    estimator.enrollAttempt(event.getReportedStatus(), event.getTimestamp());
                    break;
                }
            case JOB_CREATE:
                {
                    LOG.info("JOB_CREATE " + event.getJobID());
                    estimator.contextualize(getConfig(), context);
                    break;
                }
        }
    }

    /**
     * Absorbs one TaskAttemptStatus
     *
     * @param reportedStatus the status report that we got from a task attempt
     *        that we want to fold into the speculation data for this job
     * @param timestamp the time this status corresponds to.  This matters
     *        because statuses contain progress.
     */
    protected void statusUpdate(TaskAttemptStatus reportedStatus, long timestamp) {
        String stateString = reportedStatus.taskState.toString();
        TaskAttemptId attemptID = reportedStatus.id;
        TaskId taskID = attemptID.getTaskId();
        Job job = context.getJob(taskID.getJobId());
        if (job == null) {
            return;
        }
        Task task = job.getTask(taskID);
        if (task == null) {
            return;
        }
        estimator.updateAttempt(reportedStatus, timestamp);
        if (stateString.equals(TaskAttemptState.RUNNING.name())) {
            runningTasks.putIfAbsent(taskID, Boolean.TRUE);
        } else {
            runningTasks.remove(taskID, Boolean.TRUE);
            if (!stateString.equals(TaskAttemptState.STARTING.name())) {
                runningTaskAttemptStatistics.remove(attemptID);
            }
        }
    }

    /*   *************************************************************    */
    // This is the code section that runs periodically and adds speculations for
    // those jobs that need them.
    // This can return a few magic values for tasks that shouldn't speculate:
    // returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
    // consider speculating this task
    // returns ALREADY_SPECULATING if that is true.  This has priority.
    // returns TOO_NEW if our companion task hasn't gotten any information
    // returns PROGRESS_IS_GOOD if the task is sailing through
    // returns NOT_RUNNING if the task is not running
    // returns TOO_LATE_TO_SPECULATE if a fresh attempt could not be expected
    // to finish any sooner than the running one
    // 
    // All of these values are negative.  Any value that should be allowed to
    // speculate is 0 or positive.
    private long speculationValue(TaskId taskID, long now) {
        Job job = context.getJob(taskID.getJobId());
        Task task = job.getTask(taskID);
        Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
        long acceptableRuntime = Long.MIN_VALUE;
        long result = Long.MIN_VALUE;
        if (!mayHaveSpeculated.contains(taskID)) {
            acceptableRuntime = estimator.thresholdRuntime(taskID);
            if (acceptableRuntime == Long.MAX_VALUE) {
                return ON_SCHEDULE;
            }
        }
        TaskAttemptId runningTaskAttemptID = null;
        int numberRunningAttempts = 0;
        for (TaskAttempt taskAttempt : attempts.values()) {
            if (taskAttempt.getState() == TaskAttemptState.RUNNING || taskAttempt.getState() == TaskAttemptState.STARTING) {
                if (++numberRunningAttempts > 1) {
                    return ALREADY_SPECULATING;
                }
                runningTaskAttemptID = taskAttempt.getID();
                long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID);
                long taskAttemptStartTime = estimator.attemptEnrolledTime(runningTaskAttemptID);
                if (taskAttemptStartTime > now) {
                    // This background process ran before we could process the task
                    // attempt status change that chronicles the attempt start
                    return TOO_NEW;
                }
                long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
                long estimatedReplacementEndTime = now + estimator.estimatedNewAttemptRuntime(taskID);
                float progress = taskAttempt.getProgress();
                TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics.get(runningTaskAttemptID);
                if (data == null) {
                    runningTaskAttemptStatistics.put(runningTaskAttemptID, new TaskAttemptHistoryStatistics(estimatedRunTime, progress, now));
                } else {
                    if (estimatedRunTime == data.getEstimatedRunTime() && progress == data.getProgress()) {
                        // Previous stats are the same as the latest stats
                        if (data.notHeartbeatedInAWhile(now)) {
                            // Stats have stagnated for a while, simulate heart-beat.
                            TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
                            taskAttemptStatus.id = runningTaskAttemptID;
                            taskAttemptStatus.progress = progress;
                            taskAttemptStatus.taskState = taskAttempt.getState();
                            // Now simulate the heart-beat
                            handleAttempt(taskAttemptStatus);
                        }
                    } else {
                        // Stats have changed - update our data structure
                        data.setEstimatedRunTime(estimatedRunTime);
                        data.setProgress(progress);
                        data.resetHeartBeatTime(now);
                    }
                }
                if (estimatedEndTime < now) {
                    return PROGRESS_IS_GOOD;
                }
                if (estimatedReplacementEndTime >= estimatedEndTime) {
                    return TOO_LATE_TO_SPECULATE;
                }
                result = estimatedEndTime - estimatedReplacementEndTime;
            }
        }
        // If we are here, there's at most one task attempt.
        if (numberRunningAttempts == 0) {
            return NOT_RUNNING;
        }
        if (acceptableRuntime == Long.MIN_VALUE) {
            acceptableRuntime = estimator.thresholdRuntime(taskID);
            if (acceptableRuntime == Long.MAX_VALUE) {
                return ON_SCHEDULE;
            }
        }
        return result;
    }

    // Add attempt to a given Task.
    protected void addSpeculativeAttempt(TaskId taskID) {
        LOG.info("DefaultSpeculator.addSpeculativeAttempt -- we are speculating " + taskID);
        eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_ADD_SPEC_ATTEMPT));
        mayHaveSpeculated.add(taskID);
    }

    @Override
    public void handle(SpeculatorEvent event) {
        processSpeculatorEvent(event);
    }

    private int maybeScheduleAMapSpeculation() {
        return maybeScheduleASpeculation(TaskType.MAP);
    }

    private int maybeScheduleAReduceSpeculation() {
        return maybeScheduleASpeculation(TaskType.REDUCE);
    }

    private int maybeScheduleASpeculation(TaskType type) {
        int successes = 0;
        long now = clock.getTime();
        ConcurrentMap<JobId, AtomicInteger> containerNeeds = type == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
        for (ConcurrentMap.Entry<JobId, AtomicInteger> jobEntry : containerNeeds.entrySet()) {
            // This race condition is okay.  If we skip a speculation attempt we
            // should have tried because the event that lowers the number of
            // containers needed to zero hasn't come through, it will next time.
            // Also, if we miss the fact that the number of containers needed was
            // zero but increased due to a failure it's not too bad to launch one
            // container prematurely.
            if (jobEntry.getValue().get() > 0) {
                continue;
            }
            int numberSpeculationsAlready = 0;
            int numberRunningTasks = 0;
            // loop through the tasks of the kind
            Job job = context.getJob(jobEntry.getKey());
            Map<TaskId, Task> tasks = job.getTasks(type);
            int numberAllowedSpeculativeTasks = (int) Math.max(minimumAllowedSpeculativeTasks, proportionTotalTasksSpeculatable * tasks.size());
            TaskId bestTaskID = null;
            long bestSpeculationValue = -1L;
            // this loop is potentially pricey.
            // TODO track the tasks that are potentially worth looking at
            for (Map.Entry<TaskId, Task> taskEntry : tasks.entrySet()) {
                long mySpeculationValue = speculationValue(taskEntry.getKey(), now);
                if (mySpeculationValue == ALREADY_SPECULATING) {
                    ++numberSpeculationsAlready;
                }
                if (mySpeculationValue != NOT_RUNNING) {
                    ++numberRunningTasks;
                }
                if (mySpeculationValue > bestSpeculationValue) {
                    bestTaskID = taskEntry.getKey();
                    bestSpeculationValue = mySpeculationValue;
                }
            }
            numberAllowedSpeculativeTasks = (int) Math.max(numberAllowedSpeculativeTasks, proportionRunningTasksSpeculatable * numberRunningTasks);
            // If we found a speculation target, fire it off
            if (bestTaskID != null && numberAllowedSpeculativeTasks > numberSpeculationsAlready) {
                addSpeculativeAttempt(bestTaskID);
                ++successes;
            }
        }
        return successes;
    }

    private int computeSpeculations() {
        // We'll try to issue one map and one reduce speculation per job per run
        return maybeScheduleAMapSpeculation() + maybeScheduleAReduceSpeculation();
    }

    static class TaskAttemptHistoryStatistics {

        private long estimatedRunTime;

        private float progress;

        private long lastHeartBeatTime;

        public TaskAttemptHistoryStatistics(long estimatedRunTime, float progress, long nonProgressStartTime) {
            this.estimatedRunTime = estimatedRunTime;
            this.progress = progress;
            resetHeartBeatTime(nonProgressStartTime);
        }

        public long getEstimatedRunTime() {
            return this.estimatedRunTime;
        }

        public float getProgress() {
            return this.progress;
        }

        public void setEstimatedRunTime(long estimatedRunTime) {
            this.estimatedRunTime = estimatedRunTime;
        }

        public void setProgress(float progress) {
            this.progress = progress;
        }

        public boolean notHeartbeatedInAWhile(long now) {
            if (now - lastHeartBeatTime <= MAX_WAITING_TIME_FOR_HEARTBEAT) {
                return false;
            } else {
                resetHeartBeatTime(now);
                return true;
            }
        }

        public void resetHeartBeatTime(long lastHeartBeatTime) {
            this.lastHeartBeatTime = lastHeartBeatTime;
        }
    }

    @VisibleForTesting
    public long getSoonestRetryAfterNoSpeculate() {
        return soonestRetryAfterNoSpeculate;
    }

    @VisibleForTesting
    public long getSoonestRetryAfterSpeculate() {
        return soonestRetryAfterSpeculate;
    }

    @VisibleForTesting
    public double getProportionRunningTasksSpeculatable() {
        return proportionRunningTasksSpeculatable;
    }

    @VisibleForTesting
    public double getProportionTotalTasksSpeculatable() {
        return proportionTotalTasksSpeculatable;
    }

    @VisibleForTesting
    public int getMinimumAllowedSpeculativeTasks() {
        return minimumAllowedSpeculativeTasks;
    }
}
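
For orientation, a minimal sketch of wiring the speculator into a service lifecycle (not one of the voted examples; it assumes a driver or test context where any AppContext implementation, e.g. a mock, is at hand):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;

public class SpeculatorWiringSketch {

    public static Speculator startSpeculator(Configuration conf, AppContext appContext) {
        // The two-argument constructor picks up the clock from the AppContext.
        DefaultSpeculator speculator = new DefaultSpeculator(conf, appContext);
        // init/start run the AbstractService lifecycle: serviceInit reads the
        // speculation thresholds, serviceStart spawns the background scan thread.
        speculator.init(conf);
        speculator.start();
        return speculator;
    }
}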

14 Source : TaskAttemptListenerImpl.java
with Apache License 2.0
from naver

/**
 * This class is responsible for talking to the task umbilical.
 * It also converts all the old data structures
 * to yarn data structures.
 *
 * This class HAS to be in this package to access package private
 * methods/classes.
 */
@SuppressWarnings({ "unchecked" })
public class TaskAttemptListenerImpl extends CompositeService implements TaskUmbilicalProtocol, TaskAttemptListener {

    private static final JvmTask TASK_FOR_INVALID_JVM = new JvmTask(null, true);

    private static final Log LOG = LogFactory.getLog(TaskAttemptListenerImpl.class);

    private AppContext context;

    private Server server;

    protected TaskHeartbeatHandler taskHeartbeatHandler;

    private RMHeartbeatHandler rmHeartbeatHandler;

    private long commitWindowMs;

    private InetSocketAddress address;

    private ConcurrentMap<WrappedJvmID, org.apache.hadoop.mapred.Task> jvmIDToActiveAttemptMap = new ConcurrentHashMap<WrappedJvmID, org.apache.hadoop.mapred.Task>();

    private Set<WrappedJvmID> launchedJVMs = Collections.newSetFromMap(new ConcurrentHashMap<WrappedJvmID, Boolean>());

    private JobTokenSecretManager jobTokenSecretManager = null;

    private byte[] encryptedSpillKey;

    public TaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler, byte[] secretShuffleKey) {
        super(TaskAttemptListenerImpl.class.getName());
        this.context = context;
        this.jobTokenSecretManager = jobTokenSecretManager;
        this.rmHeartbeatHandler = rmHeartbeatHandler;
        this.encryptedSpillKey = secretShuffleKey;
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        registerHeartbeatHandler(conf);
        commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS, MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
        super.serviceInit(conf);
    }

    @Override
    protected void serviceStart() throws Exception {
        startRpcServer();
        super.serviceStart();
    }

    protected void registerHeartbeatHandler(Configuration conf) {
        taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(), context.getClock(), conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT));
        addService(taskHeartbeatHandler);
    }

    protected void startRpcServer() {
        Configuration conf = getConfig();
        try {
            server = new RPC.Builder(conf).setProtocol(TaskUmbilicalProtocol.class).setInstance(this).setBindAddress("0.0.0.0").setPort(0).setNumHandlers(conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT)).setVerbose(false).setSecretManager(jobTokenSecretManager).build();
            // Enable service authorization?
            if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
                refreshServiceAcls(conf, new MRAMPolicyProvider());
            }
            server.start();
            this.address = NetUtils.createSocketAddrForHost(context.getNMHostname(), server.getListenerAddress().getPort());
        } catch (IOException e) {
            throw new YarnRuntimeException(e);
        }
    }

    void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) {
        this.server.refreshServiceAcl(configuration, policyProvider);
    }

    @Override
    protected void serviceStop() throws Exception {
        stopRpcServer();
        super.serviceStop();
    }

    protected void stopRpcServer() {
        if (server != null) {
            server.stop();
        }
    }

    @Override
    public InetSocketAddress getAddress() {
        return address;
    }

    /**
     * Child checking whether it can commit.
     *
     * <br>
     * Commit is a two-phased protocol. First the attempt informs the
     * ApplicationMaster that it is
     * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
     * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
     * a legacy from the centralized commit protocol handling by the JobTracker.
     */
    @Override
    public boolean canCommit(TaskAttemptID taskAttemptID) throws IOException {
        LOG.info("Commit go/no-go request from " + taskAttemptID.toString());
        // An attempt is asking if it can commit its output. This can be decided
        // only by the task which is managing the multiple attempts. So redirect the
        // request there.
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        // tell task to retry later if AM has not heard from RM within the commit
        // window to help avoid double-committing in a split-brain situation
        long now = context.getClock().getTime();
        if (now - rmHeartbeatHandler.getLastHeartbeatTime() > commitWindowMs) {
            return false;
        }
        Job job = context.getJob(attemptID.getTaskId().getJobId());
        Task task = job.getTask(attemptID.getTaskId());
        return task.canCommit(attemptID);
    }

    /**
     * TaskAttempt is reporting that it is in commit_pending and it is waiting for
     * the commit Response
     *
     * <br>
     * Commit is a two-phased protocol. First the attempt informs the
     * ApplicationMaster that it is
     * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
     * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
     * a legacy from the centralized commit protocol handling by the JobTracker.
     */
    @Override
    public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus) throws IOException, InterruptedException {
        LOG.info("Commit-pending state update from " + taskAttemptID.toString());
        // An attempt is asking if it can commit its output. This can be decided
        // only by the task which is managing the multiple attempts. So redirect the
        // request there.
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        // Ignorable TaskStatus? - since a task will send a LastStatusUpdate
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_COMMIT_PENDING));
    }

    @Override
    public void done(TaskAttemptID taskAttemptID) throws IOException {
        LOG.info("Done acknowledgement from " + taskAttemptID.toString());
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
    }

    @Override
    public void fatalError(TaskAttemptID taskAttemptID, String msg) throws IOException {
        // This happens only in Child and in the Task.
        LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
        reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    }

    @Override
    public void fsError(TaskAttemptID taskAttemptID, String message) throws IOException {
        // This happens only in Child.
        LOG.fatal("Task: " + taskAttemptID + " - failed due to FSError: " + message);
        reportDiagnosticInfo(taskAttemptID, "FSError: " + message);
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    }

    @Override
    public void shuffleError(TaskAttemptID taskAttemptID, String message) throws IOException {
    // TODO: This isn't really used in any MR code. Ask for removal.
    }

    @Override
    public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobIdentifier, int startIndex, int maxEvents, TaskAttemptID taskAttemptID) throws IOException {
        LOG.info("MapCompletionEvents request from " + taskAttemptID.toString() + ". startIndex " + startIndex + " maxEvents " + maxEvents);
        // TODO: shouldReset is never used. See TT. Ask for Removal.
        boolean shouldReset = false;
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        TaskCompletionEvent[] events = context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents(startIndex, maxEvents);
        taskHeartbeatHandler.progressing(attemptID);
        return new MapTaskCompletionEventsUpdate(events, shouldReset);
    }

    @Override
    public boolean ping(TaskAttemptID taskAttemptID) throws IOException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Ping from " + taskAttemptID.toString());
        }
        return true;
    }

    @Override
    public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo) throws IOException {
        diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
        LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": " + diagnosticInfo);
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(attemptID);
        // This is mainly used for cases where we want to propagate exception traces
        // of tasks that fail.
        // This call exists as a hadoop mapreduce legacy wherein all changes in
        // counters/progress/phase/output-size are reported through statusUpdate()
        // call but not diagnosticInformation.
        context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
    }

    @Override
    public boolean statusUpdate(TaskAttemptID taskAttemptID, TaskStatus taskStatus) throws IOException, InterruptedException {
        org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID = TypeConverter.toYarn(taskAttemptID);
        taskHeartbeatHandler.progressing(yarnAttemptID);
        TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
        taskAttemptStatus.id = yarnAttemptID;
        // Task sends the updated progress to the TT.
        taskAttemptStatus.progress = taskStatus.getProgress();
        LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : " + taskStatus.getProgress());
        // Task sends the updated state-string to the TT.
        taskAttemptStatus.stateString = taskStatus.getStateString();
        // Task sends the updated phase to the TT.
        taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
        // Counters are updated by the task. Convert counters into new format as
        // that is the primary storage format inside the AM to avoid multiple
        // conversions and unnecessary heap usage.
        taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(taskStatus.getCounters());
        // Map Finish time set by the task (map only)
        if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
            taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
        }
        // Shuffle Finish time set by the task (reduce only).
        if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
            taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
        }
        // Sort finish time set by the task (reduce only).
        if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
            taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
        }
        // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
        // taskAttemptStatus.taskState =  TypeConverter.toYarn(taskStatus.getRunState());
        // set the fetch failures
        if (taskStatus.getFetchFailedMaps() != null && taskStatus.getFetchFailedMaps().size() > 0) {
            taskAttemptStatus.fetchFailedMaps = new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
            for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
                taskAttemptStatus.fetchFailedMaps.add(TypeConverter.toYarn(failedMapId));
            }
        }
        // Task sends the information about the nextRecordRange to the TT
        // TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
        // taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
        // taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
        // taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
        // // This was used by TT to do counter updates only once every minute. So this
        // // isn't ever changed by the Task itself.
        // taskStatus.getIncludeCounters();
        context.getEventHandler().handle(new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id, taskAttemptStatus));
        return true;
    }

    @Override
    public long getProtocolVersion(String arg0, long arg1) throws IOException {
        return TaskUmbilicalProtocol.versionID;
    }

    @Override
    public void reportNextRecordRange(TaskAttemptID taskAttemptID, Range range) throws IOException {
        // This is used when the feature of skipping records is enabled.
        // This call exists as a hadoop mapreduce legacy wherein all changes in
        // counters/progress/phase/output-size are reported through statusUpdate()
        // call but not the next record range information.
        throw new IOException("Not yet implemented.");
    }

    @Override
    public JvmTask getTask(JvmContext context) throws IOException {
        // A rough imitation of code from TaskTracker.
        JVMId jvmId = context.jvmId;
        LOG.info("JVM with ID : " + jvmId + " asked for a task");
        JvmTask jvmTask = null;
        // TODO: Is it an authorized container to get a task? Otherwise return null.
        // TODO: Child.java's firstTaskID isn't really firstTaskID. Ask for update
        // to jobId and task-type.
        WrappedJvmID wJvmID = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap, jvmId.getId());
        // Try to look up the task. We remove it directly as we don't give
        // multiple tasks to a JVM
        if (!jvmIDToActiveAttemptMap.containsKey(wJvmID)) {
            LOG.info("JVM with ID: " + jvmId + " is invalid and will be killed.");
            jvmTask = TASK_FOR_INVALID_JVM;
        } else {
            if (!launchedJVMs.contains(wJvmID)) {
                jvmTask = null;
                LOG.info("JVM with ID: " + jvmId + " asking for task before AM launch registered. Given null task");
            } else {
                // Remove the task as it is no longer needed, freeing up the memory.
                // Also, we have already told the JVM to process a task, so it is no
                // longer pending, and further requests should ask it to exit.
                org.apache.hadoop.mapred.Task task = jvmIDToActiveAttemptMap.remove(wJvmID);
                launchedJVMs.remove(wJvmID);
                LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
                task.setEncryptedSpillKey(encryptedSpillKey);
                jvmTask = new JvmTask(task, false);
            }
        }
        return jvmTask;
    }

    @Override
    public void registerPendingTask(org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {
        // Create the mapping so that it is easy to look up
        // when the jvm comes back to ask for Task.
        // A JVM not present in this map is an illegal task/JVM.
        jvmIDToActiveAttemptMap.put(jvmID, task);
    }

    @Override
    public void registerLaunchedTask(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID, WrappedJvmID jvmId) {
        // The AM considers the task to be launched (Has asked the NM to launch it)
        // The JVM will only be given a task after this registration.
        launchedJVMs.add(jvmId);
        taskHeartbeatHandler.register(attemptID);
    }

    @Override
    public void unregister(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID, WrappedJvmID jvmID) {
        // Unregistration also comes from the same TaskAttempt which does the
        // registration. Events are ordered at TaskAttempt, so unregistration will
        // always come after registration.
        // Remove from launchedJVMs before jvmIDToActiveAttemptMap to avoid
        // synchronization issue with getTask(). getTask should be checking
        // jvmIDToActiveAttemptMap before it checks launchedJVMs.
        // remove the mappings if not already removed
        launchedJVMs.remove(jvmID);
        jvmIDToActiveAttemptMap.remove(jvmID);
        // unregister this attempt
        taskHeartbeatHandler.unregister(attemptID);
    }

    @Override
    public ProtocolSignature getProtocolSignature(String protocol, long clientVersion, int clientMethodsHash) throws IOException {
        return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion, clientMethodsHash);
    }
}
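
A minimal sketch of standing this listener up outside a full ApplicationMaster (an assumed driver/test setting: the collaborators may be mocks, and the spill key below is a placeholder):

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TaskAttemptListenerImpl;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;

public class ListenerWiringSketch {

    public static InetSocketAddress startListener(AppContext context, JobTokenSecretManager tokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler) {
        byte[] spillKey = new byte[] { 0 };  // placeholder secret for illustration
        TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl(context, tokenSecretManager, rmHeartbeatHandler, spillKey);
        Configuration conf = new Configuration();
        listener.init(conf);   // registers the TaskHeartbeatHandler
        listener.start();      // starts the umbilical RPC server
        return listener.getAddress();  // address tasks use to call back
    }
}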

13 Source : TestAMWebServices.java
with Apache License 2.0
from NJUJYB

public void verifyBlacklistedNodesInfo(JSONObject blacklist, AppContext ctx) throws JSONException, Exception {
    JSONArray array = blacklist.getJSONArray("blacklistedNodes");
    assertEquals(array.length(), ctx.getBlacklistedNodes().size());
    for (int i = 0; i < array.length(); i++) {
        assertTrue(ctx.getBlacklistedNodes().contains(array.getString(i)));
    }
}
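
A hypothetical usage sketch for the helper above, written as a companion method in the same test class; it assumes Mockito and the jettison JSON types these tests use, and the node name is made up:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import java.util.Set;

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONObject;

public void verifySingleBlacklistedNode() throws Exception {
    // Mocked AppContext that reports one blacklisted node.
    AppContext ctx = mock(AppContext.class);
    Set<String> nodes = Collections.singleton("badnode1");
    when(ctx.getBlacklistedNodes()).thenReturn(nodes);
    // Build the JSON shape the web service would return.
    JSONObject blacklist = new JSONObject();
    blacklist.put("blacklistedNodes", new JSONArray().put("badnode1"));
    verifyBlacklistedNodesInfo(blacklist, ctx);
}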

13 Source : TestAMWebApp.java
with Apache License 2.0
from NJUJYB

public static Map<String, String> getJobParams(AppContext appContext) {
    JobId jobId = appContext.getAllJobs().entrySet().iterator().next().getKey();
    Map<String, String> params = new HashMap<String, String>();
    params.put(AMParams.JOB_ID, MRApps.toString(jobId));
    return params;
}

13 Source : TestTaskAttemptListenerImpl.java
with Apache License 2.0
from NJUJYB

@Test(timeout = 5000)
public void testGetTask() throws IOException {
    AppContext appCtx = mock(AppContext.class);
    JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
    RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
    TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
    MockTaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler);
    Configuration conf = new Configuration();
    listener.init(conf);
    listener.start();
    JVMId id = new JVMId("foo", 1, true, 1);
    WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
    // Verify ask before registration.
    // The JVM ID has not been registered yet so we should kill it.
    JvmContext context = new JvmContext();
    context.jvmId = id;
    JvmTask result = listener.getTask(context);
    assertNotNull(result);
    assertTrue(result.shouldDie);
    // Verify ask after registration but before launch.
    // Don't kill, should be null.
    TaskAttemptId attemptID = mock(TaskAttemptId.class);
    Task task = mock(Task.class);
    // Now put a task with the ID
    listener.registerPendingTask(task, wid);
    result = listener.getTask(context);
    assertNull(result);
    // Unregister for more testing.
    listener.unregister(attemptID, wid);
    // Verify ask after registration and launch
    // Now put a task with the ID
    listener.registerPendingTask(task, wid);
    listener.registerLaunchedTask(attemptID, wid);
    verify(hbHandler).register(attemptID);
    result = listener.getTask(context);
    assertNotNull(result);
    assertFalse(result.shouldDie);
    // Don't unregister yet for more testing.
    // Verify that if we call it again a second time we are told to die.
    result = listener.getTask(context);
    assertNotNull(result);
    assertTrue(result.shouldDie);
    listener.unregister(attemptID, wid);
    // Verify after unregistration.
    result = listener.getTask(context);
    assertNotNull(result);
    assertTrue(result.shouldDie);
    listener.stop();
    // test JVMID
    JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
    assertNotNull(jvmid);
    try {
        JVMId.forName("jvm_001_002_m_004_006");
        Assert.fail();
    } catch (IllegalArgumentException e) {
        assertEquals(e.getMessage(), "TaskId string : jvm_001_002_m_004_006 is not properly formed");
    }
}
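
The test relies on a MockTaskAttemptListenerImpl that is not shown on this page. A plausible sketch of such a subclass (an assumption, not the verbatim class) overrides the protected service hooks so no real RPC server or heartbeat handler is created:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;

public static class MockTaskAttemptListenerImpl extends TaskAttemptListenerImpl {

    public MockTaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler, TaskHeartbeatHandler hbHandler) {
        super(context, jobTokenSecretManager, rmHeartbeatHandler, null);
        // Inject the mocked heartbeat handler directly.
        this.taskHeartbeatHandler = hbHandler;
    }

    @Override
    protected void registerHeartbeatHandler(Configuration conf) {
        // empty: the handler was injected in the constructor
    }

    @Override
    protected void startRpcServer() {
        // empty: no real RPC server in the test
    }

    @Override
    protected void stopRpcServer() {
        // empty
    }
}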

13 Source : TaskSpeculationPredicate.java
with Apache License 2.0
from NJUJYB

boolean canSpeculate(AppContext context, TaskId taskID) {
    // This class rejects speculating any task that already has speculations,
    // or isn't running.
    // Subclasses should call TaskSpeculationPredicate.canSpeculate(...), but
    // can be even more restrictive.
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);
    Task task = job.getTask(taskID);
    return task.getAttempts().size() == 1;
}
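
A hypothetical "more restrictive" subclass in the spirit of that comment (illustrative only; it would have to live in the same package, since canSpeculate is package-private):

import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

class MapOnlySpeculationPredicate extends TaskSpeculationPredicate {

    @Override
    boolean canSpeculate(AppContext context, TaskId taskID) {
        // Base check: exactly one attempt, i.e. not already speculating.
        // Extra restriction: only allow map tasks to speculate.
        return super.canSpeculate(context, taskID) && taskID.getTaskType() == TaskType.MAP;
    }
}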

13 Source : ContainerLauncherImpl.java
with Apache License 2.0
from NJUJYB

/**
 * This class is responsible for launching containers.
 */
public class ContainerLauncherImpl extends AbstractService implements ContainerLauncher {

    static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class);

    private ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>();

    private final AppContext context;

    protected ThreadPoolExecutor launcherPool;

    protected static final int INITIAL_POOL_SIZE = 10;

    private int limitOnPoolSize;

    private Thread eventHandlingThread;

    protected BlockingQueue<ContainerLauncherEvent> eventQueue = new LinkedBlockingQueue<ContainerLauncherEvent>();

    private final AtomicBoolean stopped;

    private ContainerManagementProtocolProxy cmProxy;

    private Container getContainer(ContainerLauncherEvent event) {
        ContainerId id = event.getContainerID();
        Container c = containers.get(id);
        if (c == null) {
            c = new Container(event.getTaskAttemptID(), event.getContainerID(), event.getContainerMgrAddress());
            Container old = containers.putIfAbsent(id, c);
            if (old != null) {
                c = old;
            }
        }
        return c;
    }

    private void removeContainerIfDone(ContainerId id) {
        Container c = containers.get(id);
        if (c != null && c.isCompletelyDone()) {
            containers.remove(id);
        }
    }

    private static enum ContainerState {

        PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
    }

    private class Container {

        private ContainerState state;

        // store enough information to be able to cleanup the container
        private TaskAttemptId taskAttemptID;

        private ContainerId containerID;

        final private String containerMgrAddress;

        public Container(TaskAttemptId taId, ContainerId containerID, String containerMgrAddress) {
            this.state = ContainerState.PREP;
            this.taskAttemptID = taId;
            this.containerMgrAddress = containerMgrAddress;
            this.containerID = containerID;
        }

        public synchronized boolean isCompletelyDone() {
            return state == ContainerState.DONE || state == ContainerState.FAILED;
        }

        @SuppressWarnings("unchecked")
        public synchronized void launch(ContainerRemoteLaunchEvent event) {
            LOG.info("Launching " + taskAttemptID);
            if (this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
                state = ContainerState.DONE;
                sendContainerLaunchFailedMsg(taskAttemptID, "Container was killed before it was launched");
                return;
            }
            ContainerManagementProtocolProxyData proxy = null;
            try {
                proxy = getCMProxy(containerMgrAddress, containerID);
                // Construct the actual Container
                ContainerLaunchContext containerLaunchContext = event.getContainerLaunchContext();
                // Now launch the actual container
                StartContainerRequest startRequest = StartContainerRequest.newInstance(containerLaunchContext, event.getContainerToken());
                List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
                list.add(startRequest);
                StartContainersRequest requestList = StartContainersRequest.newInstance(list);
                SplitDataInfo sdi = event.getSplitDataInfo();
                if (sdi != null) {
                    if (containerMgrAddress.split(":")[0].equals(sdi.getTargetName()) && containerID.toString().equals(sdi.getContainerId())) {
                        ContainerLaunchContext clc = requestList.getStartContainerRequests().get(0).getContainerLaunchContext();
                        clc.getEnvironment().put("DeployDecision", sdi.getInfoAppMasterToNode());
                        event.clearSplitDataInfo();
                    }
                }
                StartContainersResponse response = proxy.getContainerManagementProtocol().startContainers(requestList);
                if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(containerID)) {
                    throw response.getFailedRequests().get(containerID).deSerialize();
                }
                ByteBuffer portInfo = response.getAllServicesMetaData().get(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
                int port = -1;
                if (portInfo != null) {
                    port = ShuffleHandler.deserializeMetaData(portInfo);
                }
                LOG.info("Shuffle port returned by ContainerManager for " + taskAttemptID + " : " + port);
                if (port < 0) {
                    this.state = ContainerState.FAILED;
                    throw new IllegalStateException("Invalid shuffle port number " + port + " returned for " + taskAttemptID);
                }
                // after launching, send launched event to task attempt to move
                // it from ASSIGNED to RUNNING state
                context.getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
                this.state = ContainerState.RUNNING;
            } catch (Throwable t) {
                String message = "Container launch failed for " + containerID + " : " + StringUtils.stringifyException(t);
                this.state = ContainerState.FAILED;
                sendContainerLaunchFailedMsg(taskAttemptID, message);
            } finally {
                if (proxy != null) {
                    cmProxy.mayBeCloseProxy(proxy);
                }
            }
        }

        @SuppressWarnings("unchecked")
        public synchronized void kill() {
            if (this.state == ContainerState.PREP) {
                this.state = ContainerState.KILLED_BEFORE_LAUNCH;
            } else if (!isCompletelyDone()) {
                LOG.info("KILLING " + taskAttemptID);
                ContainerManagementProtocolProxyData proxy = null;
                try {
                    proxy = getCMProxy(this.containerMgrAddress, this.containerID);
                    // kill the remote container if already launched
                    List<ContainerId> ids = new ArrayList<ContainerId>();
                    ids.add(this.containerID);
                    StopContainersRequest request = StopContainersRequest.newInstance(ids);
                    StopContainersResponse response = proxy.getContainerManagementProtocol().stopContainers(request);
                    if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(this.containerID)) {
                        throw response.getFailedRequests().get(this.containerID).deSerialize();
                    }
                } catch (Throwable t) {
                    // ignore the cleanup failure
                    String message = "cleanup failed for container " + this.containerID + " : " + StringUtils.stringifyException(t);
                    context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(this.taskAttemptID, message));
                    LOG.warn(message);
                } finally {
                    if (proxy != null) {
                        cmProxy.mayBeCloseProxy(proxy);
                    }
                }
                this.state = ContainerState.DONE;
            }
            // after killing, send killed event to task attempt
            context.getEventHandler().handle(new TaskAttemptEvent(this.taskAttemptID, TaskAttemptEventType.TA_CONTAINER_CLEANED));
        }
    }

    public ContainerLauncherImpl(AppContext context) {
        super(ContainerLauncherImpl.class.getName());
        this.context = context;
        this.stopped = new AtomicBoolean(false);
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        this.limitOnPoolSize = conf.getInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
        LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
        super.serviceInit(conf);
        cmProxy = new ContainerManagementProtocolProxy(conf);
    }

    protected void serviceStart() throws Exception {
        ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
        // Start with a default core-pool size of 10 and change it dynamically.
        launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
        eventHandlingThread = new Thread() {

            @Override
            public void run() {
                ContainerLauncherEvent event = null;
                Set<String> allNodes = new HashSet<String>();
                while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                    try {
                        event = eventQueue.take();
                    } catch (InterruptedException e) {
                        if (!stopped.get()) {
                            LOG.error("Returning, interrupted : " + e);
                        }
                        return;
                    }
                    allNodes.add(event.getContainerMgrAddress());
                    int poolSize = launcherPool.getCorePoolSize();
                    // Consider growing the pool only if we haven't reached the
                    // maximum limit yet.
                    if (poolSize != limitOnPoolSize) {
                        // nodes where containers will run at *this* point of time. This is
                        // *not* the cluster size and doesn't need to be.
                        int numNodes = allNodes.size();
                        int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                        if (poolSize < idealPoolSize) {
                        // Bump the pool size to idealPoolSize + INITIAL_POOL_SIZE; the
                        // latter is just a buffer so we are not constantly growing
                        // the pool size
                            int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + INITIAL_POOL_SIZE);
                            LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes);
                            launcherPool.setCorePoolSize(newPoolSize);
                        }
                    }
                    // the events from the queue are handled in parallel
                    // using a thread pool
                    launcherPool.execute(createEventProcessor(event));
                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
                }
            }
        };
        eventHandlingThread.setName("ContainerLauncher Event Handler");
        eventHandlingThread.start();
        super.serviceStart();
    }

    private void shutdownAllContainers() {
        for (Container ct : this.containers.values()) {
            if (ct != null) {
                ct.kill();
            }
        }
    }

    protected void serviceStop() throws Exception {
        if (stopped.getAndSet(true)) {
            // return if already stopped
            return;
        }
        // shutdown any containers that might be left running
        shutdownAllContainers();
        if (eventHandlingThread != null) {
            eventHandlingThread.interrupt();
        }
        if (launcherPool != null) {
            launcherPool.shutdownNow();
        }
        super.serviceStop();
    }

    protected EventProcessor createEventProcessor(ContainerLauncherEvent event) {
        return new EventProcessor(event);
    }

    /**
     * Setup and start the container on remote nodemanager.
     */
    class EventProcessor implements Runnable {

        private ContainerLauncherEvent event;

        EventProcessor(ContainerLauncherEvent event) {
            this.event = event;
        }

        @Override
        public void run() {
            LOG.info("Processing the event " + event.toString());
            // Load ContainerManager tokens before creating a connection.
            // TODO: Do it only once per NodeManager.
            ContainerId containerID = event.getContainerID();
            Container c = getContainer(event);
            switch(event.getType()) {
                case CONTAINER_REMOTE_LAUNCH:
                    ContainerRemoteLaunchEvent launchEvent = (ContainerRemoteLaunchEvent) event;
                    c.launch(launchEvent);
                    break;
                case CONTAINER_REMOTE_CLEANUP:
                    c.kill();
                    break;
            }
            removeContainerIfDone(containerID);
        }
    }

    @SuppressWarnings("unchecked")
    void sendContainerLaunchFailedMsg(TaskAttemptId taskAttemptID, String message) {
        LOG.error(message);
        context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
        context.getEventHandler().handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
    }

    @Override
    public void handle(ContainerLauncherEvent event) {
        try {
            eventQueue.put(event);
        } catch (InterruptedException e) {
            throw new YarnRuntimeException(e);
        }
    }

    public ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData getCMProxy(String containerMgrBindAddr, ContainerId containerId) throws IOException {
        return cmProxy.getProxy(containerMgrBindAddr, containerId);
    }
}
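
A minimal wiring sketch, not taken from any of the listed projects: it shows how the launcher's AbstractService lifecycle is driven once an AppContext is available. The Mockito stubs and the class name LauncherWiringSketch are illustrative assumptions; in a real AM the context comes from the running application master.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.yarn.event.EventHandler;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class LauncherWiringSketch {

    public static void main(String[] args) throws Exception {
        // Stub the AppContext; the launcher itself only needs getEventHandler().
        AppContext context = mock(AppContext.class);
        when(context.getEventHandler()).thenReturn(mock(EventHandler.class));
        ContainerLauncherImpl launcher = new ContainerLauncherImpl(context);
        // serviceInit reads the thread-pool limit and creates the CM proxy helper.
        launcher.init(new Configuration());
        // serviceStart spawns the "ContainerLauncher Event Handler" thread.
        launcher.start();
        // launcher.handle(event) would enqueue a ContainerLauncherEvent here;
        // building one needs a TaskAttemptId, ContainerId, NM address and token.
        // serviceStop kills any remaining containers and shuts the pool down.
        launcher.stop();
    }
}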

13 Source : ContainerLauncherImpl.java
with Apache License 2.0
from naver

/**
 * This class is responsible for launching containers.
 */
public class ContainerLauncherImpl extends AbstractService implements ContainerLauncher {

    static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class);

    private ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>();

    private final AppContext context;

    protected ThreadPoolExecutor launcherPool;

    protected int initialPoolSize;

    private int limitOnPoolSize;

    private Thread eventHandlingThread;

    protected BlockingQueue<ContainerLauncherEvent> eventQueue = new LinkedBlockingQueue<ContainerLauncherEvent>();

    private final AtomicBoolean stopped;

    private ContainerManagementProtocolProxy cmProxy;

    private Container getContainer(ContainerLauncherEvent event) {
        ContainerId id = event.getContainerID();
        Container c = containers.get(id);
        if (c == null) {
            c = new Container(event.getTaskAttemptID(), event.getContainerID(), event.getContainerMgrAddress());
            Container old = containers.putIfAbsent(id, c);
            if (old != null) {
                c = old;
            }
        }
        return c;
    }

    private void removeContainerIfDone(ContainerId id) {
        Container c = containers.get(id);
        if (c != null && c.isCompletelyDone()) {
            containers.remove(id);
        }
    }

    private enum ContainerState {

        PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
    }

    private class Container {

        private ContainerState state;

        // store enough information to be able to cleanup the container
        private TaskAttemptId taskAttemptID;

        private ContainerId containerID;

        private final String containerMgrAddress;

        public Container(TaskAttemptId taId, ContainerId containerID, String containerMgrAddress) {
            this.state = ContainerState.PREP;
            this.taskAttemptID = taId;
            this.containerMgrAddress = containerMgrAddress;
            this.containerID = containerID;
        }

        public synchronized boolean isCompletelyDone() {
            return state == ContainerState.DONE || state == ContainerState.FAILED;
        }

        @SuppressWarnings("unchecked")
        public synchronized void launch(ContainerRemoteLaunchEvent event) {
            LOG.info("Launching " + taskAttemptID);
            if (this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
                state = ContainerState.DONE;
                sendContainerLaunchFailedMsg(taskAttemptID, "Container was killed before it was launched");
                return;
            }
            ContainerManagementProtocolProxyData proxy = null;
            try {
                proxy = getCMProxy(containerMgrAddress, containerID);
                // Construct the actual Container
                ContainerLaunchContext containerLaunchContext = event.getContainerLaunchContext();
                // Now launch the actual container
                StartContainerRequest startRequest = StartContainerRequest.newInstance(containerLaunchContext, event.getContainerToken());
                List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
                list.add(startRequest);
                StartContainersRequest requestList = StartContainersRequest.newInstance(list);
                StartContainersResponse response = proxy.getContainerManagementProtocol().startContainers(requestList);
                if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(containerID)) {
                    throw response.getFailedRequests().get(containerID).deSerialize();
                }
                ByteBuffer portInfo = response.getAllServicesMetaData().get(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
                int port = -1;
                if (portInfo != null) {
                    port = ShuffleHandler.deserializeMetaData(portInfo);
                }
                LOG.info("Shuffle port returned by ContainerManager for " + taskAttemptID + " : " + port);
                if (port < 0) {
                    this.state = ContainerState.FAILED;
                    throw new IllegalStateException("Invalid shuffle port number " + port + " returned for " + taskAttemptID);
                }
                // after launching, send launched event to task attempt to move
                // it from ASSIGNED to RUNNING state
                context.getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
                this.state = ContainerState.RUNNING;
            } catch (Throwable t) {
                String message = "Container launch failed for " + containerID + " : " + StringUtils.stringifyException(t);
                this.state = ContainerState.FAILED;
                sendContainerLaunchFailedMsg(taskAttemptID, message);
            } finally {
                if (proxy != null) {
                    cmProxy.mayBeCloseProxy(proxy);
                }
            }
        }

        @SuppressWarnings("unchecked")
        public synchronized void kill() {
            if (this.state == ContainerState.PREP) {
                this.state = ContainerState.KILLED_BEFORE_LAUNCH;
            } else if (!isCompletelyDone()) {
                LOG.info("KILLING " + taskAttemptID);
                ContainerManagementProtocolProxyData proxy = null;
                try {
                    proxy = getCMProxy(this.containerMgrAddress, this.containerID);
                    // kill the remote container if already launched
                    List<ContainerId> ids = new ArrayList<ContainerId>();
                    ids.add(this.containerID);
                    StopContainersRequest request = StopContainersRequest.newInstance(ids);
                    StopContainersResponse response = proxy.getContainerManagementProtocol().stopContainers(request);
                    if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(this.containerID)) {
                        throw response.getFailedRequests().get(this.containerID).deSerialize();
                    }
                } catch (Throwable t) {
                    // ignore the cleanup failure
                    String message = "cleanup failed for container " + this.containerID + " : " + StringUtils.stringifyException(t);
                    context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(this.taskAttemptID, message));
                    LOG.warn(message);
                } finally {
                    if (proxy != null) {
                        cmProxy.mayBeCloseProxy(proxy);
                    }
                }
                this.state = ContainerState.DONE;
            }
            // after killing, send killed event to task attempt
            context.getEventHandler().handle(new TaskAttemptEvent(this.taskAttemptID, TaskAttemptEventType.TA_CONTAINER_CLEANED));
        }
    }

    public ContainerLauncherImpl(AppContext context) {
        super(ContainerLauncherImpl.class.getName());
        this.context = context;
        this.stopped = new AtomicBoolean(false);
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        this.limitOnPoolSize = conf.getInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
        LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
        this.initialPoolSize = conf.getInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE);
        LOG.info("The thread pool initial size is " + this.initialPoolSize);
        super.serviceInit(conf);
        cmProxy = new ContainerManagementProtocolProxy(conf);
    }

    protected void serviceStart() throws Exception {
        ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
        // Start with the configured initial core-pool size and grow it dynamically.
        launcherPool = new ThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
        eventHandlingThread = new Thread() {

            @Override
            public void run() {
                ContainerLauncherEvent event = null;
                Set<String> allNodes = new HashSet<String>();
                while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                    try {
                        event = eventQueue.take();
                    } catch (InterruptedException e) {
                        if (!stopped.get()) {
                            LOG.error("Returning, interrupted : " + e);
                        }
                        return;
                    }
                    allNodes.add(event.getContainerMgrAddress());
                    int poolSize = launcherPool.getCorePoolSize();
                    // Consider growing the pool only if we haven't reached the
                    // maximum limit yet.
                    if (poolSize != limitOnPoolSize) {
                        // nodes where containers will run at *this* point of time. This is
                        // *not* the cluster size and doesn't need to be.
                        int numNodes = allNodes.size();
                        int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                        if (poolSize < idealPoolSize) {
                        // Bump the pool size to idealPoolSize + initialPoolSize; the
                        // latter is just a buffer so we are not constantly growing
                        // the pool size
                            int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
                            LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes);
                            launcherPool.setCorePoolSize(newPoolSize);
                        }
                    }
                    // the events from the queue are handled in parallel
                    // using a thread pool
                    launcherPool.execute(createEventProcessor(event));
                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
                }
            }
        };
        eventHandlingThread.setName("ContainerLauncher Event Handler");
        eventHandlingThread.start();
        super.serviceStart();
    }

    private void shutdownAllContainers() {
        for (Container ct : this.containers.values()) {
            if (ct != null) {
                ct.kill();
            }
        }
    }

    protected void serviceStop() throws Exception {
        if (stopped.getAndSet(true)) {
            // return if already stopped
            return;
        }
        // shutdown any containers that might be left running
        shutdownAllContainers();
        if (eventHandlingThread != null) {
            eventHandlingThread.interrupt();
        }
        if (launcherPool != null) {
            launcherPool.shutdownNow();
        }
        super.serviceStop();
    }

    protected EventProcessor createEventProcessor(ContainerLauncherEvent event) {
        return new EventProcessor(event);
    }

    /**
     * Setup and start the container on remote nodemanager.
     */
    class EventProcessor implements Runnable {

        private ContainerLauncherEvent event;

        EventProcessor(ContainerLauncherEvent event) {
            this.event = event;
        }

        @Override
        public void run() {
            LOG.info("Processing the event " + event.toString());
            // Load ContainerManager tokens before creating a connection.
            // TODO: Do it only once per NodeManager.
            ContainerId containerID = event.getContainerID();
            Container c = getContainer(event);
            switch(event.getType()) {
                case CONTAINER_REMOTE_LAUNCH:
                    ContainerRemoteLaunchEvent launchEvent = (ContainerRemoteLaunchEvent) event;
                    c.launch(launchEvent);
                    break;
                case CONTAINER_REMOTE_CLEANUP:
                    c.kill();
                    break;
            }
            removeContainerIfDone(containerID);
        }
    }

    @SuppressWarnings("unchecked")
    void sendContainerLaunchFailedMsg(TaskAttemptId taskAttemptID, String message) {
        LOG.error(message);
        context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
        context.getEventHandler().handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
    }

    @Override
    public void handle(ContainerLauncherEvent event) {
        try {
            eventQueue.put(event);
        } catch (InterruptedException e) {
            throw new YarnRuntimeException(e);
        }
    }

    public ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData getCMProxy(String containerMgrBindAddr, ContainerId containerId) throws IOException {
        return cmProxy.getProxy(containerMgrBindAddr, containerId);
    }
}
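
The distinguishing change in this copy is that the initial core-pool size is read from configuration instead of the hard-coded INITIAL_POOL_SIZE constant. A hedged sketch of setting both knobs before init; the values 20 and 200 and the helper name are illustrative, not taken from the source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.AppContext;

public class LauncherPoolConfigSketch {

    // 'context' would come from the running application master.
    static ContainerLauncherImpl configuredLauncher(AppContext context) {
        Configuration conf = new Configuration();
        // Core pool starts at 20 threads instead of the default.
        conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE, 20);
        // Dynamic growth in the event loop is capped at 200 threads.
        conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 200);
        ContainerLauncherImpl launcher = new ContainerLauncherImpl(context);
        launcher.init(conf); // serviceInit reads both keys before the pool is created
        return launcher;
    }
}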

13 Source : ContainerLauncherImpl.java
with Apache License 2.0
from lsds

/**
 * This class is responsible for launching containers.
 */
public class ContainerLauncherImpl extends AbstractService implements ContainerLauncher {

    static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class);

    private ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>();

    private final AppContext context;

    protected ThreadPoolExecutor launcherPool;

    protected static final int INITIAL_POOL_SIZE = 10;

    private int limitOnPoolSize;

    private Thread eventHandlingThread;

    protected BlockingQueue<ContainerLauncherEvent> eventQueue = new LinkedBlockingQueue<ContainerLauncherEvent>();

    private final AtomicBoolean stopped;

    private ContainerManagementProtocolProxy cmProxy;

    private Container getContainer(ContainerLauncherEvent event) {
        ContainerId id = event.getContainerID();
        Container c = containers.get(id);
        if (c == null) {
            c = new Container(event.getTaskAttemptID(), event.getContainerID(), event.getContainerMgrAddress());
            Container old = containers.putIfAbsent(id, c);
            if (old != null) {
                c = old;
            }
        }
        return c;
    }

    private void removeContainerIfDone(ContainerId id) {
        Container c = containers.get(id);
        if (c != null && c.isCompletelyDone()) {
            containers.remove(id);
        }
    }

    private enum ContainerState {

        PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
    }

    private class Container {

        private ContainerState state;

        // store enough information to be able to cleanup the container
        private TaskAttemptId taskAttemptID;

        private ContainerId containerID;

        private final String containerMgrAddress;

        public Container(TaskAttemptId taId, ContainerId containerID, String containerMgrAddress) {
            this.state = ContainerState.PREP;
            this.taskAttemptID = taId;
            this.containerMgrAddress = containerMgrAddress;
            this.containerID = containerID;
        }

        public synchronized boolean isCompletelyDone() {
            return state == ContainerState.DONE || state == ContainerState.FAILED;
        }

        @SuppressWarnings("unchecked")
        public synchronized void launch(ContainerRemoteLaunchEvent event) {
            LOG.info("Launching " + taskAttemptID);
            if (this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
                state = ContainerState.DONE;
                sendContainerLaunchFailedMsg(taskAttemptID, "Container was killed before it was launched");
                return;
            }
            ContainerManagementProtocolProxyData proxy = null;
            try {
                proxy = getCMProxy(containerMgrAddress, containerID);
                // Construct the actual Container
                ContainerLaunchContext containerLaunchContext = event.getContainerLaunchContext();
                // Now launch the actual container
                StartContainerRequest startRequest = StartContainerRequest.newInstance(containerLaunchContext, event.getContainerToken());
                List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
                list.add(startRequest);
                StartContainersRequest requestList = StartContainersRequest.newInstance(list);
                StartContainersResponse response = proxy.getContainerManagementProtocol().startContainers(requestList);
                if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(containerID)) {
                    throw response.getFailedRequests().get(containerID).deSerialize();
                }
                ByteBuffer portInfo = response.getAllServicesMetaData().get(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
                int port = -1;
                if (portInfo != null) {
                    port = ShuffleHandler.deserializeMetaData(portInfo);
                }
                LOG.info("Shuffle port returned by ContainerManager for " + taskAttemptID + " : " + port);
                if (port < 0) {
                    this.state = ContainerState.FAILED;
                    throw new IllegalStateException("Invalid shuffle port number " + port + " returned for " + taskAttemptID);
                }
                // after launching, send launched event to task attempt to move
                // it from ASSIGNED to RUNNING state
                context.getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
                this.state = ContainerState.RUNNING;
            } catch (Throwable t) {
                String message = "Container launch failed for " + containerID + " : " + StringUtils.stringifyException(t);
                this.state = ContainerState.FAILED;
                sendContainerLaunchFailedMsg(taskAttemptID, message);
            } finally {
                if (proxy != null) {
                    cmProxy.mayBeCloseProxy(proxy);
                }
            }
        }

        @SuppressWarnings("unchecked")
        public synchronized void kill() {
            if (this.state == ContainerState.PREP) {
                this.state = ContainerState.KILLED_BEFORE_LAUNCH;
            } else if (!isCompletelyDone()) {
                LOG.info("KILLING " + taskAttemptID);
                ContainerManagementProtocolProxyData proxy = null;
                try {
                    proxy = getCMProxy(this.containerMgrAddress, this.containerID);
                    // kill the remote container if already launched
                    List<ContainerId> ids = new ArrayList<ContainerId>();
                    ids.add(this.containerID);
                    StopContainersRequest request = StopContainersRequest.newInstance(ids);
                    StopContainersResponse response = proxy.getContainerManagementProtocol().stopContainers(request);
                    if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(this.containerID)) {
                        throw response.getFailedRequests().get(this.containerID).deSerialize();
                    }
                } catch (Throwable t) {
                    // ignore the cleanup failure
                    String message = "cleanup failed for container " + this.containerID + " : " + StringUtils.stringifyException(t);
                    context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(this.taskAttemptID, message));
                    LOG.warn(message);
                } finally {
                    if (proxy != null) {
                        cmProxy.mayBeCloseProxy(proxy);
                    }
                }
                this.state = ContainerState.DONE;
            }
            // after killing, send killed event to task attempt
            context.getEventHandler().handle(new TaskAttemptEvent(this.taskAttemptID, TaskAttemptEventType.TA_CONTAINER_CLEANED));
        }
    }

    public ContainerLauncherImpl(AppContext context) {
        super(ContainerLauncherImpl.class.getName());
        this.context = context;
        this.stopped = new AtomicBoolean(false);
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        this.limitOnPoolSize = conf.getInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
        LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
        super.serviceInit(conf);
        cmProxy = new ContainerManagementProtocolProxy(conf);
    }

    protected void serviceStart() throws Exception {
        ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("ContainerLauncher #%d").setDaemon(true).build();
        // Start with a default core-pool size of 10 and change it dynamically.
        launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
        eventHandlingThread = new Thread() {

            @Override
            public void run() {
                ContainerLauncherEvent event = null;
                Set<String> allNodes = new HashSet<String>();
                while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                    try {
                        event = eventQueue.take();
                    } catch (InterruptedException e) {
                        if (!stopped.get()) {
                            LOG.error("Returning, interrupted : " + e);
                        }
                        return;
                    }
                    allNodes.add(event.getContainerMgrAddress());
                    int poolSize = launcherPool.getCorePoolSize();
                    // Consider growing the pool only if we haven't reached the
                    // maximum limit yet.
                    if (poolSize != limitOnPoolSize) {
                        // nodes where containers will run at *this* point of time. This is
                        // *not* the cluster size and doesn't need to be.
                        int numNodes = allNodes.size();
                        int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
                        if (poolSize < idealPoolSize) {
                        // Bump the pool size to idealPoolSize + INITIAL_POOL_SIZE; the
                        // latter is just a buffer so we are not constantly growing
                        // the pool size
                            int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + INITIAL_POOL_SIZE);
                            LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes);
                            launcherPool.setCorePoolSize(newPoolSize);
                        }
                    }
                    // the events from the queue are handled in parallel
                    // using a thread pool
                    launcherPool.execute(createEventProcessor(event));
                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
                }
            }
        };
        eventHandlingThread.setName("ContainerLauncher Event Handler");
        eventHandlingThread.start();
        super.serviceStart();
    }

    private void shutdownAllContainers() {
        for (Container ct : this.containers.values()) {
            if (ct != null) {
                ct.kill();
            }
        }
    }

    protected void serviceStop() throws Exception {
        if (stopped.getAndSet(true)) {
            // return if already stopped
            return;
        }
        // shutdown any containers that might be left running
        shutdownAllContainers();
        if (eventHandlingThread != null) {
            eventHandlingThread.interrupt();
        }
        if (launcherPool != null) {
            launcherPool.shutdownNow();
        }
        super.serviceStop();
    }

    protected EventProcessor createEventProcessor(ContainerLauncherEvent event) {
        return new EventProcessor(event);
    }

    /**
     * Setup and start the container on remote nodemanager.
     */
    class EventProcessor implements Runnable {

        private ContainerLauncherEvent event;

        EventProcessor(ContainerLauncherEvent event) {
            this.event = event;
        }

        @Override
        public void run() {
            LOG.info("Processing the event " + event.toString());
            // Load ContainerManager tokens before creating a connection.
            // TODO: Do it only once per NodeManager.
            ContainerId containerID = event.getContainerID();
            Container c = getContainer(event);
            switch(event.getType()) {
                case CONTAINER_REMOTE_LAUNCH:
                    ContainerRemoteLaunchEvent launchEvent = (ContainerRemoteLaunchEvent) event;
                    c.launch(launchEvent);
                    break;
                case CONTAINER_REMOTE_CLEANUP:
                    c.kill();
                    break;
            }
            removeContainerIfDone(containerID);
        }
    }

    @SuppressWarnings("unchecked")
    void sendContainerLaunchFailedMsg(TaskAttemptId taskAttemptID, String message) {
        LOG.error(message);
        context.getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
        context.getEventHandler().handle(new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
    }

    @Override
    public void handle(ContainerLauncherEvent event) {
        try {
            eventQueue.put(event);
        } catch (InterruptedException e) {
            throw new YarnRuntimeException(e);
        }
    }

    public ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData getCMProxy(String containerMgrBindAddr, ContainerId containerId) throws IOException {
        return cmProxy.getProxy(containerMgrBindAddr, containerId);
    }
}
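
Since createEventProcessor is a protected factory hook, a test can swap in a processor that never contacts a NodeManager. A sketch under two stated assumptions: the subclass lives in the launcher's own package (EventProcessor is package-private there), and skipping run() entirely is acceptable for the test at hand; the class name NoOpLauncherSketch is illustrative.

import org.apache.hadoop.mapreduce.v2.app.AppContext;

// Assumed to sit in the same package as ContainerLauncherImpl so that the
// package-private EventProcessor constructor and LOG field are accessible.
public class NoOpLauncherSketch extends ContainerLauncherImpl {

    public NoOpLauncherSketch(AppContext context) {
        super(context);
    }

    @Override
    protected EventProcessor createEventProcessor(final ContainerLauncherEvent event) {
        // Swallow the event instead of launching or killing a remote container.
        return new EventProcessor(event) {
            @Override
            public void run() {
                LOG.info("Skipping remote call for " + event);
            }
        };
    }
}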
