blob: 35d7a404e10990a5c350c1d63f43198d5a4e8d65 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
# Jenkins Job Builder definition: matrix job running the Hadoop terasort
# benchmark against freshly built OpenJDK 9 images (server/client variants).
- job:
    name: jdk9-terasort-benchmark
    project-type: matrix
    defaults: global
    description: |
      * Runs the terasort benchmark.
    logrotate:
      numToKeep: 10
      artifactNumToKeep: 1
    properties:
      - authorization:
          linaro:
            - job-read
    disabled: false
    node: aarch64-06
    display-name: 'OpenJDK 9 - Run terasort benchmark'
    axes:
      # JVM_VARIANT x BUILD_TYPE matrix; only release builds are benchmarked.
      - axis:
          type: user-defined
          name: JVM_VARIANT
          values:
            - server
            - client
      - axis:
          type: user-defined
          name: BUILD_TYPE
          values:
            - release
    execution-strategy:
      # Benchmark runs must not share the node concurrently.
      sequential: true
    wrappers:
      - workspace-cleanup:
          dirmatch: false
      - timestamps
      - matrix-tie-parent:
          node: aarch64-06
    builders:
      # Fetch the JDK under test and the pre-built Hadoop tree.
      - copyartifact:
          project: jdk9-build-image
          filter: 'out/jdk9-${JVM_VARIANT}-${BUILD_TYPE}.tar.gz'
          target: incoming
          flatten: true
      - copyartifact:
          project: openjdk8-hadoop-LCA14
          filter: 'out/openjdk8-hadoop-LCA14.tar.gz'
          target: incoming
          flatten: true
      - shell: |
          #!/bin/bash
          set -exu
          NGIGABYTES=1
          ## Extract jdk
          rm -rf jdk9*
          tar xf incoming/BUILD_TYPE=${BUILD_TYPE},JVM_VARIANT=${JVM_VARIANT}/jdk9-${JVM_VARIANT}-${BUILD_TYPE}.tar.gz
          export JAVA_HOME=${WORKSPACE}/jdk9-${JVM_VARIANT}-${BUILD_TYPE}
          export PATH=${JAVA_HOME}/bin:$PATH
          ## Extract Hadoop pre-builts
          # Fixed typo: was "hadooop" (triple o), which never matched the
          # directory actually extracted below, leaving stale files behind.
          rm -rf openjdk8-hadoop-LCA14
          tar xf incoming/openjdk8-hadoop-LCA14.tar.gz
          ## Benchmark
          # FIXME
          (cd incoming; wget --progress=dot -e dotbytes=10M http://openjdk-apm1/openjdk8-build-artefacts/${NGIGABYTES}GB.tar.gz; tar xf ${NGIGABYTES}GB.tar.gz)
          TERAGEN_BASELINE_DIR=${WORKSPACE}/incoming
          HADOOP_DIR=${WORKSPACE}/openjdk8-hadoop-LCA14
          rm -rf out
          mkdir out
          # Point Hadoop at the JDK under test.
          sed -i '/^export JAVA_HOME=/d' ${HADOOP_DIR}/conf/hadoop-env.sh
          echo "export JAVA_HOME=$JAVA_HOME" >> ${HADOOP_DIR}/conf/hadoop-env.sh
          source ${HADOOP_DIR}/env.sh
          which hadoop
          which java
          which hdfs
          java -version
          stop-dfs.sh
          stop-yarn.sh
          rm -rf ${HOME}/hadoop-tmp
          hdfs namenode -format -force
          start-dfs.sh
          start-yarn.sh
          # Install the cleanup trap as soon as the daemons are up so a
          # failure in any later step (set -e) still stops HDFS/YARN.
          trap "stop-dfs.sh; stop-yarn.sh" EXIT
          # Need time for the datanodes to materialise.
          sleep 30
          jps
          hadoop fs -mkdir -p /user/$USER
          hadoop fs -copyFromLocal $TERAGEN_BASELINE_DIR/${NGIGABYTES}GB /user/$USER
          elapsed_time_file=$(mktemp /tmp/benchmark-terasort-XXXXXX.$$)
          HADOOP_OPTS=-${JVM_VARIANT} /usr/bin/time -o $elapsed_time_file -f "%e" terasort ${NGIGABYTES}GB ${NGIGABYTES}GB-sorted
          hadoop fs -rm -R ${NGIGABYTES}GB-sorted
          # Truncate fractional seconds before recording the result.
          sed -i 's/\..*//' $elapsed_time_file
          elapsed_time=$(cat $elapsed_time_file)
          date_as_epoch=$(date --date="$(date +'%Y-%m-%d')" +%s)
          echo "$date_as_epoch,$JVM_VARIANT,$NGIGABYTES,$elapsed_time" > out/terasort-results-${JVM_VARIANT}-${BUILD_TYPE}.csv
          rm -rf incoming/${NGIGABYTES}*
    publishers:
      - archive:
          artifacts: 'out/terasort-results-*.csv'