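Whole-stage codegen emitted by Spark for a single-partition Range -> Filter -> global-count pipeline: the Range source produces ids in batches, the predicate (id % 2 == 0) is inlined into the scan loop, and two fused no-grouping-key aggregations count the surviving rows and fold that count into the final buffer. A minimal driver of the same shape, assuming Spark 3.x on the classpath (explain("codegen") requires 3.0+); the query itself is an inference from the constants in the generated code, not part of the gist:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import static org.apache.spark.sql.functions.*;

public class CodegenStageDemo {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .master("local[1]")
        .appName("codegen-stage-demo")
        .getOrCreate();

    // Range(start=0, end=1000000000, step=1, numSlices=1) matches the constants
    // hard-coded into initRange() below.
    Dataset<Row> result = spark.range(0L, 1000000000L, 1L, 1)
        .filter(col("id").mod(2).equalTo(0))   // the inlined "id % 2 == 0" predicate
        .agg(count(lit(1)));                   // global count, no grouping keys

    // Prints generated code similar to GeneratedIteratorForCodegenStage1 below.
    result.explain("codegen");
    result.show();
    spark.stop();
  }
}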
public Object generate(Object[] references) {
  return new GeneratedIteratorForCodegenStage1(references);
}
/*wsc_codegenStageId*/
final class GeneratedIteratorForCodegenStage1 extends org.apache.spark.sql.execution.BufferedRowIterator {
  private Object[] references;
  private scala.collection.Iterator[] inputs;
  private boolean agg_initAgg_0;
  private boolean agg_bufIsNull_0;
  private long agg_bufValue_0;
  private boolean agg_initAgg_1;
  private boolean agg_bufIsNull_1;
  private long agg_bufValue_1;
  private boolean range_initRange_0;
  private long range_number_0;
  private TaskContext range_taskContext_0;
  private InputMetrics range_inputMetrics_0;
  private long range_batchEnd_0;
  private long range_numElementsTodo_0;
  private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[] range_mutableStateArray_0 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter[5];

  public GeneratedIteratorForCodegenStage1(Object[] references) {
    this.references = references;
  }

  public void init(int index, scala.collection.Iterator[] inputs) {
    partitionIndex = index;
    this.inputs = inputs;
    range_taskContext_0 = TaskContext.get();
    range_inputMetrics_0 = range_taskContext_0.taskMetrics().inputMetrics();
    range_mutableStateArray_0[0] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
    range_mutableStateArray_0[1] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
    range_mutableStateArray_0[2] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
    range_mutableStateArray_0[3] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
    range_mutableStateArray_0[4] = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(1, 0);
  }
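
  // Outer (final) aggregation: runs the partial aggregation below once, then
  // folds the resulting per-partition count (agg_bufValue_1) into agg_bufValue_0.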
  private void agg_doAggregateWithoutKey_0() throws java.io.IOException {
    // initialize aggregation buffer
    agg_bufIsNull_0 = false;
    agg_bufValue_0 = 0L;

    while (!agg_initAgg_1) {
      agg_initAgg_1 = true;
      long agg_beforeAgg_0 = System.nanoTime();
      agg_doAggregateWithoutKey_1();
      ((org.apache.spark.sql.execution.metric.SQLMetric) references[3] /* aggTime */).add((System.nanoTime() - agg_beforeAgg_0) / 1000000);

      // output the result
      ((org.apache.spark.sql.execution.metric.SQLMetric) references[2] /* numOutputRows */).add(1);
      agg_doConsume_1(agg_bufValue_1);
    }
  }
  private void agg_doConsume_1(long agg_expr_0_0) throws java.io.IOException {
    // do aggregate
    // common sub-expressions
    // evaluate aggregate function
    long agg_value_9 = -1L;
    agg_value_9 = agg_bufValue_0 + agg_expr_0_0;
    // update aggregation buffer
    agg_bufIsNull_0 = false;
    agg_bufValue_0 = agg_value_9;
  }
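
  // Computes this partition's slice of Range(0, 1000000000, step=1, numSlices=1),
  // using BigInteger arithmetic so the start/end computation cannot overflow a long.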
  private void initRange(int idx) {
    java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
    java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
    java.math.BigInteger numElement = java.math.BigInteger.valueOf(1000000000L);
    java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
    java.math.BigInteger start = java.math.BigInteger.valueOf(0L);
    long partitionEnd;

    java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
    if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
      range_number_0 = Long.MAX_VALUE;
    } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
      range_number_0 = Long.MIN_VALUE;
    } else {
      range_number_0 = st.longValue();
    }
    range_batchEnd_0 = range_number_0;

    java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
      .multiply(step).add(start);
    if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
      partitionEnd = Long.MAX_VALUE;
    } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
      partitionEnd = Long.MIN_VALUE;
    } else {
      partitionEnd = end.longValue();
    }

    java.math.BigInteger startToEnd = java.math.BigInteger.valueOf(partitionEnd).subtract(
      java.math.BigInteger.valueOf(range_number_0));
    range_numElementsTodo_0 = startToEnd.divide(step).longValue();
    if (range_numElementsTodo_0 < 0) {
      range_numElementsTodo_0 = 0;
    } else if (startToEnd.remainder(step).compareTo(java.math.BigInteger.valueOf(0L)) != 0) {
      range_numElementsTodo_0++;
    }
  }
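
  // Inner (partial) aggregation consume: counts one row that passed the filter.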
  private void agg_doConsume_0() throws java.io.IOException {
    // do aggregate
    // common sub-expressions
    // evaluate aggregate function
    long agg_value_4 = -1L;
    agg_value_4 = agg_bufValue_1 + 1L;
    // update aggregation buffer
    agg_bufIsNull_1 = false;
    agg_bufValue_1 = agg_value_4;
  }
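
  // Partial aggregation over the Range source: generates ids in batches of up to
  // 1000, evaluates the filter (id % 2 == 0) inline, and counts the surviving rows.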
  private void agg_doAggregateWithoutKey_1() throws java.io.IOException {
    // initialize aggregation buffer
    agg_bufIsNull_1 = false;
    agg_bufValue_1 = 0L;

    // initialize Range
    if (!range_initRange_0) {
      range_initRange_0 = true;
      initRange(partitionIndex);
    }

    while (true) {
      long range_range_0 = range_batchEnd_0 - range_number_0;
      if (range_range_0 != 0L) {
        int range_localEnd_0 = (int)(range_range_0 / 1L);
        for (int range_localIdx_0 = 0; range_localIdx_0 < range_localEnd_0; range_localIdx_0++) {
          long range_value_0 = ((long)range_localIdx_0 * 1L) + range_number_0;

          do {
            boolean filter_isNull_0 = true;
            boolean filter_value_0 = false;
            boolean filter_isNull_1 = false;
            long filter_value_1 = -1L;
            if (2L == 0) {
              filter_isNull_1 = true;
            } else {
              filter_value_1 = (long)(range_value_0 % 2L); // HERE IS THE FILTER CONDITION
            }
            if (!filter_isNull_1) {
              filter_isNull_0 = false; // resultCode could change nullability.
              filter_value_0 = filter_value_1 == 0L;
            }
            if (filter_isNull_0 || !filter_value_0) continue;

            ((org.apache.spark.sql.execution.metric.SQLMetric) references[1] /* numOutputRows */).add(1);
            agg_doConsume_0();
          } while (false);
          // shouldStop check is eliminated
        }
        range_number_0 = range_batchEnd_0;
      }

      range_taskContext_0.killTaskIfInterrupted();
      long range_nextBatchTodo_0;
      if (range_numElementsTodo_0 > 1000L) {
        range_nextBatchTodo_0 = 1000L;
        range_numElementsTodo_0 -= 1000L;
      } else {
        range_nextBatchTodo_0 = range_numElementsTodo_0;
        range_numElementsTodo_0 = 0;
        if (range_nextBatchTodo_0 == 0) break;
      }
      ((org.apache.spark.sql.execution.metric.SQLMetric) references[0] /* numOutputRows */).add(range_nextBatchTodo_0);
      range_inputMetrics_0.incRecordsRead(range_nextBatchTodo_0);
      range_batchEnd_0 += range_nextBatchTodo_0 * 1L;
    }
  }
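
  // Iterator entry point: runs the fused pipeline once, then writes the single
  // aggregated result row through an UnsafeRowWriter and appends it to the output.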
  protected void processNext() throws java.io.IOException {
    while (!agg_initAgg_0) {
      agg_initAgg_0 = true;
      long agg_beforeAgg_1 = System.nanoTime();
      agg_doAggregateWithoutKey_0();
      ((org.apache.spark.sql.execution.metric.SQLMetric) references[5] /* aggTime */).add((System.nanoTime() - agg_beforeAgg_1) / 1000000);

      // output the result
      ((org.apache.spark.sql.execution.metric.SQLMetric) references[4] /* numOutputRows */).add(1);
      range_mutableStateArray_0[4].reset();
      range_mutableStateArray_0[4].zeroOutNullBytes();
      range_mutableStateArray_0[4].write(0, agg_bufValue_0);
      append((range_mutableStateArray_0[4].getRow()));
    }
  }
}