diff --git a/trans-array.c b/trans-array.c
index 3472804e4c6344e68d15af2feee627429ca61018..f615e4e6a10d59b6878033876f7b170f516e651b 100644
*** a/trans-array.c
--- b/trans-array.c
*************** gfc_conv_array_ref (gfc_se * se, gfc_arr
*** 2830,2835 ****
--- 2830,2863 ----
  }
  
  
+ /* Add the offset corresponding to array's ARRAY_DIM dimension and loop's
+    LOOP_DIM dimension (if any) to array's offset.  */
+ 
+ static void
+ add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
+                   gfc_array_ref *ar, int array_dim, int loop_dim)
+ {
+   gfc_se se;
+   gfc_ss_info *info;
+   tree stride, index;
+ 
+   info = &ss->data.info;
+ 
+   gfc_init_se (&se, NULL);
+   se.loop = loop;
+   se.expr = info->descriptor;
+   stride = gfc_conv_array_stride (info->descriptor, array_dim);
+   index = gfc_conv_array_index_offset (&se, info, array_dim, loop_dim, ar,
+                                        stride);
+   gfc_add_block_to_block (pblock, &se.pre);
+ 
+   info->offset = fold_build2_loc (input_location, PLUS_EXPR,
+                                   gfc_array_index_type,
+                                   info->offset, index);
+   info->offset = gfc_evaluate_now (info->offset, pblock);
+ }
+ 
+ 
  /* Generate the code to be executed immediately before entering a
     scalarization loop.  */
  
*************** static void
*** 2837,2847 ****
  gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
                           stmtblock_t * pblock)
  {
-   tree index;
    tree stride;
    gfc_ss_info *info;
    gfc_ss *ss;
!   gfc_se se;
    int i;
  
    /* This code will be executed before entering the scalarization loop
--- 2865,2874 ----
  gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
                           stmtblock_t * pblock)
  {
    tree stride;
    gfc_ss_info *info;
    gfc_ss *ss;
!   gfc_array_ref *ar;
    int i;
  
    /* This code will be executed before entering the scalarization loop
*************** gfc_trans_preloop_setup (gfc_loopinfo *
*** 2860,2933 ****
        if (dim >= info->dimen)
          continue;
  
-       if (dim == info->dimen - 1)
-         {
-           /* For the outermost loop calculate the offset due to any
-              elemental dimensions.  It will have been initialized with the
-              base offset of the array.  */
          if (info->ref)
!             {
!               for (i = 0; i < info->ref->u.ar.dimen; i++)
!                 {
!                   if (info->ref->u.ar.dimen_type[i] != DIMEN_ELEMENT)
!                     continue;
!
!                   gfc_init_se (&se, NULL);
!                   se.loop = loop;
!                   se.expr = info->descriptor;
!                   stride = gfc_conv_array_stride (info->descriptor, i);
!                   index = gfc_conv_array_index_offset (&se, info, i, -1,
!                                                        &info->ref->u.ar,
!                                                        stride);
!                   gfc_add_block_to_block (pblock, &se.pre);
!
!                   info->offset = fold_build2_loc (input_location, PLUS_EXPR,
!                                                   gfc_array_index_type,
!                                                   info->offset, index);
!                   info->offset = gfc_evaluate_now (info->offset, pblock);
!                 }
!             }
!
!           i = loop->order[0];
!           /* For the time being, the innermost loop is unconditionally on
!              the first dimension of the scalarization loop.  */
!           gcc_assert (i == 0);
!
          stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
  
          /* Calculate the stride of the innermost loop.  Hopefully this
             will allow the backend optimizers to do their stuff more
             effectively.  */
          info->stride0 = gfc_evaluate_now (stride, pblock);
-         }
-       else
-         {
-           /* Add the offset for the previous loop dimension.  */
-           gfc_array_ref *ar;
-
          if (info->ref)
            {
!               ar = &info->ref->u.ar;
!               i = loop->order[dim + 1];
!             }
!           else
            {
!               ar = NULL;
!               i = dim + 1;
!             }
!
!           gfc_init_se (&se, NULL);
!           se.loop = loop;
!           se.expr = info->descriptor;
!           stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
!           index = gfc_conv_array_index_offset (&se, info, info->dim[i], i,
!                                                ar, stride);
!           gfc_add_block_to_block (pblock, &se.pre);
!           info->offset = fold_build2_loc (input_location, PLUS_EXPR,
!                                           gfc_array_index_type, info->offset,
!                                           index);
!           info->offset = gfc_evaluate_now (info->offset, pblock);
          }
  
        /* Remember this offset for the second loop.  */
        if (dim == loop->temp_dim - 1)
--- 2887,2934 ----
        if (dim >= info->dimen)
          continue;
  
+       gcc_assert (info->dimen == loop->dimen);
+
        if (info->ref)
!         ar = &info->ref->u.ar;
!       else
!         ar = NULL;
!
!       if (dim == loop->dimen - 1)
!         i = 0;
!       else
!         i = dim + 1;
!
!       /* For the time being, there is no loop reordering.  */
!       gcc_assert (i == loop->order[i]);
!       i = loop->order[i];
!
!       if (dim == loop->dimen - 1)
!         {
          stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
  
          /* Calculate the stride of the innermost loop.  Hopefully this
             will allow the backend optimizers to do their stuff more
             effectively.  */
          info->stride0 = gfc_evaluate_now (stride, pblock);
+
+           /* For the outermost loop calculate the offset due to any
+              elemental dimensions.  It will have been initialized with the
+              base offset of the array.  */
          if (info->ref)
            {
!               for (i = 0; i < ar->dimen; i++)
                {
!                   if (ar->dimen_type[i] != DIMEN_ELEMENT)
!                     continue;
!
!                   add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
                }
+             }
+         }
+       else
+         /* Add the offset for the previous loop dimension.  */
+         add_array_offset (pblock, loop, ss, ar, info->dim[i], i);
  
        /* Remember this offset for the second loop.  */
        if (dim == loop->temp_dim - 1)
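
Note (not part of the patch): the change above folds the repeated "offset = offset + stride * index" update, previously open-coded in both the elemental-dimension loop and the previous-loop-dimension branch, into the new add_array_offset helper. The stand-alone C sketch below only illustrates that pattern; it is not GCC code, does not use the gfortran data structures, and its names (dim_info, add_offset) are hypothetical.

/* Illustrative sketch only, not GCC code: it mirrors the shape of the
   refactoring above, where the duplicated "offset += stride * index"
   update is factored into a single helper.  */

#include <stdio.h>

struct dim_info
{
  long offset;      /* running offset, analogous to info->offset */
  long stride[7];   /* per-dimension strides from the descriptor */
};

/* Analogue of add_array_offset: fold one dimension's contribution
   into the running offset.  */
static void
add_offset (struct dim_info *info, int array_dim, long index)
{
  info->offset += info->stride[array_dim] * index;
}

int
main (void)
{
  struct dim_info info = { 0, { 1, 10, 100, 0, 0, 0, 0 } };

  /* The caller adds each fixed dimension in turn, much as the preloop
     setup adds elemental dimensions and the previous loop dimension.  */
  add_offset (&info, 0, 3);   /* dimension 0, element 3 */
  add_offset (&info, 2, 5);   /* dimension 2, element 5 */

  printf ("offset = %ld\n", info.offset);   /* prints "offset = 503" */
  return 0;
}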