From 47603c0cf577b620f5e53f0234f2b30158d210ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= <marko.makela@mariadb.com>
Date: Sat, 10 Nov 2018 14:49:54 +0200
Subject: [PATCH] MDEV-16329: Remove most of InnoDB online table rebuild code

Some more changes will be needed in handler0alter.cc.
This patch is against mariadb-10.4.0 and is only a starting point.
---
 storage/innobase/btr/btr0cur.cc           |   13 -
 storage/innobase/handler/ha_innodb.cc     |   41 +-
 storage/innobase/handler/handler0alter.cc |  171 +-
 storage/innobase/include/dict0dict.ic     |    5 +-
 storage/innobase/include/handler0alter.h  |   10 -
 storage/innobase/include/row0ins.h        |   28 -
 storage/innobase/include/row0log.h        |  128 +-
 storage/innobase/include/row0merge.h      |    4 +-
 storage/innobase/include/srv0srv.h        |    4 -
 storage/innobase/include/ut0stage.h       |   36 +-
 storage/innobase/lock/lock0lock.cc        |    2 +
 storage/innobase/row/row0ins.cc           |  149 +-
 storage/innobase/row/row0log.cc           | 2904 +--------------------
 storage/innobase/row/row0merge.cc         |   50 +-
 storage/innobase/row/row0uins.cc          |   25 +-
 storage/innobase/row/row0umod.cc          |   64 +-
 storage/innobase/row/row0upd.cc           |   25 +-
 storage/innobase/srv/srv0srv.cc           |    5 -
 storage/innobase/srv/srv0start.cc         |    1 -
 19 files changed, 60 insertions(+), 3605 deletions(-)

diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 0654829e3f4..76aabf2cfd6 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -5304,10 +5304,6 @@ btr_cur_del_mark_set_clust_rec(
 		 << ib::hex(trx_get_id_for_print(trx)) << ": "
 		 << rec_printer(rec, offsets).str());
 
-	if (dict_index_is_online_ddl(index)) {
-		row_log_table_delete(rec, index, offsets, NULL);
-	}
-
 	row_upd_rec_sys_fields(rec, page_zip, index, offsets, trx, roll_ptr);
 
 	btr_cur_del_mark_set_clust_rec_log(rec, index, trx->id,
@@ -7579,9 +7575,6 @@ btr_store_big_rec_extern_fields(
 						+ BTR_BLOB_HDR_NEXT_PAGE_NO,
 						page_no, MLOG_4BYTES, &mtr);
 				}
-
-			} else if (dict_index_is_online_ddl(index)) {
-				row_log_table_blob_alloc(index, page_no);
 			}
 
 			if (page_zip) {
@@ -7890,8 +7883,6 @@ btr_free_externally_stored_field(
 	page_t*		page;
 	const ulint	space_id	= mach_read_from_4(
 		field_ref + BTR_EXTERN_SPACE_ID);
-	const ulint	start_page	= mach_read_from_4(
-		field_ref + BTR_EXTERN_PAGE_NO);
 	ulint		page_no;
 	ulint		next_page_no;
 	mtr_t		mtr;
@@ -7971,10 +7962,6 @@ btr_free_externally_stored_field(
 			return;
 		}
 
-		if (page_no == start_page && dict_index_is_online_ddl(index)) {
-			row_log_table_blob_free(index, start_page);
-		}
-
 		ext_block = buf_page_get(
 			page_id_t(space_id, page_no), ext_page_size,
 			RW_X_LATCH, &mtr);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 6dedef20c05..c759fb3e8bc 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -5330,17 +5330,10 @@ ha_innobase::column_bitmaps_signal()
 		return;
 	}
 
-	dict_index_t*	clust_index = dict_table_get_first_index(m_prebuilt->table);
 	uint	num_v = 0;
 	for (uint j = 0; j < table->s->virtual_fields; j++) {
-		if (table->vfield[j]->stored_in_db()) {
-			continue;
-		}
-
-		dict_col_t*	col = &m_prebuilt->table->v_cols[num_v].m_col;
-		if (col->ord_part ||
-		    (dict_index_is_online_ddl(clust_index) &&
-		     row_log_col_is_indexed(clust_index, num_v))) {
+		if (!table->vfield[j]->stored_in_db()
+		    && m_prebuilt->table->v_cols[num_v].m_col.ord_part) {
 			table->mark_virtual_column_with_deps(table->vfield[j]);
 		}
 		num_v++;
@@ -8370,34 +8363,12 @@ calc_row_difference(
 			}
 		}
 
-#ifdef UNIV_DEBUG
-		bool	online_ord_part = false;
-#endif
-
 		if (is_virtual) {
 			/* If the virtual column is not indexed,
 			we shall ignore it for update */
 			if (!col->ord_part) {
-				/* Check whether there is a table-rebuilding
-				online ALTER TABLE in progress, and this
-				virtual column could be newly indexed, thus
-				it will be materialized. Then we will have
-				to log its update.
-				Note, we do not support online dropping virtual
-				column while adding new index, nor with
-				online alter column order while adding index,
-				so the virtual column sequence must not change
-				if it is online operation */
-				if (dict_index_is_online_ddl(clust_index)
-				    && row_log_col_is_indexed(clust_index,
-							      num_v)) {
-#ifdef UNIV_DEBUG
-					online_ord_part = true;
-#endif
-				} else {
-					num_v++;
-					continue;
-				}
+				num_v++;
+				continue;
 			}
 
 			if (!uvect->old_vrow) {
@@ -8473,7 +8444,7 @@ calc_row_difference(
 				upd_fld_set_virtual_col(ufield);
 				ufield->field_no = num_v;
 
-				ut_ad(col->ord_part || online_ord_part);
+				ut_ad(col->ord_part); // FIXME: is this OK?
 				ufield->old_v_val = static_cast<dfield_t*>(
 					mem_heap_alloc(
 						uvect->heap,
@@ -8556,7 +8527,7 @@ calc_row_difference(
 				prebuilt, vfield, o_len,
 				col, old_mysql_row_col,
 				col_pack_len, buf);
-			ut_ad(col->ord_part || online_ord_part);
+			ut_ad(col->ord_part); // FIXME: is this OK?
 			num_v++;
 		}
 	}
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index f0e31545154..e326d04a568 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -791,12 +791,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
 	const char* const old_v_col_names;
 	/** 0, or 1 + first column whose position changes in instant ALTER */
 	unsigned	first_alter_pos;
-	/** Allow non-null conversion.
-	(1) Alter ignore should allow the conversion
-	irrespective of sql mode.
-	(2) Don't allow the conversion in strict mode
-	(3) Allow the conversion only in non-strict mode. */
-	const bool	allow_not_null;
 
 	/** The page_compression_level attribute, or 0 */
 	const uint	page_compression_level;
@@ -817,7 +811,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
 				ulint add_autoinc_arg,
 				ulonglong autoinc_col_min_value_arg,
 				ulonglong autoinc_col_max_value_arg,
-				bool allow_not_null_flag,
 				bool page_compressed,
 				ulonglong page_compression_level_arg) :
 		inplace_alter_handler_ctx(),
@@ -854,7 +847,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
 		old_v_cols(prebuilt_arg->table->v_cols),
 		old_v_col_names(prebuilt_arg->table->v_col_names),
 		first_alter_pos(0),
-		allow_not_null(allow_not_null_flag),
 		page_compression_level(page_compressed
 				       ? (page_compression_level_arg
 					  ? uint(page_compression_level_arg)
@@ -2882,58 +2874,6 @@ innobase_fields_to_mysql(
 	}
 }
 
-/*************************************************************//**
-Copies an InnoDB row to table->record[0].
-This is used in preparation for print_keydup_error() from
-row_log_table_apply() */
-void
-innobase_row_to_mysql(
-/*==================*/
-	struct TABLE*		table,	/*!< in/out: MySQL table */
-	const dict_table_t*	itab,	/*!< in: InnoDB table */
-	const dtuple_t*		row)	/*!< in: InnoDB row */
-{
-	uint	n_fields = table->s->fields;
-	ulint	num_v = 0;
-
-	/* The InnoDB row may contain an extra FTS_DOC_ID column at the end. */
-	ut_ad(row->n_fields == dict_table_get_n_cols(itab));
-	ut_ad(n_fields == row->n_fields - DATA_N_SYS_COLS
-	      + dict_table_get_n_v_cols(itab)
-	      - !!(DICT_TF2_FLAG_IS_SET(itab, DICT_TF2_FTS_HAS_DOC_ID)));
-
-	for (uint i = 0; i < n_fields; i++) {
-		Field*		field	= table->field[i];
-
-		field->reset();
-
-		if (innobase_is_v_fld(field)) {
-			/* Virtual column are not stored in InnoDB table, so
-			skip it */
-			num_v++;
-			continue;
-		}
-
-		const dfield_t*	df	= dtuple_get_nth_field(row, i - num_v);
-
-		if (dfield_is_ext(df) || dfield_is_null(df)) {
-			field->set_null();
-		} else {
-			field->set_notnull();
-
-			innobase_col_to_mysql(
-				dict_table_get_nth_col(itab, i - num_v),
-				static_cast<const uchar*>(dfield_get_data(df)),
-				dfield_get_len(df), field);
-		}
-	}
-	if (table->vfield) {
-		my_bitmap_map*	old_read_set = tmp_use_all_columns(table, table->read_set);
-		table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ);
-		tmp_restore_column_map(table->read_set, old_read_set);
-	}
-}
-
 /*******************************************************************//**
 This function checks that index keys are sensible.
 @return 0 or error number */
@@ -6378,24 +6318,7 @@ prepare_inplace_alter_table_dict(
 			}
 		}
 
-		if (ctx->online) {
-			/* Allocate a log for online table rebuild. */
-			rw_lock_x_lock(&clust_index->lock);
-			bool ok = row_log_allocate(
-				ctx->prebuilt->trx,
-				clust_index, ctx->new_table,
-				!(ha_alter_info->handler_flags
-				  & ALTER_ADD_PK_INDEX),
-				ctx->defaults, ctx->col_map, path,
-				old_table,
-				ctx->allow_not_null);
-			rw_lock_x_unlock(&clust_index->lock);
-
-			if (!ok) {
-				error = DB_OUT_OF_MEMORY;
-				goto error_handling;
-			}
-		}
+		DBUG_ASSERT(!ctx->online); // FIXME: Remove more code.
 	} else if (ctx->num_to_add_index) {
 		ut_ad(!ctx->is_instant());
 		ctx->trx->table_id = user_table->id;
@@ -6443,11 +6366,7 @@ prepare_inplace_alter_table_dict(
 				rw_lock_x_lock(&ctx->add_index[a]->lock);
 
 				bool ok = row_log_allocate(
-					ctx->prebuilt->trx,
-					index,
-					NULL, true, NULL, NULL,
-					path, old_table,
-					ctx->allow_not_null);
+					index, path, old_table);
 
 				rw_lock_x_unlock(&index->lock);
 
@@ -7641,8 +7560,6 @@ ha_innobase::prepare_inplace_alter_table(
 					ha_alter_info->online,
 					heap, indexed_table,
 					col_names, ULINT_UNDEFINED, 0, 0,
-					(ha_alter_info->ignore
-					 || !thd_is_strict_mode(m_user_thd)),
 					alt_opt.page_compressed,
 					alt_opt.page_compression_level);
 		}
@@ -7773,7 +7690,6 @@ ha_innobase::prepare_inplace_alter_table(
 		add_autoinc_col_no,
 		ha_alter_info->create_info->auto_increment_value,
 		autoinc_col_max_value,
-		ha_alter_info->ignore || !thd_is_strict_mode(m_user_thd),
 		alt_opt.page_compressed, alt_opt.page_compression_level);
 
 	DBUG_RETURN(prepare_inplace_alter_table_dict(
@@ -8008,18 +7924,11 @@ ha_innobase::inplace_alter_table(
 		ctx->add_index, ctx->add_key_numbers, ctx->num_to_add_index,
 		altered_table, ctx->defaults, ctx->col_map,
 		ctx->add_autoinc, ctx->sequence, ctx->skip_pk_sort,
-		ctx->m_stage, add_v, eval_table, ctx->allow_not_null);
+		ctx->m_stage, add_v, eval_table);
 
 #ifndef DBUG_OFF
 oom:
 #endif /* !DBUG_OFF */
-	if (error == DB_SUCCESS && ctx->online && ctx->need_rebuild()) {
-		DEBUG_SYNC_C("row_log_table_apply1_before");
-		error = row_log_table_apply(
-			ctx->thr, m_prebuilt->table, altered_table,
-			ctx->m_stage, ctx->new_table);
-	}
-
 	/* Init online ddl status variables */
 	onlineddl_rowlog_rows = 0;
 	onlineddl_rowlog_pct_used = 0;
@@ -9027,11 +8936,7 @@ commit_set_autoinc(
 		ctx->new_table->autoinc = ctx->old_table->autoinc;
 		/* The persistent value was already copied in
 		prepare_inplace_alter_table_dict() when ctx->new_table
-		was created. If this was a LOCK=NONE operation, the
-		AUTO_INCREMENT values would be updated during
-		row_log_table_apply(). If this was LOCK!=NONE,
-		the table contents could not possibly have changed
-		between prepare_inplace and commit_inplace. */
+		was created. */
 	}
 
 	DBUG_RETURN(false);
@@ -9439,74 +9344,6 @@ commit_try_rebuild(
 		index->to_be_dropped = 0;
 	}
 
-	/* We copied the table. Any indexes that were requested to be
-	dropped were not created in the copy of the table. Apply any
-	last bit of the rebuild log and then rename the tables. */
-
-	if (ctx->online) {
-		DEBUG_SYNC_C("row_log_table_apply2_before");
-
-		dict_vcol_templ_t* s_templ  = NULL;
-
-		if (ctx->new_table->n_v_cols > 0) {
-			s_templ = UT_NEW_NOKEY(
-					dict_vcol_templ_t());
-			s_templ->vtempl = NULL;
-
-			innobase_build_v_templ(
-				altered_table, ctx->new_table, s_templ,
-				NULL, true);
-			ctx->new_table->vc_templ = s_templ;
-		}
-
-		error = row_log_table_apply(
-			ctx->thr, user_table, altered_table,
-			static_cast<ha_innobase_inplace_ctx*>(
-				ha_alter_info->handler_ctx)->m_stage,
-			ctx->new_table);
-
-		if (s_templ) {
-			ut_ad(ctx->need_rebuild());
-			dict_free_vc_templ(s_templ);
-			UT_DELETE(s_templ);
-			ctx->new_table->vc_templ = NULL;
-		}
-
-		ulint	err_key = thr_get_trx(ctx->thr)->error_key_num;
-
-		switch (error) {
-			KEY*	dup_key;
-		case DB_SUCCESS:
-			break;
-		case DB_DUPLICATE_KEY:
-			if (err_key == ULINT_UNDEFINED) {
-				/* This should be the hidden index on
-				FTS_DOC_ID. */
-				dup_key = NULL;
-			} else {
-				DBUG_ASSERT(err_key <
-					    ha_alter_info->key_count);
-				dup_key = &ha_alter_info
-					->key_info_buffer[err_key];
-			}
-			print_keydup_error(altered_table, dup_key, MYF(0));
-			DBUG_RETURN(true);
-		case DB_ONLINE_LOG_TOO_BIG:
-			my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
-				 get_error_key_name(err_key, ha_alter_info,
-						    rebuilt_table));
-			DBUG_RETURN(true);
-		case DB_INDEX_CORRUPT:
-			my_error(ER_INDEX_CORRUPT, MYF(0),
-				 get_error_key_name(err_key, ha_alter_info,
-						    rebuilt_table));
-			DBUG_RETURN(true);
-		default:
-			my_error_innodb(error, table_name, user_table->flags);
-			DBUG_RETURN(true);
-		}
-	}
-
 	if ((ha_alter_info->handler_flags
 	     & ALTER_COLUMN_NAME)
 	    && innobase_rename_columns_try(ha_alter_info, ctx, old_table,
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 6dcc40db70a..d66b79199f1 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -225,7 +225,10 @@ dict_table_get_first_index(
 	ut_ad(table);
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
 
-	return(UT_LIST_GET_FIRST(((dict_table_t*) table)->indexes));
+	dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
+	ut_ad(!dict_index_is_online_ddl(index));
+
+	return index;
 }
 
 /********************************************************************//**
diff --git a/storage/innobase/include/handler0alter.h b/storage/innobase/include/handler0alter.h
index 81c0fd18a29..813394dc25e 100644
--- a/storage/innobase/include/handler0alter.h
+++ b/storage/innobase/include/handler0alter.h
@@ -43,16 +43,6 @@ innobase_fields_to_mysql(
 	const dfield_t*		fields)	/*!< in: InnoDB index fields */
 	MY_ATTRIBUTE((nonnull));
 
-/*************************************************************//**
-Copies an InnoDB row to table->record[0]. */
-void
-innobase_row_to_mysql(
-/*==================*/
-	struct TABLE*		table,	/*!< in/out: MySQL table */
-	const dict_table_t*	itab,	/*!< in: InnoDB table */
-	const dtuple_t*		row)	/*!< in: InnoDB row */
-	MY_ATTRIBUTE((nonnull));
-
 /** Generate the next autoinc based on a snapshot of the session
 auto_increment_increment and auto_increment_offset variables. */
 struct ib_sequence_t {
diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h
index 6d3dc64211f..2f83d8f8faf 100644
--- a/storage/innobase/include/row0ins.h
+++ b/storage/innobase/include/row0ins.h
@@ -100,34 +100,6 @@ row_ins_clust_index_entry_low(
 				and return. don't execute actual insert. */
 	MY_ATTRIBUTE((warn_unused_result));
 
-/***************************************************************//**
-Tries to insert an entry into a secondary index. If a record with exactly the
-same fields is found, the other record is necessarily marked deleted.
-It is then unmarked. Otherwise, the entry is just inserted to the index.
-@retval DB_SUCCESS on success
-@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
-@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
-@return error code */
-dberr_t
-row_ins_sec_index_entry_low(
-/*========================*/
-	ulint		flags,	/*!< in: undo logging and locking flags */
-	ulint		mode,	/*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
-				depending on whether we wish optimistic or
-				pessimistic descent down the index tree */
-	dict_index_t*	index,	/*!< in: secondary index */
-	mem_heap_t*	offsets_heap,
-				/*!< in/out: memory heap that can be emptied */
-	mem_heap_t*	heap,	/*!< in/out: memory heap */
-	dtuple_t*	entry,	/*!< in/out: index entry to insert */
-	trx_id_t	trx_id,	/*!< in: PAGE_MAX_TRX_ID during
-				row_log_table_apply(), or 0 */
-	que_thr_t*	thr,	/*!< in: query thread */
-	bool		dup_chk_only)
-				/*!< in: if true, just do duplicate check
-				and return. don't execute actual insert. */
-	MY_ATTRIBUTE((warn_unused_result));
-
 /***************************************************************//**
 Inserts an entry into a clustered index. Tries first optimistic,
 then pessimistic descent down the tree. If the entry matches enough
diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h
index 723cf310f95..2f5dee1aff9 100644
--- a/storage/innobase/include/row0log.h
+++ b/storage/innobase/include/row0log.h
@@ -49,21 +49,9 @@ for online creation.
 bool
 row_log_allocate(
 /*=============*/
-	const trx_t*	trx,	/*!< in: the ALTER TABLE transaction */
 	dict_index_t*	index,	/*!< in/out: index */
-	dict_table_t*	table,	/*!< in/out: new table being rebuilt,
-				or NULL when creating a secondary index */
-	bool		same_pk,/*!< in: whether the definition of the
-				PRIMARY KEY has remained the same */
-	const dtuple_t*	defaults,
-				/*!< in: default values of
-				added, changed columns, or NULL */
-	const ulint*	col_map,/*!< in: mapping of old column
-				numbers to new ones, or NULL if !table */
 	const char*	path,	/*!< in: where to create temporary file */
-	const TABLE*	old_table,	/*!< in:table definition before alter */
-	bool		allow_not_null) /*!< in: allow null to non-null
-					conversion */
+	const TABLE*	old_table)	/*!< in: table definition before alter */
 	MY_ATTRIBUTE((nonnull(1), warn_unused_result));
 
 /******************************************************//**
@@ -108,120 +96,6 @@ row_log_online_op(
 				or 0 for delete */
 	ATTRIBUTE_COLD __attribute__((nonnull));
 
-/******************************************************//**
-Gets the error status of the online index rebuild log.
-@return DB_SUCCESS or error code */
-dberr_t
-row_log_table_get_error(
-/*====================*/
-	const dict_index_t*	index)	/*!< in: clustered index of a table
-					that is being rebuilt online */
-	MY_ATTRIBUTE((nonnull, warn_unused_result));
-
-/** Check whether a virtual column is indexed in the new table being
-created during alter table
-@param[in]	index	cluster index
-@param[in]	v_no	virtual column number
-@return true if it is indexed, else false */
-bool
-row_log_col_is_indexed(
-	const dict_index_t*	index,
-	ulint			v_no);
-
-/******************************************************//**
-Logs a delete operation to a table that is being rebuilt.
-This will be merged in row_log_table_apply_delete(). */
-void
-row_log_table_delete(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index) */
-	const byte*	sys)	/*!< in: DB_TRX_ID,DB_ROLL_PTR that should
-				be logged, or NULL to use those in rec */
-	ATTRIBUTE_COLD __attribute__((nonnull(1,2,3)));
-
-/******************************************************//**
-Logs an update operation to a table that is being rebuilt.
-This will be merged in row_log_table_apply_update(). */
-void
-row_log_table_update(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index) */
-	const dtuple_t*	old_pk);/*!< in: row_log_table_get_pk()
-				before the update */
-
-/******************************************************//**
-Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR
-of a table that is being rebuilt.
-@return tuple of PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR in the rebuilt table,
-or NULL if the PRIMARY KEY definition does not change */
-const dtuple_t*
-row_log_table_get_pk(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index),
-				or NULL */
-	byte*		sys,	/*!< out: DB_TRX_ID,DB_ROLL_PTR for
-				row_log_table_delete(), or NULL */
-	mem_heap_t**	heap)	/*!< in/out: memory heap where allocated */
-	ATTRIBUTE_COLD __attribute__((nonnull(1,2,5), warn_unused_result));
-
-/******************************************************//**
-Logs an insert to a table that is being rebuilt.
-This will be merged in row_log_table_apply_insert(). */
-void
-row_log_table_insert(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets);/*!< in: rec_get_offsets(rec,index) */
-/******************************************************//**
-Notes that a BLOB is being freed during online ALTER TABLE. */
-void
-row_log_table_blob_free(
-/*====================*/
-	dict_index_t*	index,	/*!< in/out: clustered index, X-latched */
-	ulint		page_no)/*!< in: starting page number of the BLOB */
-	ATTRIBUTE_COLD __attribute__((nonnull));
-/******************************************************//**
-Notes that a BLOB is being allocated during online ALTER TABLE. */
-void
-row_log_table_blob_alloc(
-/*=====================*/
-	dict_index_t*	index,	/*!< in/out: clustered index, X-latched */
-	ulint		page_no)/*!< in: starting page number of the BLOB */
-	ATTRIBUTE_COLD __attribute__((nonnull));
-
-/** Apply the row_log_table log to a table upon completing rebuild.
-@param[in]	thr		query graph
-@param[in]	old_table	old table
-@param[in,out]	table		MySQL table (for reporting duplicates)
-@param[in,out]	stage		performance schema accounting object, used by
-ALTER TABLE. stage->begin_phase_log_table() will be called initially and then
-stage->inc() will be called for each block of log that is applied.
-@param[in]	new_table	Altered table
-@return DB_SUCCESS, or error code on failure */
-dberr_t
-row_log_table_apply(
-	que_thr_t*		thr,
-	dict_table_t*		old_table,
-	struct TABLE*		table,
-	ut_stage_alter_t*	stage,
-	dict_table_t*		new_table)
-	MY_ATTRIBUTE((warn_unused_result));
-
 /******************************************************//**
 Get the latest transaction ID that has invoked row_log_online_op()
 during online creation.
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index ad4005239c3..7330031c186 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -322,7 +322,6 @@ this function and it will be passed to other functions for further accounting.
 @param[in]	add_v		new virtual columns added along with indexes
 @param[in]	eval_table	mysql table used to evaluate virtual column
 				value, see innobase_get_computed_value().
-@param[in]	allow_non_null	allow the conversion from null to not-null
 @return DB_SUCCESS or error code */
 dberr_t
 row_merge_build_indexes(
@@ -341,8 +340,7 @@ row_merge_build_indexes(
 	bool			skip_pk_sort,
 	ut_stage_alter_t*	stage,
 	const dict_add_v_col_t*	add_v,
-	struct TABLE*		eval_table,
-	bool			allow_non_null)
+	struct TABLE*		eval_table)
 	MY_ATTRIBUTE((warn_unused_result));
 
 /********************************************************************//**
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 25a71ffbafe..8606e3aeb07 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -638,10 +638,6 @@ extern PSI_stage_info	srv_stage_alter_table_insert;
 row_log_apply(). */
 extern PSI_stage_info	srv_stage_alter_table_log_index;
 
-/** Performance schema stage event for monitoring ALTER TABLE progress
-row_log_table_apply(). */
-extern PSI_stage_info	srv_stage_alter_table_log_table;
-
 /** Performance schema stage event for monitoring ALTER TABLE progress
 row_merge_sort(). */
 extern PSI_stage_info	srv_stage_alter_table_merge_sort;
diff --git a/storage/innobase/include/ut0stage.h b/storage/innobase/include/ut0stage.h
index 4b96fad3c21..be8c89d1e5c 100644
--- a/storage/innobase/include/ut0stage.h
+++ b/storage/innobase/include/ut0stage.h
@@ -67,9 +67,6 @@ if any new indexes are being added, for each one:
 begin_phase_flush()
     multiple times:
       inc() // once per page flushed
-begin_phase_log_table()
-    multiple times:
-      inc() // once per log-block applied
 begin_phase_end()
 destructor
 
@@ -147,10 +144,6 @@ class ut_stage_alter_t {
 	void
 	begin_phase_log_index();
 
-	/** Flag the beginning of the log table phase. */
-	void
-	begin_phase_log_table();
-
 	/** Flag the beginning of the end phase. */
 	void
 	begin_phase_end();
@@ -211,8 +204,7 @@ class ut_stage_alter_t {
 		LOG_INDEX = 5,
 		LOG_TABLE = 6, */
 		LOG_INNODB_INDEX = 5,
-		LOG_INNODB_TABLE = 6,
-		END = 7,
+		END = 6,
 	}			m_cur_phase;
 };
 
@@ -327,7 +319,6 @@ ut_stage_alter_t::inc(ulint)
 	case LOG_TABLE:
 	break; */
 	case LOG_INNODB_INDEX:
-	case LOG_INNODB_TABLE:
 		break;
 	case END:
 		break;
@@ -412,14 +403,6 @@ ut_stage_alter_t::begin_phase_log_index()
 	change_phase(&srv_stage_alter_table_log_index);
 }
 
-/** Flag the beginning of the log table phase. */
-inline
-void
-ut_stage_alter_t::begin_phase_log_table()
-{
-	change_phase(&srv_stage_alter_table_log_table);
-}
-
 /** Flag the beginning of the end phase. */
 inline
 void
@@ -437,21 +420,6 @@ ut_stage_alter_t::reestimate()
 		return;
 	}
 
-	/* During the log table phase we calculate the estimate as
-	work done so far + log size remaining. */
-	if (m_cur_phase == LOG_INNODB_TABLE) {
-		/* TODO: MySQL 5.7 PSI
-		mysql_stage_set_work_estimated(
-			m_progress,
-			mysql_stage_get_work_completed(m_progress)
-			+ row_log_estimate_work(m_pk));
-		*/
-		return;
-	}
-
-	/* During the other phases we use a formula, regardless of
-	how much work has been done so far. */
-
 	/* For number of pages in the PK - if the PK has not been
 	read yet, use stat_n_leaf_pages (approximate), otherwise
 	use the exact number we gathered. */
@@ -548,8 +516,6 @@ class ut_stage_alter_t {
 
 	void begin_phase_log_index() {}
 
-	void begin_phase_log_table() {}
-
 	void begin_phase_end() {}
 };
 
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 5dc437dbcd7..84c4087b75d 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -2646,6 +2646,8 @@ lock_move_reorganize_page(
 		return;
 	}
 
+	DBUG_ASSERT(lock->index->table->space->id == block->page.id.space());
+
 	heap = mem_heap_create(256);
 
 	/* Copy first all the locks on the page to heap and reset the
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 777d69cf127..b4bd2eb471b 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -2211,85 +2211,6 @@ row_ins_scan_sec_index_for_duplicate(
 	DBUG_RETURN(err);
 }
 
-/** Checks for a duplicate when the table is being rebuilt online.
-@retval DB_SUCCESS when no duplicate is detected
-@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
-a newer version of entry (the entry should not be inserted)
-@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_ins_duplicate_online(
-/*=====================*/
-	ulint		n_uniq,	/*!< in: offset of DB_TRX_ID */
-	const dtuple_t*	entry,	/*!< in: entry that is being inserted */
-	const rec_t*	rec,	/*!< in: clustered index record */
-	ulint*		offsets)/*!< in/out: rec_get_offsets(rec) */
-{
-	ulint	fields	= 0;
-
-	/* During rebuild, there should not be any delete-marked rows
-	in the new table. */
-	ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
-	ut_ad(dtuple_get_n_fields_cmp(entry) == n_uniq);
-
-	/* Compare the PRIMARY KEY fields and the
-	DB_TRX_ID, DB_ROLL_PTR. */
-	cmp_dtuple_rec_with_match_low(
-		entry, rec, offsets, n_uniq + 2, &fields);
-
-	if (fields < n_uniq) {
-		/* Not a duplicate. */
-		return(DB_SUCCESS);
-	}
-
-	if (fields == n_uniq + 2) {
-		/* rec is an exact match of entry. */
-		return(DB_SUCCESS_LOCKED_REC);
-	}
-
-	return(DB_DUPLICATE_KEY);
-}
-
-/** Checks for a duplicate when the table is being rebuilt online.
-@retval DB_SUCCESS when no duplicate is detected
-@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
-a newer version of entry (the entry should not be inserted)
-@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_ins_duplicate_error_in_clust_online(
-/*====================================*/
-	ulint		n_uniq,	/*!< in: offset of DB_TRX_ID */
-	const dtuple_t*	entry,	/*!< in: entry that is being inserted */
-	const btr_cur_t*cursor,	/*!< in: cursor on insert position */
-	ulint**		offsets,/*!< in/out: rec_get_offsets(rec) */
-	mem_heap_t**	heap)	/*!< in/out: heap for offsets */
-{
-	dberr_t		err	= DB_SUCCESS;
-	const rec_t*	rec	= btr_cur_get_rec(cursor);
-
-	ut_ad(!cursor->index->is_instant());
-
-	if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
-		*offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
-					   ULINT_UNDEFINED, heap);
-		err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
-		if (err != DB_SUCCESS) {
-			return(err);
-		}
-	}
-
-	rec = page_rec_get_next_const(btr_cur_get_rec(cursor));
-
-	if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) {
-		*offsets = rec_get_offsets(rec, cursor->index, *offsets, true,
-					   ULINT_UNDEFINED, heap);
-		err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
-	}
-
-	return(err);
-}
-
 /***************************************************************//**
 Checks if a unique key violation error would occur at an index entry
 insert. Sets shared locks on possible duplicate records. Works only
@@ -2523,11 +2444,6 @@ row_ins_index_entry_big_rec(
 		&pcur, offsets, big_rec, &mtr, BTR_STORE_INSERT);
 	DEBUG_SYNC_C_IF_THD(thd, "after_row_ins_extern");
 
-	if (error == DB_SUCCESS
-	    && dict_index_is_online_ddl(index)) {
-		row_log_table_insert(btr_pcur_get_rec(&pcur), index, offsets);
-	}
-
 	mtr.commit();
 
 	btr_pcur_close(&pcur);
@@ -2590,19 +2506,12 @@ row_ins_clust_index_entry_low(
 		Disable locking as temp-tables are local to a connection. */
 
 		ut_ad(flags & BTR_NO_LOCKING_FLAG);
-		ut_ad(!dict_index_is_online_ddl(index));
 		ut_ad(!index->table->persistent_autoinc);
 		ut_ad(!index->is_instant());
 		mtr.set_log_mode(MTR_LOG_NO_REDO);
 	} else {
 		index->set_modified(mtr);
 
-		if (mode == BTR_MODIFY_LEAF
-		    && dict_index_is_online_ddl(index)) {
-			mode = BTR_MODIFY_LEAF_ALREADY_S_LATCHED;
-			mtr_s_lock(dict_index_get_lock(index), &mtr);
-		}
-
 		if (unsigned ai = index->table->persistent_autoinc) {
 			/* Prepare to persist the AUTO_INCREMENT value
 			from the index entry to PAGE_ROOT_AUTO_INC. */
@@ -2648,7 +2557,6 @@ row_ins_clust_index_entry_low(
 		ut_ad(entry->is_metadata());
 		ut_ad(flags == BTR_NO_LOCKING_FLAG);
 		ut_ad(index->is_instant());
-		ut_ad(!dict_index_is_online_ddl(index));
 		ut_ad(!dup_chk_only);
 
 		const rec_t* rec = btr_cur_get_rec(cursor);
@@ -2671,32 +2579,11 @@ row_ins_clust_index_entry_low(
 	if (n_uniq
 	    && (cursor->up_match >= n_uniq || cursor->low_match >= n_uniq)) {
 
-		if (flags
-		    == (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
-			| BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) {
-			/* Set no locks when applying log
-			in online table rebuild. Only check for duplicates. */
-			err = row_ins_duplicate_error_in_clust_online(
-				n_uniq, entry, cursor,
-				&offsets, &offsets_heap);
-
-			switch (err) {
-			case DB_SUCCESS:
-				break;
-			default:
-				ut_ad(0);
-				/* fall through */
-			case DB_SUCCESS_LOCKED_REC:
-			case DB_DUPLICATE_KEY:
-				thr_get_trx(thr)->error_info = cursor->index;
-			}
-		} else {
-			/* Note that the following may return also
-			DB_LOCK_WAIT */
-
-			err = row_ins_duplicate_error_in_clust(
-				flags, cursor, entry, thr);
-		}
+		ut_ad(flags != (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
+				| BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG));
+		/* Note that the following may also return DB_LOCK_WAIT */
+		err = row_ins_duplicate_error_in_clust(
+			flags, cursor, entry, thr);
 
 		if (err != DB_SUCCESS) {
 err_exit:
@@ -2722,11 +2609,6 @@ row_ins_clust_index_entry_low(
 			&pcur, flags, mode, &offsets, &offsets_heap,
 			entry_heap, entry, thr, &mtr);
 
-		if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
-			row_log_table_insert(btr_cur_get_rec(cursor),
-					     index, offsets);
-		}
-
 		mtr_commit(&mtr);
 		mem_heap_free(entry_heap);
 	} else {
@@ -2765,14 +2647,9 @@ row_ins_clust_index_entry_low(
 			}
 		}
 
-		if (big_rec != NULL) {
-			mtr_commit(&mtr);
-
-			/* Online table rebuild could read (and
-			ignore) the incomplete record at this point.
-			If online rebuild is in progress, the
-			row_ins_index_entry_big_rec() will write log. */
+		mtr.commit();
 
+		if (big_rec != NULL) {
 			DBUG_EXECUTE_IF(
 				"row_ins_extern_checkpoint",
 				log_make_checkpoint_at(
@@ -2781,14 +2658,6 @@ row_ins_clust_index_entry_low(
 				entry, big_rec, offsets, &offsets_heap, index,
 				thr_get_trx(thr)->mysql_thd);
 			dtuple_convert_back_big_rec(index, entry, big_rec);
-		} else {
-			if (err == DB_SUCCESS
-			    && dict_index_is_online_ddl(index)) {
-				row_log_table_insert(
-					insert_rec, index, offsets);
-			}
-
-			mtr_commit(&mtr);
 		}
 	}
 
@@ -2858,6 +2727,7 @@ It is then unmarked. Otherwise, the entry is just inserted to the index.
 @retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
 @retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
 @return error code */
+static
 dberr_t
 row_ins_sec_index_entry_low(
 /*========================*/
@@ -2870,8 +2740,7 @@ row_ins_sec_index_entry_low(
 				/*!< in/out: memory heap that can be emptied */
 	mem_heap_t*	heap,	/*!< in/out: memory heap */
 	dtuple_t*	entry,	/*!< in/out: index entry to insert */
-	trx_id_t	trx_id,	/*!< in: PAGE_MAX_TRX_ID during
-				row_log_table_apply(), or 0 */
+	trx_id_t	trx_id,	/*!< in: PAGE_MAX_TRX_ID, or 0 */
 	que_thr_t*	thr,	/*!< in: query thread */
 	bool		dup_chk_only)
 				/*!< in: if true, just do duplicate check
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 07772fc5468..b7b3a4047ba 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -47,17 +47,6 @@ ulint onlineddl_rowlog_rows;
 ulint onlineddl_rowlog_pct_used;
 ulint onlineddl_pct_progress;
 
-/** Table row modification operations during online table rebuild.
-Delete-marked records are not copied to the rebuilt table. */
-enum row_tab_op {
-	/** Insert a record */
-	ROW_T_INSERT = 0x41,
-	/** Update a record in place */
-	ROW_T_UPDATE,
-	/** Delete (purge) a record */
-	ROW_T_DELETE
-};
-
 /** Index record modification operations during online index creation */
 enum row_op {
 	/** Insert a record */
@@ -80,94 +69,13 @@ struct row_log_buf_t {
 				that spans two blocks */
 	ulint		blocks; /*!< current position in blocks */
 	ulint		bytes;	/*!< current position within block */
-	ulonglong	total;	/*!< logical position, in bytes from
-				the start of the row_log_table log;
-				0 for row_log_online_op() and
-				row_log_apply(). */
-};
-
-/** Tracks BLOB allocation during online ALTER TABLE */
-class row_log_table_blob_t {
-public:
-	/** Constructor (declaring a BLOB freed)
-	@param offset_arg row_log_t::tail::total */
-#ifdef UNIV_DEBUG
-	row_log_table_blob_t(ulonglong offset_arg) :
-		old_offset (0), free_offset (offset_arg),
-		offset (BLOB_FREED) {}
-#else /* UNIV_DEBUG */
-	row_log_table_blob_t() :
-		offset (BLOB_FREED) {}
-#endif /* UNIV_DEBUG */
-
-	/** Declare a BLOB freed again.
-	@param offset_arg row_log_t::tail::total */
-#ifdef UNIV_DEBUG
-	void blob_free(ulonglong offset_arg)
-#else /* UNIV_DEBUG */
-	void blob_free()
-#endif /* UNIV_DEBUG */
-	{
-		ut_ad(offset < offset_arg);
-		ut_ad(offset != BLOB_FREED);
-		ut_d(old_offset = offset);
-		ut_d(free_offset = offset_arg);
-		offset = BLOB_FREED;
-	}
-	/** Declare a freed BLOB reused.
-	@param offset_arg row_log_t::tail::total */
-	void blob_alloc(ulonglong offset_arg) {
-		ut_ad(free_offset <= offset_arg);
-		ut_d(old_offset = offset);
-		offset = offset_arg;
-	}
-	/** Determine if a BLOB was freed at a given log position
-	@param offset_arg row_log_t::head::total after the log record
-	@return true if freed */
-	bool is_freed(ulonglong offset_arg) const {
-		/* This is supposed to be the offset at the end of the
-		current log record. */
-		ut_ad(offset_arg > 0);
-		/* We should never get anywhere close the magic value. */
-		ut_ad(offset_arg < BLOB_FREED);
-		return(offset_arg < offset);
-	}
-private:
-	/** Magic value for a freed BLOB */
-	static const ulonglong BLOB_FREED = ~0ULL;
-#ifdef UNIV_DEBUG
-	/** Old offset, in case a page was freed, reused, freed, ... */
-	ulonglong	old_offset;
-	/** Offset of last blob_free() */
-	ulonglong	free_offset;
-#endif /* UNIV_DEBUG */
-	/** Byte offset to the log file */
-	ulonglong	offset;
 };
 
-/** @brief Map of off-page column page numbers to 0 or log byte offsets.
-
-If there is no mapping for a page number, it is safe to access.
-If a page number maps to 0, it is an off-page column that has been freed.
-If a page number maps to a nonzero number, the number is a byte offset
-into the index->online_log, indicating that the page is safe to access
-when applying log records starting from that offset. */
-typedef std::map<
-	ulint,
-	row_log_table_blob_t,
-	std::less<ulint>,
-	ut_allocator<std::pair<const ulint, row_log_table_blob_t> > >
-	page_no_map;
-
 /** @brief Buffer for logging modifications during online index creation
 
 All modifications to an index that is being created will be logged by
 row_log_online_op() to this buffer.
 
-All modifications to a table that is being rebuilt will be logged by
-row_log_table_delete(), row_log_table_update(), row_log_table_insert()
-to this buffer.
-
 When head.blocks == tail.blocks, the reader will access tail.block
 directly. When also head.bytes == tail.bytes, both counts will be
 reset to 0 and the file will be truncated. */
@@ -175,36 +83,8 @@ struct row_log_t {
 	pfs_os_file_t	fd;	/*!< file descriptor */
 	ib_mutex_t	mutex;	/*!< mutex protecting error,
 				max_trx and tail */
-	page_no_map*	blobs;	/*!< map of page numbers of off-page columns
-				that have been freed during table-rebuilding
-				ALTER TABLE (row_log_table_*); protected by
-				index->lock X-latch only */
-	dict_table_t*	table;	/*!< table that is being rebuilt,
-				or NULL when this is a secondary
-				index that is being created online */
-	bool		same_pk;/*!< whether the definition of the PRIMARY KEY
-				has remained the same */
-	const dtuple_t*	defaults;
-				/*!< default values of added, changed columns,
-				or NULL */
-	const ulint*	col_map;/*!< mapping of old column numbers to
-				new ones, or NULL if !table */
 	dberr_t		error;	/*!< error that occurred during online
-				table rebuild */
-	/** The transaction ID of the ALTER TABLE transaction.  Any
-	concurrent DML would necessarily be logged with a larger
-	transaction ID, because ha_innobase::prepare_inplace_alter_table()
-	acts as a barrier that ensures that any concurrent transaction
-	that operates on the table would have been started after
-	ha_innobase::prepare_inplace_alter_table() returns and before
-	ha_innobase::commit_inplace_alter_table(commit=true) is invoked.
-
-	Due to the nondeterministic nature of purge and due to the
-	possibility of upgrading from an earlier version of MariaDB
-	or MySQL, it is possible that row_log_table_low() would be
-	fed DB_TRX_ID that precedes than min_trx. We must normalize
-	such references to reset_trx_id[]. */
-	trx_id_t	min_trx;
+				index creation */
 	trx_id_t	max_trx;/*!< biggest observed trx_id in
 				row_log_online_op();
 				protected by mutex and index->lock S-latch,
@@ -222,37 +102,9 @@ struct row_log_t {
 				decryption or NULL */
 	const char*	path;	/*!< where to create temporary file during
 				log operation */
-	/** the number of core fields in the clustered index of the
-	source table; before row_log_table_apply() completes, the
-	table could be emptied, so that table->is_instant() no longer holds,
-	but all log records must be in the "instant" format. */
-	unsigned	n_core_fields;
-	/** the default values of non-core fields when the operation started */
-	dict_col_t::def_t* non_core_fields;
-	bool		allow_not_null; /*!< Whether the alter ignore is being
-				used or if the sql mode is non-strict mode;
-				if not, NULL values will not be converted to
-				defaults */
 	const TABLE*	old_table; /*< Use old table in case of error. */
 
 	uint64_t	n_rows; /*< Number of rows read from the table */
-	/** Determine whether the log should be in the 'instant ADD' format
-	@param[in]	index	the clustered index of the source table
-	@return	whether to use the 'instant ADD COLUMN' format */
-	bool is_instant(const dict_index_t* index) const
-	{
-		ut_ad(table);
-		ut_ad(n_core_fields <= index->n_fields);
-		return n_core_fields != index->n_fields;
-	}
-
-	const byte* instant_field_value(ulint n, ulint* len) const
-	{
-		ut_ad(n >= n_core_fields);
-		const dict_col_t::def_t& d= non_core_fields[n - n_core_fields];
-		*len = d.len;
-		return static_cast<const byte*>(d.data);
-	}
 };
 
 /** Create the file or online log if it does not exist.
@@ -475,2756 +327,42 @@ row_log_online_op(
 }
 
 /******************************************************//**
-Gets the error status of the online index rebuild log.
-@return DB_SUCCESS or error code */
-dberr_t
-row_log_table_get_error(
-/*====================*/
-	const dict_index_t*	index)	/*!< in: clustered index of a table
-					that is being rebuilt online */
-{
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(dict_index_is_online_ddl(index));
-	return(index->online_log->error);
-}
-
-/******************************************************//**
-Starts logging an operation to a table that is being rebuilt.
-@return pointer to log, or NULL if no logging is necessary */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-byte*
-row_log_table_open(
-/*===============*/
-	row_log_t*	log,	/*!< in/out: online rebuild log */
-	ulint		size,	/*!< in: size of log record */
-	ulint*		avail)	/*!< out: available size for log record */
-{
-	mutex_enter(&log->mutex);
-
-	UNIV_MEM_INVALID(log->tail.buf, sizeof log->tail.buf);
-
-	if (log->error != DB_SUCCESS) {
-err_exit:
-		mutex_exit(&log->mutex);
-		return(NULL);
-	}
-
-	if (!row_log_block_allocate(log->tail)) {
-		log->error = DB_OUT_OF_MEMORY;
-		goto err_exit;
-	}
-
-	ut_ad(log->tail.bytes < srv_sort_buf_size);
-	*avail = srv_sort_buf_size - log->tail.bytes;
-
-	if (size > *avail) {
-		/* Make sure log->tail.buf is large enough */
-		ut_ad(size <= sizeof log->tail.buf);
-		return(log->tail.buf);
-	} else {
-		return(log->tail.block + log->tail.bytes);
-	}
-}
-
-/******************************************************//**
-Stops logging an operation to a table that is being rebuilt. */
-static MY_ATTRIBUTE((nonnull))
-void
-row_log_table_close_func(
-/*=====================*/
-	dict_index_t*	index,	/*!< in/out: online rebuilt index */
-#ifdef UNIV_DEBUG
-	const byte*	b,	/*!< in: end of log record */
-#endif /* UNIV_DEBUG */
-	ulint		size,	/*!< in: size of log record */
-	ulint		avail)	/*!< in: available size for log record */
-{
-	row_log_t*	log = index->online_log;
-
-	ut_ad(mutex_own(&log->mutex));
-
-	if (size >= avail) {
-		const os_offset_t	byte_offset
-			= (os_offset_t) log->tail.blocks
-			* srv_sort_buf_size;
-		IORequest		request(IORequest::WRITE);
-		byte*			buf = log->tail.block;
-
-		if (byte_offset + srv_sort_buf_size >= srv_online_max_size) {
-			goto write_failed;
-		}
-
-		if (size == avail) {
-			ut_ad(b == &buf[srv_sort_buf_size]);
-		} else {
-			ut_ad(b == log->tail.buf + size);
-			memcpy(buf + log->tail.bytes, log->tail.buf, avail);
-		}
-
-		UNIV_MEM_ASSERT_RW(buf, srv_sort_buf_size);
-
-		if (row_log_tmpfile(log) == OS_FILE_CLOSED) {
-			log->error = DB_OUT_OF_MEMORY;
-			goto err_exit;
-		}
-
-		/* If encryption is enabled encrypt buffer before writing it
-		to file system. */
-		if (log_tmp_is_encrypted()) {
-			if (!log_tmp_block_encrypt(
-				    log->tail.block, srv_sort_buf_size,
-				    log->crypt_tail, byte_offset,
-				    index->table->space->id)) {
-				log->error = DB_DECRYPTION_FAILED;
-				goto err_exit;
-			}
-
-			srv_stats.n_rowlog_blocks_encrypted.inc();
-			buf = log->crypt_tail;
-		}
-
-		log->tail.blocks++;
-		if (!os_file_write(
-			    request,
-			    "(modification log)",
-			    log->fd,
-			    buf, byte_offset, srv_sort_buf_size)) {
-write_failed:
-			log->error = DB_ONLINE_LOG_TOO_BIG;
-		}
-		UNIV_MEM_INVALID(log->tail.block, srv_sort_buf_size);
-		UNIV_MEM_INVALID(buf, srv_sort_buf_size);
-		memcpy(log->tail.block, log->tail.buf + avail, size - avail);
-		log->tail.bytes = size - avail;
-	} else {
-		log->tail.bytes += size;
-		ut_ad(b == log->tail.block + log->tail.bytes);
-	}
-
-	log->tail.total += size;
-	UNIV_MEM_INVALID(log->tail.buf, sizeof log->tail.buf);
-err_exit:
-	mutex_exit(&log->mutex);
-
-	my_atomic_addlint(&onlineddl_rowlog_rows, 1);
-	/* 10000 means 100.00%, 4525 means 45.25% */
-	onlineddl_rowlog_pct_used = static_cast<ulint>((log->tail.total * 10000) / srv_online_max_size);
-}
-
-#ifdef UNIV_DEBUG
-# define row_log_table_close(index, b, size, avail)	\
-	row_log_table_close_func(index, b, size, avail)
-#else /* UNIV_DEBUG */
-# define row_log_table_close(log, b, size, avail)	\
-	row_log_table_close_func(index, size, avail)
-#endif /* UNIV_DEBUG */
-
-/** Check whether a virtual column is indexed in the new table being
-created during alter table
-@param[in]	index	cluster index
-@param[in]	v_no	virtual column number
-@return true if it is indexed, else false */
+Allocate the row log for an index and flag the index
+for online creation.
+@retval true on success, false on failure
 bool
-row_log_col_is_indexed(
-	const dict_index_t*	index,
-	ulint			v_no)
-{
-	return(dict_table_get_nth_v_col(
-		index->online_log->table, v_no)->m_col.ord_part);
-}
-
-/******************************************************//**
-Logs a delete operation to a table that is being rebuilt.
-This will be merged in row_log_table_apply_delete(). */
-void
-row_log_table_delete(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index) */
-	const byte*	sys)	/*!< in: DB_TRX_ID,DB_ROLL_PTR that should
-				be logged, or NULL to use those in rec */
-{
-	ulint		old_pk_extra_size;
-	ulint		old_pk_size;
-	ulint		mrec_size;
-	ulint		avail_size;
-	mem_heap_t*	heap		= NULL;
-	const dtuple_t*	old_pk;
-
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(rec_offs_validate(rec, index, offsets));
-	ut_ad(rec_offs_n_fields(offsets) == dict_index_get_n_fields(index));
-	ut_ad(rec_offs_size(offsets) <= sizeof index->online_log->tail.buf);
-	ut_ad(rw_lock_own_flagged(
-			&index->lock,
-			RW_LOCK_FLAG_S | RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
-
-	if (index->online_status != ONLINE_INDEX_CREATION
-	    || (index->type & DICT_CORRUPT) || index->table->corrupted
-	    || index->online_log->error != DB_SUCCESS) {
-		return;
-	}
-
-	dict_table_t* new_table = index->online_log->table;
-	dict_index_t* new_index = dict_table_get_first_index(new_table);
-
-	ut_ad(dict_index_is_clust(new_index));
-	ut_ad(!dict_index_is_online_ddl(new_index));
-	ut_ad(index->online_log->min_trx);
-
-	/* Create the tuple PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR in new_table. */
-	if (index->online_log->same_pk) {
-		dtuple_t*	tuple;
-		ut_ad(new_index->n_uniq == index->n_uniq);
-
-		/* The PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR are in the first
-		fields of the record. */
-		heap = mem_heap_create(
-			DATA_TRX_ID_LEN
-			+ DTUPLE_EST_ALLOC(new_index->first_user_field()));
-		old_pk = tuple = dtuple_create(heap,
-					       new_index->first_user_field());
-		dict_index_copy_types(tuple, new_index, tuple->n_fields);
-		dtuple_set_n_fields_cmp(tuple, new_index->n_uniq);
-
-		for (ulint i = 0; i < dtuple_get_n_fields(tuple); i++) {
-			ulint		len;
-			const void*	field	= rec_get_nth_field(
-				rec, offsets, i, &len);
-			dfield_t*	dfield	= dtuple_get_nth_field(
-				tuple, i);
-			ut_ad(len != UNIV_SQL_NULL);
-			ut_ad(!rec_offs_nth_extern(offsets, i));
-			dfield_set_data(dfield, field, len);
-		}
-
-		dfield_t* db_trx_id = dtuple_get_nth_field(
-			tuple, new_index->n_uniq);
-
-		const bool replace_sys_fields
-			= sys
-			|| trx_read_trx_id(static_cast<byte*>(db_trx_id->data))
-			< index->online_log->min_trx;
-
-		if (replace_sys_fields) {
-			if (!sys || trx_read_trx_id(sys)
-			    < index->online_log->min_trx) {
-				sys = reset_trx_id;
-			}
-
-			dfield_set_data(db_trx_id, sys, DATA_TRX_ID_LEN);
-			dfield_set_data(db_trx_id + 1, sys + DATA_TRX_ID_LEN,
-					DATA_ROLL_PTR_LEN);
-		}
-
-		ut_d(trx_id_check(db_trx_id->data,
-				  index->online_log->min_trx));
-	} else {
-		/* The PRIMARY KEY has changed. Translate the tuple. */
-		old_pk = row_log_table_get_pk(
-			rec, index, offsets, NULL, &heap);
-
-		if (!old_pk) {
-			ut_ad(index->online_log->error != DB_SUCCESS);
-			if (heap) {
-				goto func_exit;
-			}
-			return;
-		}
-	}
-
-	ut_ad(DATA_TRX_ID_LEN == dtuple_get_nth_field(
-		      old_pk, old_pk->n_fields - 2)->len);
-	ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
-		      old_pk, old_pk->n_fields - 1)->len);
-	old_pk_size = rec_get_converted_size_temp(
-		new_index, old_pk->fields, old_pk->n_fields,
-		&old_pk_extra_size);
-	ut_ad(old_pk_extra_size < 0x100);
-
-	/* 2 = 1 (extra_size) + at least 1 byte payload */
-	mrec_size = 2 + old_pk_size;
-
-	if (byte* b = row_log_table_open(index->online_log,
-					 mrec_size, &avail_size)) {
-		*b++ = ROW_T_DELETE;
-		*b++ = static_cast<byte>(old_pk_extra_size);
-
-		rec_convert_dtuple_to_temp(
-			b + old_pk_extra_size, new_index,
-			old_pk->fields, old_pk->n_fields);
-
-		b += old_pk_size;
-
-		row_log_table_close(index, b, mrec_size, avail_size);
-	}
-
-func_exit:
-	mem_heap_free(heap);
-}
-
-/******************************************************//**
-Logs an insert or update to a table that is being rebuilt. */
-static
-void
-row_log_table_low_redundant(
-/*========================*/
-	const rec_t*		rec,	/*!< in: clustered index leaf
-					page record in ROW_FORMAT=REDUNDANT,
-					page X-latched */
-	dict_index_t*		index,	/*!< in/out: clustered index, S-latched
-					or X-latched */
-	bool			insert,	/*!< in: true if insert,
-					false if update */
-	const dtuple_t*		old_pk,	/*!< in: old PRIMARY KEY value
-					(if !insert and a PRIMARY KEY
-					is being created) */
-	const dict_index_t*	new_index)
-					/*!< in: clustered index of the
-					new table, not latched */
-{
-	ulint		old_pk_size;
-	ulint		old_pk_extra_size;
-	ulint		size;
-	ulint		extra_size;
-	ulint		mrec_size;
-	ulint		avail_size;
-	mem_heap_t*	heap		= NULL;
-	dtuple_t*	tuple;
-	const ulint	n_fields = rec_get_n_fields_old(rec);
-
-	ut_ad(!page_is_comp(page_align(rec)));
-	ut_ad(index->n_fields >= n_fields);
-	ut_ad(index->n_fields == n_fields || index->is_instant());
-	ut_ad(dict_tf2_is_valid(index->table->flags, index->table->flags2));
-	ut_ad(!dict_table_is_comp(index->table));  /* redundant row format */
-	ut_ad(dict_index_is_clust(new_index));
-
-	heap = mem_heap_create(DTUPLE_EST_ALLOC(n_fields));
-	tuple = dtuple_create(heap, n_fields);
-	dict_index_copy_types(tuple, index, n_fields);
-
-	dtuple_set_n_fields_cmp(tuple, dict_index_get_n_unique(index));
-
-	if (rec_get_1byte_offs_flag(rec)) {
-		for (ulint i = 0; i < n_fields; i++) {
-			dfield_t*	dfield;
-			ulint		len;
-			const void*	field;
-
-			dfield = dtuple_get_nth_field(tuple, i);
-			field = rec_get_nth_field_old(rec, i, &len);
-
-			dfield_set_data(dfield, field, len);
-		}
-	} else {
-		for (ulint i = 0; i < n_fields; i++) {
-			dfield_t*	dfield;
-			ulint		len;
-			const void*	field;
-
-			dfield = dtuple_get_nth_field(tuple, i);
-			field = rec_get_nth_field_old(rec, i, &len);
-
-			dfield_set_data(dfield, field, len);
-
-			if (rec_2_is_field_extern(rec, i)) {
-				dfield_set_ext(dfield);
-			}
-		}
-	}
-
-	dfield_t* db_trx_id = dtuple_get_nth_field(tuple, index->n_uniq);
-	ut_ad(dfield_get_len(db_trx_id) == DATA_TRX_ID_LEN);
-	ut_ad(dfield_get_len(db_trx_id + 1) == DATA_ROLL_PTR_LEN);
-
-	if (trx_read_trx_id(static_cast<const byte*>
-			    (dfield_get_data(db_trx_id)))
-	    < index->online_log->min_trx) {
-		dfield_set_data(db_trx_id, reset_trx_id, DATA_TRX_ID_LEN);
-		dfield_set_data(db_trx_id + 1, reset_trx_id + DATA_TRX_ID_LEN,
-				DATA_ROLL_PTR_LEN);
-	}
-
-	const bool is_instant = index->online_log->is_instant(index);
-	rec_comp_status_t status = is_instant
-		? REC_STATUS_INSTANT : REC_STATUS_ORDINARY;
-
-	size = rec_get_converted_size_temp(
-		index, tuple->fields, tuple->n_fields, &extra_size, status);
-	if (is_instant) {
-		size++;
-		extra_size++;
-	}
-
-	mrec_size = ROW_LOG_HEADER_SIZE + size + (extra_size >= 0x80);
-
-	if (insert || index->online_log->same_pk) {
-		ut_ad(!old_pk);
-		old_pk_extra_size = old_pk_size = 0;
-	} else {
-		ut_ad(old_pk);
-		ut_ad(old_pk->n_fields == 2 + old_pk->n_fields_cmp);
-		ut_ad(DATA_TRX_ID_LEN == dtuple_get_nth_field(
-			      old_pk, old_pk->n_fields - 2)->len);
-		ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
-			      old_pk, old_pk->n_fields - 1)->len);
-
-		old_pk_size = rec_get_converted_size_temp(
-			new_index, old_pk->fields, old_pk->n_fields,
-			&old_pk_extra_size);
-		ut_ad(old_pk_extra_size < 0x100);
-		mrec_size += 1/*old_pk_extra_size*/ + old_pk_size;
-	}
-
-	if (byte* b = row_log_table_open(index->online_log,
-					 mrec_size, &avail_size)) {
-		if (insert) {
-			*b++ = ROW_T_INSERT;
-		} else {
-			*b++ = ROW_T_UPDATE;
-
-			if (old_pk_size) {
-				*b++ = static_cast<byte>(old_pk_extra_size);
-
-				rec_convert_dtuple_to_temp(
-					b + old_pk_extra_size, new_index,
-					old_pk->fields, old_pk->n_fields);
-				b += old_pk_size;
-			}
-		}
-
-		if (extra_size < 0x80) {
-			*b++ = static_cast<byte>(extra_size);
-		} else {
-			ut_ad(extra_size < 0x8000);
-			*b++ = static_cast<byte>(0x80 | (extra_size >> 8));
-			*b++ = static_cast<byte>(extra_size);
-		}
-
-		if (status == REC_STATUS_INSTANT) {
-			ut_ad(is_instant);
-			if (n_fields <= index->online_log->n_core_fields) {
-				status = REC_STATUS_ORDINARY;
-			}
-			*b = status;
-		}
-
-		rec_convert_dtuple_to_temp(
-			b + extra_size, index, tuple->fields, tuple->n_fields,
-			status);
-		b += size;
-
-		row_log_table_close(index, b, mrec_size, avail_size);
-	}
-
-	mem_heap_free(heap);
-}
-
-/******************************************************//**
-Logs an insert or update to a table that is being rebuilt. */
-static
-void
-row_log_table_low(
-/*==============*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index) */
-	bool		insert,	/*!< in: true if insert, false if update */
-	const dtuple_t*	old_pk)	/*!< in: old PRIMARY KEY value (if !insert
-				and a PRIMARY KEY is being created) */
+row_log_allocate(
+/*=============*/
+	dict_index_t*	index,	/*!< in/out: index */
+	const char*	path,	/*!< in: where to create temporary file */
+	const TABLE*	old_table)	/*!< in: table definition before alter */
 {
-	ulint			old_pk_size;
-	ulint			old_pk_extra_size;
-	ulint			extra_size;
-	ulint			mrec_size;
-	ulint			avail_size;
-	const dict_index_t*	new_index;
-	row_log_t*		log = index->online_log;
-
-	new_index = dict_table_get_first_index(log->table);
-
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(dict_index_is_clust(new_index));
-	ut_ad(!dict_index_is_online_ddl(new_index));
-	ut_ad(rec_offs_validate(rec, index, offsets));
-	ut_ad(rec_offs_n_fields(offsets) == dict_index_get_n_fields(index));
-	ut_ad(rec_offs_size(offsets) <= sizeof log->tail.buf);
-	ut_ad(rw_lock_own_flagged(
-			&index->lock,
-			RW_LOCK_FLAG_S | RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
-#ifdef UNIV_DEBUG
-	switch (fil_page_get_type(page_align(rec))) {
-	case FIL_PAGE_INDEX:
-		break;
-	case FIL_PAGE_TYPE_INSTANT:
-		ut_ad(index->is_instant());
-		ut_ad(page_is_root(page_align(rec)));
-		break;
-	default:
-		ut_ad(!"wrong page type");
-	}
-#endif /* UNIV_DEBUG */
-	ut_ad(!rec_is_metadata(rec, *index));
-	ut_ad(page_rec_is_leaf(rec));
-	ut_ad(!page_is_comp(page_align(rec)) == !rec_offs_comp(offsets));
-	/* old_pk=row_log_table_get_pk() [not needed in INSERT] is a prefix
-	of the clustered index record (PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR),
-	with no information on virtual columns */
-	ut_ad(!old_pk || !insert);
-	ut_ad(!old_pk || old_pk->n_v_fields == 0);
-
-	if (index->online_status != ONLINE_INDEX_CREATION
-	    || (index->type & DICT_CORRUPT) || index->table->corrupted
-	    || log->error != DB_SUCCESS) {
-		return;
-	}
-
-	if (!rec_offs_comp(offsets)) {
-		row_log_table_low_redundant(
-			rec, index, insert, old_pk, new_index);
-		return;
-	}
+	row_log_t*	log;
+	DBUG_ENTER("row_log_allocate");
 
-	ut_ad(page_is_comp(page_align(rec)));
-	ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY
-	      || rec_get_status(rec) == REC_STATUS_INSTANT);
-
-	const ulint omit_size = REC_N_NEW_EXTRA_BYTES;
-
-	const ulint rec_extra_size = rec_offs_extra_size(offsets) - omit_size;
-	const bool is_instant = log->is_instant(index);
-	extra_size = rec_extra_size + is_instant;
-
-	unsigned fake_extra_size = 0;
-	byte fake_extra_buf[2];
-	if (is_instant && UNIV_UNLIKELY(!index->is_instant())) {
-		/* The source table was emptied after ALTER TABLE
-		started, and it was converted to non-instant format.
-		Because row_log_table_apply_op() expects to find
-		all records to be logged in the same way, we will
-		be unable to copy the rec_extra_size bytes from the
-		record header, but must convert them here. */
-		unsigned n_add = index->n_fields - 1 - log->n_core_fields;
-		fake_extra_size = rec_get_n_add_field_len(n_add);
-		ut_ad(fake_extra_size == 1 || fake_extra_size == 2);
-		extra_size += fake_extra_size;
-		byte* fake_extra = fake_extra_buf + fake_extra_size - 1;
-		rec_set_n_add_field(fake_extra, n_add);
-		ut_ad(fake_extra + 1 == fake_extra_buf);
-	}
+	ut_ad(!dict_index_is_online_ddl(index));
+	ut_ad(!dict_index_is_clust(index));
+	ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X));
 
-	mrec_size = ROW_LOG_HEADER_SIZE
-		+ (extra_size >= 0x80) + rec_offs_size(offsets) - omit_size
-		+ is_instant + fake_extra_size;
+	log = static_cast<row_log_t*>(ut_malloc_nokey(sizeof *log));
 
-	if (insert || log->same_pk) {
-		ut_ad(!old_pk);
-		old_pk_extra_size = old_pk_size = 0;
-	} else {
-		ut_ad(old_pk);
-		ut_ad(old_pk->n_fields == 2 + old_pk->n_fields_cmp);
-		ut_ad(DATA_TRX_ID_LEN == dtuple_get_nth_field(
-			      old_pk, old_pk->n_fields - 2)->len);
-		ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
-			      old_pk, old_pk->n_fields - 1)->len);
-
-		old_pk_size = rec_get_converted_size_temp(
-			new_index, old_pk->fields, old_pk->n_fields,
-			&old_pk_extra_size);
-		ut_ad(old_pk_extra_size < 0x100);
-		mrec_size += 1/*old_pk_extra_size*/ + old_pk_size;
+	if (log == NULL) {
+		DBUG_RETURN(false);
 	}
 
-	if (byte* b = row_log_table_open(log, mrec_size, &avail_size)) {
-		if (insert) {
-			*b++ = ROW_T_INSERT;
-		} else {
-			*b++ = ROW_T_UPDATE;
-
-			if (old_pk_size) {
-				*b++ = static_cast<byte>(old_pk_extra_size);
-
-				rec_convert_dtuple_to_temp(
-					b + old_pk_extra_size, new_index,
-					old_pk->fields, old_pk->n_fields);
-				b += old_pk_size;
-			}
-		}
-
-		if (extra_size < 0x80) {
-			*b++ = static_cast<byte>(extra_size);
-		} else {
-			ut_ad(extra_size < 0x8000);
-			*b++ = static_cast<byte>(0x80 | (extra_size >> 8));
-			*b++ = static_cast<byte>(extra_size);
-		}
-
-		if (is_instant) {
-			*b++ = fake_extra_size
-				? REC_STATUS_INSTANT
-				: rec_get_status(rec);
-		} else {
-			ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY);
-		}
-
-		memcpy(b, rec - rec_extra_size - omit_size, rec_extra_size);
-		b += rec_extra_size;
-		memcpy(b, fake_extra_buf, fake_extra_size);
-		b += fake_extra_size;
-		ulint len;
-		ulint trx_id_offs = rec_get_nth_field_offs(
-			offsets, index->n_uniq, &len);
-		ut_ad(len == DATA_TRX_ID_LEN);
-		memcpy(b, rec, rec_offs_data_size(offsets));
-		if (trx_read_trx_id(b + trx_id_offs) < log->min_trx) {
-			memcpy(b + trx_id_offs,
-			       reset_trx_id, sizeof reset_trx_id);
-		}
-		b += rec_offs_data_size(offsets);
-
-		row_log_table_close(index, b, mrec_size, avail_size);
-	}
-}
+	log->fd = OS_FILE_CLOSED;
+	mutex_create(LATCH_ID_INDEX_ONLINE_LOG, &log->mutex);
 
-/******************************************************//**
-Logs an update to a table that is being rebuilt.
-This will be merged in row_log_table_apply_update(). */
-void
-row_log_table_update(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index) */
-	const dtuple_t*	old_pk)	/*!< in: row_log_table_get_pk()
-				before the update */
-{
-	row_log_table_low(rec, index, offsets, false, old_pk);
-}
-
-/** Gets the old table column of a PRIMARY KEY column.
-@param table old table (before ALTER TABLE)
-@param col_map mapping of old column numbers to new ones
-@param col_no column position in the new table
-@return old table column, or NULL if this is an added column */
-static
-const dict_col_t*
-row_log_table_get_pk_old_col(
-/*=========================*/
-	const dict_table_t*	table,
-	const ulint*		col_map,
-	ulint			col_no)
-{
-	for (ulint i = 0; i < table->n_cols; i++) {
-		if (col_no == col_map[i]) {
-			return(dict_table_get_nth_col(table, i));
-		}
-	}
-
-	return(NULL);
-}
-
-/** Maps an old table column of a PRIMARY KEY column.
-@param[in]	ifield		clustered index field in the new table (after
-ALTER TABLE)
-@param[in,out]	dfield		clustered index tuple field in the new table
-@param[in,out]	heap		memory heap for allocating dfield contents
-@param[in]	rec		clustered index leaf page record in the old
-table
-@param[in]	offsets		rec_get_offsets(rec)
-@param[in]	i		rec field corresponding to col
-@param[in]	page_size	page size of the old table
-@param[in]	max_len		maximum length of dfield
-@param[in]	log		row log for the table
-@retval DB_INVALID_NULL		if a NULL value is encountered
-@retval DB_TOO_BIG_INDEX_COL	if the maximum prefix length is exceeded */
-static
-dberr_t
-row_log_table_get_pk_col(
-	const dict_field_t*	ifield,
-	dfield_t*		dfield,
-	mem_heap_t*		heap,
-	const rec_t*		rec,
-	const ulint*		offsets,
-	ulint			i,
-	const page_size_t&	page_size,
-	ulint			max_len,
-	const row_log_t*	log)
-{
-	const byte*	field;
-	ulint		len;
-
-	field = rec_get_nth_field(rec, offsets, i, &len);
-
-	if (len == UNIV_SQL_NULL) {
-		if (!log->allow_not_null) {
-			return(DB_INVALID_NULL);
-		}
-
-		ulint n_default_cols = i - DATA_N_SYS_COLS;
-
-		field = static_cast<const byte*>(
-			log->defaults->fields[n_default_cols].data);
-		if (!field) {
-			return(DB_INVALID_NULL);
-		}
-		len = log->defaults->fields[i - DATA_N_SYS_COLS].len;
-	}
-
-	if (rec_offs_nth_extern(offsets, i)) {
-		ulint	field_len = ifield->prefix_len;
-		byte*	blob_field;
-
-		if (!field_len) {
-			field_len = ifield->fixed_len;
-			if (!field_len) {
-				field_len = max_len + 1;
-			}
-		}
-
-		blob_field = static_cast<byte*>(
-			mem_heap_alloc(heap, field_len));
-
-		len = btr_copy_externally_stored_field_prefix(
-			blob_field, field_len, page_size, field, len);
-		if (len >= max_len + 1) {
-			return(DB_TOO_BIG_INDEX_COL);
-		}
-
-		dfield_set_data(dfield, blob_field, len);
-	} else {
-		dfield_set_data(dfield, mem_heap_dup(heap, field, len), len);
-	}
-
-	return(DB_SUCCESS);
-}
-
-/******************************************************//**
-Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR
-of a table that is being rebuilt.
-@return tuple of PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR in the rebuilt table,
-or NULL if the PRIMARY KEY definition does not change */
-const dtuple_t*
-row_log_table_get_pk(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets,/*!< in: rec_get_offsets(rec,index) */
-	byte*		sys,	/*!< out: DB_TRX_ID,DB_ROLL_PTR for
-				row_log_table_delete(), or NULL */
-	mem_heap_t**	heap)	/*!< in/out: memory heap where allocated */
-{
-	dtuple_t*	tuple	= NULL;
-	row_log_t*	log	= index->online_log;
-
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(dict_index_is_online_ddl(index));
-	ut_ad(!offsets || rec_offs_validate(rec, index, offsets));
-	ut_ad(rw_lock_own_flagged(
-			&index->lock,
-			RW_LOCK_FLAG_S | RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
-
-	ut_ad(log);
-	ut_ad(log->table);
-	ut_ad(log->min_trx);
-
-	if (log->same_pk) {
-		/* The PRIMARY KEY columns are unchanged. */
-		if (sys) {
-			/* Store the DB_TRX_ID,DB_ROLL_PTR. */
-			ulint	trx_id_offs = index->trx_id_offset;
-
-			if (!trx_id_offs) {
-				ulint	pos = dict_index_get_sys_col_pos(
-					index, DATA_TRX_ID);
-				ulint	len;
-				ut_ad(pos > 0);
-
-				if (!offsets) {
-					offsets = rec_get_offsets(
-						rec, index, NULL, true,
-						pos + 1, heap);
-				}
-
-				trx_id_offs = rec_get_nth_field_offs(
-					offsets, pos, &len);
-				ut_ad(len == DATA_TRX_ID_LEN);
-			}
-
-			const byte* ptr = trx_read_trx_id(rec + trx_id_offs)
-				< log->min_trx
-				? reset_trx_id
-				: rec + trx_id_offs;
-
-			memcpy(sys, ptr, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
-			ut_d(trx_id_check(sys, log->min_trx));
-		}
-
-		return(NULL);
-	}
-
-	mutex_enter(&log->mutex);
-
-	/* log->error is protected by log->mutex. */
-	if (log->error == DB_SUCCESS) {
-		dict_table_t*	new_table	= log->table;
-		dict_index_t*	new_index
-			= dict_table_get_first_index(new_table);
-		const ulint	new_n_uniq
-			= dict_index_get_n_unique(new_index);
-
-		if (!*heap) {
-			ulint	size = 0;
-
-			if (!offsets) {
-				size += (1 + REC_OFFS_HEADER_SIZE
-					 + unsigned(index->n_fields))
-					* sizeof *offsets;
-			}
-
-			for (ulint i = 0; i < new_n_uniq; i++) {
-				size += dict_col_get_min_size(
-					dict_index_get_nth_col(new_index, i));
-			}
-
-			*heap = mem_heap_create(
-				DTUPLE_EST_ALLOC(new_n_uniq + 2) + size);
-		}
-
-		if (!offsets) {
-			offsets = rec_get_offsets(rec, index, NULL, true,
-						  ULINT_UNDEFINED, heap);
-		}
-
-		tuple = dtuple_create(*heap, new_n_uniq + 2);
-		dict_index_copy_types(tuple, new_index, tuple->n_fields);
-		dtuple_set_n_fields_cmp(tuple, new_n_uniq);
-
-		const ulint max_len = DICT_MAX_FIELD_LEN_BY_FORMAT(new_table);
-
-		const page_size_t&	page_size
-			= dict_table_page_size(index->table);
-
-		for (ulint new_i = 0; new_i < new_n_uniq; new_i++) {
-			dict_field_t*	ifield;
-			dfield_t*	dfield;
-			ulint		prtype;
-			ulint		mbminlen, mbmaxlen;
-
-			ifield = dict_index_get_nth_field(new_index, new_i);
-			dfield = dtuple_get_nth_field(tuple, new_i);
-
-			const ulint	col_no
-				= dict_field_get_col(ifield)->ind;
-
-			if (const dict_col_t* col
-			    = row_log_table_get_pk_old_col(
-				    index->table, log->col_map, col_no)) {
-				ulint	i = dict_col_get_clust_pos(col, index);
-
-				if (i == ULINT_UNDEFINED) {
-					ut_ad(0);
-					log->error = DB_CORRUPTION;
-					goto err_exit;
-				}
-
-				log->error = row_log_table_get_pk_col(
-					ifield, dfield, *heap,
-					rec, offsets, i, page_size, max_len, log);
-
-				if (log->error != DB_SUCCESS) {
-err_exit:
-					tuple = NULL;
-					goto func_exit;
-				}
-
-				mbminlen = col->mbminlen;
-				mbmaxlen = col->mbmaxlen;
-				prtype = col->prtype;
-			} else {
-				/* No matching column was found in the old
-				table, so this must be an added column.
-				Copy the default value. */
-				ut_ad(log->defaults);
-
-				dfield_copy(dfield, dtuple_get_nth_field(
-						    log->defaults, col_no));
-				mbminlen = dfield->type.mbminlen;
-				mbmaxlen = dfield->type.mbmaxlen;
-				prtype = dfield->type.prtype;
-			}
-
-			ut_ad(!dfield_is_ext(dfield));
-			ut_ad(!dfield_is_null(dfield));
-
-			if (ifield->prefix_len) {
-				ulint	len = dtype_get_at_most_n_mbchars(
-					prtype, mbminlen, mbmaxlen,
-					ifield->prefix_len,
-					dfield_get_len(dfield),
-					static_cast<const char*>(
-						dfield_get_data(dfield)));
-
-				ut_ad(len <= dfield_get_len(dfield));
-				dfield_set_len(dfield, len);
-			}
-		}
-
-		const byte* trx_roll = rec
-			+ row_get_trx_id_offset(index, offsets);
-
-		/* Copy the fields, because the fields will be updated
-		or the record may be moved somewhere else in the B-tree
-		as part of the upcoming operation. */
-		if (trx_read_trx_id(trx_roll) < log->min_trx) {
-			trx_roll = reset_trx_id;
-			if (sys) {
-				memcpy(sys, trx_roll,
-				       DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
-			}
-		} else if (sys) {
-			memcpy(sys, trx_roll,
-			       DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
-			trx_roll = sys;
-		} else {
-			trx_roll = static_cast<const byte*>(
-				mem_heap_dup(
-					*heap, trx_roll,
-					DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN));
-		}
-
-		ut_d(trx_id_check(trx_roll, log->min_trx));
-
-		dfield_set_data(dtuple_get_nth_field(tuple, new_n_uniq),
-				trx_roll, DATA_TRX_ID_LEN);
-		dfield_set_data(dtuple_get_nth_field(tuple, new_n_uniq + 1),
-				trx_roll + DATA_TRX_ID_LEN, DATA_ROLL_PTR_LEN);
-	}
-
-func_exit:
-	mutex_exit(&log->mutex);
-	return(tuple);
-}
-
-/******************************************************//**
-Logs an insert to a table that is being rebuilt.
-This will be merged in row_log_table_apply_insert(). */
-void
-row_log_table_insert(
-/*=================*/
-	const rec_t*	rec,	/*!< in: clustered index leaf page record,
-				page X-latched */
-	dict_index_t*	index,	/*!< in/out: clustered index, S-latched
-				or X-latched */
-	const ulint*	offsets)/*!< in: rec_get_offsets(rec,index) */
-{
-	row_log_table_low(rec, index, offsets, true, NULL);
-}
-
-/******************************************************//**
-Notes that a BLOB is being freed during online ALTER TABLE. */
-void
-row_log_table_blob_free(
-/*====================*/
-	dict_index_t*	index,	/*!< in/out: clustered index, X-latched */
-	ulint		page_no)/*!< in: starting page number of the BLOB */
-{
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(dict_index_is_online_ddl(index));
-	ut_ad(rw_lock_own_flagged(
-			&index->lock,
-			RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
-	ut_ad(page_no != FIL_NULL);
-
-	if (index->online_log->error != DB_SUCCESS) {
-		return;
-	}
-
-	page_no_map*	blobs	= index->online_log->blobs;
-
-	if (blobs == NULL) {
-		index->online_log->blobs = blobs = UT_NEW_NOKEY(page_no_map());
-	}
-
-#ifdef UNIV_DEBUG
-	const ulonglong	log_pos = index->online_log->tail.total;
-#else
-# define log_pos /* empty */
-#endif /* UNIV_DEBUG */
-
-	const page_no_map::value_type v(page_no,
-					row_log_table_blob_t(log_pos));
-
-	std::pair<page_no_map::iterator,bool> p = blobs->insert(v);
-
-	if (!p.second) {
-		/* Update the existing mapping. */
-		ut_ad(p.first->first == page_no);
-		p.first->second.blob_free(log_pos);
-	}
-#undef log_pos
-}
-
-/******************************************************//**
-Notes that a BLOB is being allocated during online ALTER TABLE. */
-void
-row_log_table_blob_alloc(
-/*=====================*/
-	dict_index_t*	index,	/*!< in/out: clustered index, X-latched */
-	ulint		page_no)/*!< in: starting page number of the BLOB */
-{
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(dict_index_is_online_ddl(index));
-
-	ut_ad(rw_lock_own_flagged(
-			&index->lock,
-			RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
-
-	ut_ad(page_no != FIL_NULL);
-
-	if (index->online_log->error != DB_SUCCESS) {
-		return;
-	}
-
-	/* Only track allocations if the same page has been freed
-	earlier. Double allocation without a free is not allowed. */
-	if (page_no_map* blobs = index->online_log->blobs) {
-		page_no_map::iterator p = blobs->find(page_no);
-
-		if (p != blobs->end()) {
-			ut_ad(p->first == page_no);
-			p->second.blob_alloc(index->online_log->tail.total);
-		}
-	}
-}
-
-/******************************************************//**
-Converts a log record to a table row.
-@return converted row, or NULL if the conversion fails */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-const dtuple_t*
-row_log_table_apply_convert_mrec(
-/*=============================*/
-	const mrec_t*		mrec,		/*!< in: merge record */
-	dict_index_t*		index,		/*!< in: index of mrec */
-	const ulint*		offsets,	/*!< in: offsets of mrec */
-	row_log_t*		log,		/*!< in: rebuild context */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	dberr_t*		error)		/*!< out: DB_SUCCESS or
-						DB_MISSING_HISTORY or
-						reason of failure */
-{
-	dtuple_t*	row;
-
-	log->n_rows++;
-	*error = DB_SUCCESS;
-
-	/* This is based on row_build(). */
-	if (log->defaults) {
-		row = dtuple_copy(log->defaults, heap);
-		/* dict_table_copy_types() would set the fields to NULL */
-		for (ulint i = 0; i < dict_table_get_n_cols(log->table); i++) {
-			dict_col_copy_type(
-				dict_table_get_nth_col(log->table, i),
-				dfield_get_type(dtuple_get_nth_field(row, i)));
-		}
-	} else {
-		row = dtuple_create(heap, dict_table_get_n_cols(log->table));
-		dict_table_copy_types(row, log->table);
-	}
-
-	for (ulint i = 0; i < rec_offs_n_fields(offsets); i++) {
-		const dict_field_t*	ind_field
-			= dict_index_get_nth_field(index, i);
-
-		if (ind_field->prefix_len) {
-			/* Column prefixes can only occur in key
-			fields, which cannot be stored externally. For
-			a column prefix, there should also be the full
-			field in the clustered index tuple. The row
-			tuple comprises full fields, not prefixes. */
-			ut_ad(!rec_offs_nth_extern(offsets, i));
-			continue;
-		}
-
-		const dict_col_t*	col
-			= dict_field_get_col(ind_field);
-
-		if (col->is_dropped()) {
-			/* the column was instantly dropped earlier */
-			ut_ad(index->table->instant);
-			continue;
-		}
-
-		ulint			col_no
-			= log->col_map[dict_col_get_no(col)];
-
-		if (col_no == ULINT_UNDEFINED) {
-			/* the column is being dropped now */
-			continue;
-		}
-
-		dfield_t*	dfield
-			= dtuple_get_nth_field(row, col_no);
-
-		ulint			len;
-		const byte*		data;
-
-		if (rec_offs_nth_extern(offsets, i)) {
-			ut_ad(rec_offs_any_extern(offsets));
-			rw_lock_x_lock(dict_index_get_lock(index));
-
-			if (const page_no_map* blobs = log->blobs) {
-				data = rec_get_nth_field(
-					mrec, offsets, i, &len);
-				ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
-
-				ulint	page_no = mach_read_from_4(
-					data + len - (BTR_EXTERN_FIELD_REF_SIZE
-						      - BTR_EXTERN_PAGE_NO));
-				page_no_map::const_iterator p = blobs->find(
-					page_no);
-				if (p != blobs->end()
-				    && p->second.is_freed(log->head.total)) {
-					/* This BLOB has been freed.
-					We must not access the row. */
-					*error = DB_MISSING_HISTORY;
-					dfield_set_data(dfield, data, len);
-					dfield_set_ext(dfield);
-					goto blob_done;
-				}
-			}
-
-			data = btr_rec_copy_externally_stored_field(
-				mrec, offsets,
-				dict_table_page_size(index->table),
-				i, &len, heap);
-			ut_a(data);
-			dfield_set_data(dfield, data, len);
-blob_done:
-			rw_lock_x_unlock(dict_index_get_lock(index));
-		} else {
-			data = rec_get_nth_field(mrec, offsets, i, &len);
-			if (len == UNIV_SQL_DEFAULT) {
-				data = log->instant_field_value(i, &len);
-			}
-			dfield_set_data(dfield, data, len);
-		}
-
-		if (len != UNIV_SQL_NULL && col->mtype == DATA_MYSQL
-		    && col->len != len && !dict_table_is_comp(log->table)) {
-
-			ut_ad(col->len >= len);
-			if (dict_table_is_comp(index->table)) {
-				byte*	buf = (byte*) mem_heap_alloc(heap,
-								     col->len);
-				memcpy(buf, dfield->data, len);
-				memset(buf + len, 0x20, col->len - len);
-
-				dfield_set_data(dfield, buf, col->len);
-			} else {
-				/* field length mismatch should not happen
-				when rebuilding the redundant row format
-				table. */
-				ut_ad(0);
-				*error = DB_CORRUPTION;
-				return(NULL);
-			}
-		}
-
-		/* See if any columns were changed to NULL or NOT NULL. */
-		const dict_col_t*	new_col
-			= dict_table_get_nth_col(log->table, col_no);
-		ut_ad(new_col->mtype == col->mtype);
-
-		/* Assert that prtype matches except for nullability. */
-		ut_ad(!((new_col->prtype ^ col->prtype) & ~DATA_NOT_NULL));
-		ut_ad(!((new_col->prtype ^ dfield_get_type(dfield)->prtype)
-			& ~DATA_NOT_NULL));
-
-		if (new_col->prtype == col->prtype) {
-			continue;
-		}
-
-		if ((new_col->prtype & DATA_NOT_NULL)
-		    && dfield_is_null(dfield)) {
-
-			const dfield_t& default_field
-				= log->defaults->fields[col_no];
-			Field* field = log->old_table->field[col_no];
-
-			field->set_warning(Sql_condition::WARN_LEVEL_WARN,
-					   WARN_DATA_TRUNCATED, 1, ulong(log->n_rows));
-
-			if (!log->allow_not_null) {
-				/* We got a NULL value for a NOT NULL column. */
-				*error = DB_INVALID_NULL;
-				return NULL;
-			}
-
-			*dfield = default_field;
-		}
-
-		/* Adjust the DATA_NOT_NULL flag in the parsed row. */
-		dfield_get_type(dfield)->prtype = new_col->prtype;
-
-		ut_ad(dict_col_type_assert_equal(new_col,
-						 dfield_get_type(dfield)));
-	}
-
-	return(row);
-}
-
-/******************************************************//**
-Replays an insert operation on a table that was rebuilt.
-@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_log_table_apply_insert_low(
-/*===========================*/
-	que_thr_t*		thr,		/*!< in: query graph */
-	const dtuple_t*		row,		/*!< in: table row
-						in the old table definition */
-	mem_heap_t*		offsets_heap,	/*!< in/out: memory heap
-						that can be emptied */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	row_merge_dup_t*	dup)		/*!< in/out: for reporting
-						duplicate key errors */
-{
-	dberr_t		error;
-	dtuple_t*	entry;
-	const row_log_t*log	= dup->index->online_log;
-	dict_index_t*	index	= dict_table_get_first_index(log->table);
-	ulint		n_index = 0;
-
-	ut_ad(dtuple_validate(row));
-
-	DBUG_LOG("ib_alter_table",
-		 "insert table " << index->table->id << " (index "
-		 << index->id << "): " << rec_printer(row).str());
-
-	static const ulint	flags
-		= (BTR_CREATE_FLAG
-		   | BTR_NO_LOCKING_FLAG
-		   | BTR_NO_UNDO_LOG_FLAG
-		   | BTR_KEEP_SYS_FLAG);
-
-	entry = row_build_index_entry(row, NULL, index, heap);
-
-	error = row_ins_clust_index_entry_low(
-		flags, BTR_MODIFY_TREE, index, index->n_uniq,
-		entry, 0, thr, false);
-
-	switch (error) {
-	case DB_SUCCESS:
-		break;
-	case DB_SUCCESS_LOCKED_REC:
-		/* The row had already been copied to the table. */
-		return(DB_SUCCESS);
-	default:
-		return(error);
-	}
-
-	ut_ad(dict_index_is_clust(index));
-
-	for (n_index += index->type != DICT_CLUSTERED;
-	     (index = dict_table_get_next_index(index)); n_index++) {
-		if (index->type & DICT_FTS) {
-			continue;
-		}
-
-		entry = row_build_index_entry(row, NULL, index, heap);
-		error = row_ins_sec_index_entry_low(
-			flags, BTR_MODIFY_TREE,
-			index, offsets_heap, heap, entry,
-			thr_get_trx(thr)->id, thr, false);
-
-		if (error != DB_SUCCESS) {
-			if (error == DB_DUPLICATE_KEY) {
-				thr_get_trx(thr)->error_key_num = n_index;
-			}
-			break;
-		}
-	}
-
-	return(error);
-}
-
-/******************************************************//**
-Replays an insert operation on a table that was rebuilt.
-@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_log_table_apply_insert(
-/*=======================*/
-	que_thr_t*		thr,		/*!< in: query graph */
-	const mrec_t*		mrec,		/*!< in: record to insert */
-	const ulint*		offsets,	/*!< in: offsets of mrec */
-	mem_heap_t*		offsets_heap,	/*!< in/out: memory heap
-						that can be emptied */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	row_merge_dup_t*	dup)		/*!< in/out: for reporting
-						duplicate key errors */
-{
-	row_log_t*log	= dup->index->online_log;
-	dberr_t		error;
-	const dtuple_t*	row	= row_log_table_apply_convert_mrec(
-		mrec, dup->index, offsets, log, heap, &error);
-
-	switch (error) {
-	case DB_MISSING_HISTORY:
-		ut_ad(log->blobs);
-		/* Because some BLOBs are missing, we know that the
-		transaction was rolled back later (a rollback of
-		an insert can free BLOBs).
-		We can simply skip the insert: the subsequent
-		ROW_T_DELETE will be ignored, or a ROW_T_UPDATE will
-		be interpreted as ROW_T_INSERT. */
-		return(DB_SUCCESS);
-	case DB_SUCCESS:
-		ut_ad(row != NULL);
-		break;
-	default:
-		ut_ad(0);
-	case DB_INVALID_NULL:
-		ut_ad(row == NULL);
-		return(error);
-	}
-
-	error = row_log_table_apply_insert_low(
-		thr, row, offsets_heap, heap, dup);
-	if (error != DB_SUCCESS) {
-		/* Report the erroneous row using the new
-		version of the table. */
-		innobase_row_to_mysql(dup->table, log->table, row);
-	}
-	return(error);
-}
-
-/******************************************************//**
-Deletes a record from a table that is being rebuilt.
-@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_log_table_apply_delete_low(
-/*===========================*/
-	btr_pcur_t*		pcur,		/*!< in/out: B-tree cursor,
-						will be trashed */
-	const ulint*		offsets,	/*!< in: offsets on pcur */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	mtr_t*			mtr)		/*!< in/out: mini-transaction,
-						will be committed */
-{
-	dberr_t		error;
-	row_ext_t*	ext;
-	dtuple_t*	row;
-	dict_index_t*	index	= btr_pcur_get_btr_cur(pcur)->index;
-
-	ut_ad(dict_index_is_clust(index));
-
-	DBUG_LOG("ib_alter_table",
-		 "delete table " << index->table->id << " (index "
-		 << index->id << "): "
-		 << rec_printer(btr_pcur_get_rec(pcur), offsets).str());
-
-	if (dict_table_get_next_index(index)) {
-		/* Build a row template for purging secondary index entries. */
-		row = row_build(
-			ROW_COPY_DATA, index, btr_pcur_get_rec(pcur),
-			offsets, NULL, NULL, NULL, &ext, heap);
-	} else {
-		row = NULL;
-	}
-
-	btr_cur_pessimistic_delete(&error, FALSE, btr_pcur_get_btr_cur(pcur),
-				   BTR_CREATE_FLAG, false, mtr);
-	mtr_commit(mtr);
-
-	if (error != DB_SUCCESS) {
-		return(error);
-	}
-
-	while ((index = dict_table_get_next_index(index)) != NULL) {
-		if (index->type & DICT_FTS) {
-			continue;
-		}
-
-		const dtuple_t*	entry = row_build_index_entry(
-			row, ext, index, heap);
-		mtr->start();
-		index->set_modified(*mtr);
-		btr_pcur_open(index, entry, PAGE_CUR_LE,
-			      BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE,
-			      pcur, mtr);
-#ifdef UNIV_DEBUG
-		switch (btr_pcur_get_btr_cur(pcur)->flag) {
-		case BTR_CUR_DELETE_REF:
-		case BTR_CUR_DEL_MARK_IBUF:
-		case BTR_CUR_DELETE_IBUF:
-		case BTR_CUR_INSERT_TO_IBUF:
-			/* We did not request buffering. */
-			break;
-		case BTR_CUR_HASH:
-		case BTR_CUR_HASH_FAIL:
-		case BTR_CUR_BINARY:
-			goto flag_ok;
-		}
-		ut_ad(0);
-flag_ok:
-#endif /* UNIV_DEBUG */
-
-		if (page_rec_is_infimum(btr_pcur_get_rec(pcur))
-		    || btr_pcur_get_low_match(pcur) < index->n_uniq) {
-			/* All secondary index entries should be
-			found, because new_table is being modified by
-			this thread only, and all indexes should be
-			updated in sync. */
-			mtr->commit();
-			return(DB_INDEX_CORRUPT);
-		}
-
-		btr_cur_pessimistic_delete(&error, FALSE,
-					   btr_pcur_get_btr_cur(pcur),
-					   BTR_CREATE_FLAG, false, mtr);
-		mtr->commit();
-	}
-
-	return(error);
-}
-
-/******************************************************//**
-Replays a delete operation on a table that was rebuilt.
-@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_log_table_apply_delete(
-/*=======================*/
-	ulint			trx_id_col,	/*!< in: position of
-						DB_TRX_ID in the new
-						clustered index */
-	const mrec_t*		mrec,		/*!< in: merge record */
-	const ulint*		moffsets,	/*!< in: offsets of mrec */
-	mem_heap_t*		offsets_heap,	/*!< in/out: memory heap
-						that can be emptied */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	const row_log_t*	log)		/*!< in: online log */
-{
-	dict_table_t*	new_table = log->table;
-	dict_index_t*	index = dict_table_get_first_index(new_table);
-	dtuple_t*	old_pk;
-	mtr_t		mtr;
-	btr_pcur_t	pcur;
-	ulint*		offsets;
-
-	ut_ad(rec_offs_n_fields(moffsets) == index->first_user_field());
-	ut_ad(!rec_offs_any_extern(moffsets));
-
-	/* Convert the row to a search tuple. */
-	old_pk = dtuple_create(heap, index->n_uniq);
-	dict_index_copy_types(old_pk, index, index->n_uniq);
-
-	for (ulint i = 0; i < index->n_uniq; i++) {
-		ulint		len;
-		const void*	field;
-		field = rec_get_nth_field(mrec, moffsets, i, &len);
-		ut_ad(len != UNIV_SQL_NULL);
-		dfield_set_data(dtuple_get_nth_field(old_pk, i),
-				field, len);
-	}
-
-	mtr_start(&mtr);
-	index->set_modified(mtr);
-	btr_pcur_open(index, old_pk, PAGE_CUR_LE,
-		      BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE,
-		      &pcur, &mtr);
-#ifdef UNIV_DEBUG
-	switch (btr_pcur_get_btr_cur(&pcur)->flag) {
-	case BTR_CUR_DELETE_REF:
-	case BTR_CUR_DEL_MARK_IBUF:
-	case BTR_CUR_DELETE_IBUF:
-	case BTR_CUR_INSERT_TO_IBUF:
-		/* We did not request buffering. */
-		break;
-	case BTR_CUR_HASH:
-	case BTR_CUR_HASH_FAIL:
-	case BTR_CUR_BINARY:
-		goto flag_ok;
-	}
-	ut_ad(0);
-flag_ok:
-#endif /* UNIV_DEBUG */
-
-	if (page_rec_is_infimum(btr_pcur_get_rec(&pcur))
-	    || btr_pcur_get_low_match(&pcur) < index->n_uniq) {
-all_done:
-		mtr_commit(&mtr);
-		/* The record was not found. All done. */
-		/* This should only happen when an earlier
-		ROW_T_INSERT was skipped or
-		ROW_T_UPDATE was interpreted as ROW_T_DELETE
-		due to BLOBs having been freed by rollback. */
-		return(DB_SUCCESS);
-	}
-
-	offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL, true,
-				  ULINT_UNDEFINED, &offsets_heap);
-#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
-	ut_a(!rec_offs_any_null_extern(btr_pcur_get_rec(&pcur), offsets));
-#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
-
-	/* Only remove the record if DB_TRX_ID,DB_ROLL_PTR match. */
-
-	{
-		ulint		len;
-		const byte*	mrec_trx_id
-			= rec_get_nth_field(mrec, moffsets, trx_id_col, &len);
-		ut_ad(len == DATA_TRX_ID_LEN);
-		const byte*	rec_trx_id
-			= rec_get_nth_field(btr_pcur_get_rec(&pcur), offsets,
-					    trx_id_col, &len);
-		ut_ad(len == DATA_TRX_ID_LEN);
-		ut_d(trx_id_check(rec_trx_id, log->min_trx));
-		ut_d(trx_id_check(mrec_trx_id, log->min_trx));
-
-		ut_ad(rec_get_nth_field(mrec, moffsets, trx_id_col + 1, &len)
-		      == mrec_trx_id + DATA_TRX_ID_LEN);
-		ut_ad(len == DATA_ROLL_PTR_LEN);
-		ut_ad(rec_get_nth_field(btr_pcur_get_rec(&pcur), offsets,
-					trx_id_col + 1, &len)
-		      == rec_trx_id + DATA_TRX_ID_LEN);
-		ut_ad(len == DATA_ROLL_PTR_LEN);
-
-		if (memcmp(mrec_trx_id, rec_trx_id,
-			   DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN)) {
-			/* The ROW_T_DELETE was logged for a different
-			PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR.
-			This is possible if a ROW_T_INSERT was skipped
-			or a ROW_T_UPDATE was interpreted as ROW_T_DELETE
-			because some BLOBs were missing due to
-			(1) rolling back the initial insert, or
-			(2) purging the BLOB for a later ROW_T_DELETE
-			(3) purging 'old values' for a later ROW_T_UPDATE
-			or ROW_T_DELETE. */
-			ut_ad(!log->same_pk);
-			goto all_done;
-		}
-	}
-
-	return row_log_table_apply_delete_low(&pcur, offsets, heap, &mtr);
-}
-
-/******************************************************//**
-Replays an update operation on a table that was rebuilt.
-@return DB_SUCCESS or error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
-row_log_table_apply_update(
-/*=======================*/
-	que_thr_t*		thr,		/*!< in: query graph */
-	ulint			new_trx_id_col,	/*!< in: position of
-						DB_TRX_ID in the new
-						clustered index */
-	const mrec_t*		mrec,		/*!< in: new value */
-	const ulint*		offsets,	/*!< in: offsets of mrec */
-	mem_heap_t*		offsets_heap,	/*!< in/out: memory heap
-						that can be emptied */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	row_merge_dup_t*	dup,		/*!< in/out: for reporting
-						duplicate key errors */
-	const dtuple_t*		old_pk)		/*!< in: PRIMARY KEY and
-						DB_TRX_ID,DB_ROLL_PTR
-						of the old value,
-						or PRIMARY KEY if same_pk */
-{
-	row_log_t*	log	= dup->index->online_log;
-	const dtuple_t*	row;
-	dict_index_t*	index	= dict_table_get_first_index(log->table);
-	mtr_t		mtr;
-	btr_pcur_t	pcur;
-	dberr_t		error;
-	ulint		n_index = 0;
-
-	ut_ad(dtuple_get_n_fields_cmp(old_pk)
-	      == dict_index_get_n_unique(index));
-	ut_ad(dtuple_get_n_fields(old_pk)
-	      == dict_index_get_n_unique(index)
-	      + (log->same_pk ? 0 : 2));
-
-	row = row_log_table_apply_convert_mrec(
-		mrec, dup->index, offsets, log, heap, &error);
-
-	switch (error) {
-	case DB_MISSING_HISTORY:
-		/* The record contained BLOBs that are now missing. */
-		ut_ad(log->blobs);
-		/* Whether or not we are updating the PRIMARY KEY, we
-		know that there should be a subsequent
-		ROW_T_DELETE for rolling back a preceding ROW_T_INSERT,
-		overriding this ROW_T_UPDATE record. (*1)
-
-		This allows us to interpret this ROW_T_UPDATE
-		as ROW_T_DELETE.
-
-		When applying the subsequent ROW_T_DELETE, no matching
-		record will be found. */
-		/* fall through */
-	case DB_SUCCESS:
-		ut_ad(row != NULL);
-		break;
-	default:
-		ut_ad(0);
-	case DB_INVALID_NULL:
-		ut_ad(row == NULL);
-		return(error);
-	}
-
-	mtr_start(&mtr);
-	index->set_modified(mtr);
-	btr_pcur_open(index, old_pk, PAGE_CUR_LE,
-		      BTR_MODIFY_TREE, &pcur, &mtr);
-#ifdef UNIV_DEBUG
-	switch (btr_pcur_get_btr_cur(&pcur)->flag) {
-	case BTR_CUR_DELETE_REF:
-	case BTR_CUR_DEL_MARK_IBUF:
-	case BTR_CUR_DELETE_IBUF:
-	case BTR_CUR_INSERT_TO_IBUF:
-		ut_ad(0);/* We did not request buffering. */
-	case BTR_CUR_HASH:
-	case BTR_CUR_HASH_FAIL:
-	case BTR_CUR_BINARY:
-		break;
-	}
-#endif /* UNIV_DEBUG */
-
-	if (page_rec_is_infimum(btr_pcur_get_rec(&pcur))
-	    || btr_pcur_get_low_match(&pcur) < index->n_uniq) {
-		/* The record was not found. This should only happen
-		when an earlier ROW_T_INSERT or ROW_T_UPDATE was
-		diverted because BLOBs were freed when the insert was
-		later rolled back. */
-
-		ut_ad(log->blobs);
-
-		if (error == DB_SUCCESS) {
-			/* An earlier ROW_T_INSERT could have been
-			skipped because of a missing BLOB, like this:
-
-			BEGIN;
-			INSERT INTO t SET blob_col='blob value';
-			UPDATE t SET blob_col='';
-			ROLLBACK;
-
-			This would generate the following records:
-			ROW_T_INSERT (referring to 'blob value')
-			ROW_T_UPDATE
-			ROW_T_UPDATE (referring to 'blob value')
-			ROW_T_DELETE
-			[ROLLBACK removes the 'blob value']
-
-			The ROW_T_INSERT would have been skipped
-			because of a missing BLOB. Now we are
-			executing the first ROW_T_UPDATE.
-			The second ROW_T_UPDATE (for the ROLLBACK)
-			would be interpreted as ROW_T_DELETE, because
-			the BLOB would be missing.
-
-			We could probably assume that the transaction
-			has been rolled back and simply skip the
-			'insert' part of this ROW_T_UPDATE record.
-			However, there might be some complex scenario
-			that could interfere with such a shortcut.
-			So, we will insert the row (and risk
-			introducing a bogus duplicate key error
-			for the ALTER TABLE), and a subsequent
-			ROW_T_UPDATE or ROW_T_DELETE will delete it. */
-			mtr_commit(&mtr);
-			error = row_log_table_apply_insert_low(
-				thr, row, offsets_heap, heap, dup);
-		} else {
-			/* Some BLOBs are missing, so we are interpreting
-			this ROW_T_UPDATE as ROW_T_DELETE (see *1).
-			Because the record was not found, we do nothing. */
-			ut_ad(error == DB_MISSING_HISTORY);
-			error = DB_SUCCESS;
-func_exit:
-			mtr_commit(&mtr);
-		}
-func_exit_committed:
-		ut_ad(mtr.has_committed());
-
-		if (error != DB_SUCCESS) {
-			/* Report the erroneous row using the new
-			version of the table. */
-			innobase_row_to_mysql(dup->table, log->table, row);
-		}
-
-		return(error);
-	}
-
-	/* Prepare to update (or delete) the record. */
-	ulint*		cur_offsets	= rec_get_offsets(
-		btr_pcur_get_rec(&pcur), index, NULL, true,
-		ULINT_UNDEFINED, &offsets_heap);
-
-	if (!log->same_pk) {
-		/* Only update the record if DB_TRX_ID,DB_ROLL_PTR match what
-		was buffered. */
-		ulint		len;
-		const byte*	rec_trx_id
-			= rec_get_nth_field(btr_pcur_get_rec(&pcur),
-					    cur_offsets, index->n_uniq, &len);
-		const dfield_t*	old_pk_trx_id
-			= dtuple_get_nth_field(old_pk, index->n_uniq);
-		ut_ad(len == DATA_TRX_ID_LEN);
-		ut_d(trx_id_check(rec_trx_id, log->min_trx));
-		ut_ad(old_pk_trx_id->len == DATA_TRX_ID_LEN);
-		ut_ad(old_pk_trx_id[1].len == DATA_ROLL_PTR_LEN);
-		ut_ad(DATA_TRX_ID_LEN
-		      + static_cast<const char*>(old_pk_trx_id->data)
-		      == old_pk_trx_id[1].data);
-		ut_d(trx_id_check(old_pk_trx_id->data, log->min_trx));
-
-		if (memcmp(rec_trx_id, old_pk_trx_id->data,
-			   DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN)) {
-			/* The ROW_T_UPDATE was logged for a different
-			DB_TRX_ID,DB_ROLL_PTR. This is possible if an
-			earlier ROW_T_INSERT or ROW_T_UPDATE was diverted
-			because some BLOBs were missing due to rolling
-			back the initial insert or due to purging
-			the old BLOB values of an update. */
-			ut_ad(log->blobs);
-			if (error != DB_SUCCESS) {
-				ut_ad(error == DB_MISSING_HISTORY);
-				/* Some BLOBs are missing, so we are
-				interpreting this ROW_T_UPDATE as
-				ROW_T_DELETE (see *1).
-				Because this is a different row,
-				we will do nothing. */
-				error = DB_SUCCESS;
-			} else {
-				/* Because the user record is missing due to
-				BLOBs that were missing when processing
-				an earlier log record, we should
-				interpret the ROW_T_UPDATE as ROW_T_INSERT.
-				However, there is a different user record
-				with the same PRIMARY KEY value already. */
-				error = DB_DUPLICATE_KEY;
-			}
-
-			goto func_exit;
-		}
-	}
-
-	if (error != DB_SUCCESS) {
-		ut_ad(error == DB_MISSING_HISTORY);
-		ut_ad(log->blobs);
-		/* Some BLOBs are missing, so we are interpreting
-		this ROW_T_UPDATE as ROW_T_DELETE (see *1). */
-		error = row_log_table_apply_delete_low(
-			&pcur, cur_offsets, heap, &mtr);
-		goto func_exit_committed;
-	}
-
-	dtuple_t*	entry	= row_build_index_entry_low(
-		row, NULL, index, heap, ROW_BUILD_NORMAL);
-	upd_t*		update	= row_upd_build_difference_binary(
-		index, entry, btr_pcur_get_rec(&pcur), cur_offsets,
-		false, NULL, heap, dup->table);
-
-	if (!update->n_fields) {
-		/* Nothing to do. */
-		goto func_exit;
-	}
-
-	const bool	pk_updated
-		= upd_get_nth_field(update, 0)->field_no < new_trx_id_col;
-
-	if (pk_updated || rec_offs_any_extern(cur_offsets)) {
-		/* If the record contains any externally stored
-		columns, perform the update by delete and insert,
-		because we will not write any undo log that would
-		allow purge to free any orphaned externally stored
-		columns. */
-
-		if (pk_updated && log->same_pk) {
-			/* The ROW_T_UPDATE log record should only be
-			written when the PRIMARY KEY fields of the
-			record did not change in the old table.  We
-			can only get a change of PRIMARY KEY columns
-			in the rebuilt table if the PRIMARY KEY was
-			redefined (!same_pk). */
-			ut_ad(0);
-			error = DB_CORRUPTION;
-			goto func_exit;
-		}
-
-		error = row_log_table_apply_delete_low(
-			&pcur, cur_offsets, heap, &mtr);
-		ut_ad(mtr.has_committed());
-
-		if (error == DB_SUCCESS) {
-			error = row_log_table_apply_insert_low(
-				thr, row, offsets_heap, heap, dup);
-		}
-
-		goto func_exit_committed;
-	}
-
-	dtuple_t*	old_row;
-	row_ext_t*	old_ext;
-
-	if (dict_table_get_next_index(index)) {
-		/* Construct the row corresponding to the old value of
-		the record. */
-		old_row = row_build(
-			ROW_COPY_DATA, index, btr_pcur_get_rec(&pcur),
-			cur_offsets, NULL, NULL, NULL, &old_ext, heap);
-		ut_ad(old_row);
-
-		DBUG_LOG("ib_alter_table",
-			 "update table " << index->table->id
-			 << " (index " << index->id
-			 << ": " << rec_printer(old_row).str()
-			 << " to " << rec_printer(row).str());
-	} else {
-		old_row = NULL;
-		old_ext = NULL;
-	}
-
-	big_rec_t*	big_rec;
-
-	error = btr_cur_pessimistic_update(
-		BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
-		| BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG
-		| BTR_KEEP_POS_FLAG,
-		btr_pcur_get_btr_cur(&pcur),
-		&cur_offsets, &offsets_heap, heap, &big_rec,
-		update, 0, thr, 0, &mtr);
-
-	if (big_rec) {
-		if (error == DB_SUCCESS) {
-			error = btr_store_big_rec_extern_fields(
-				&pcur, cur_offsets, big_rec, &mtr,
-				BTR_STORE_UPDATE);
-		}
-
-		dtuple_big_rec_free(big_rec);
-	}
-
-	for (n_index += index->type != DICT_CLUSTERED;
-	     (index = dict_table_get_next_index(index)); n_index++) {
-		if (index->type & DICT_FTS) {
-			continue;
-		}
-
-		if (error != DB_SUCCESS) {
-			break;
-		}
-
-		if (!row_upd_changes_ord_field_binary(
-			    index, update, thr, old_row, NULL)) {
-			continue;
-		}
-
-		if (dict_index_has_virtual(index)) {
-			dtuple_copy_v_fields(old_row, old_pk);
-		}
-
-		mtr_commit(&mtr);
-
-		entry = row_build_index_entry(old_row, old_ext, index, heap);
-		if (!entry) {
-			ut_ad(0);
-			return(DB_CORRUPTION);
-		}
-
-		mtr_start(&mtr);
-		index->set_modified(mtr);
-
-		if (ROW_FOUND != row_search_index_entry(
-			    index, entry, BTR_MODIFY_TREE, &pcur, &mtr)) {
-			ut_ad(0);
-			error = DB_CORRUPTION;
-			break;
-		}
-
-		btr_cur_pessimistic_delete(
-			&error, FALSE, btr_pcur_get_btr_cur(&pcur),
-			BTR_CREATE_FLAG, false, &mtr);
-
-		if (error != DB_SUCCESS) {
-			break;
-		}
-
-		mtr_commit(&mtr);
-
-		entry = row_build_index_entry(row, NULL, index, heap);
-		error = row_ins_sec_index_entry_low(
-			BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
-			| BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG,
-			BTR_MODIFY_TREE, index, offsets_heap, heap,
-			entry, thr_get_trx(thr)->id, thr, false);
-
-		/* Report correct index name for duplicate key error. */
-		if (error == DB_DUPLICATE_KEY) {
-			thr_get_trx(thr)->error_key_num = n_index;
-		}
-
-		mtr_start(&mtr);
-		index->set_modified(mtr);
-	}
-
-	goto func_exit;
-}
-
-/******************************************************//**
-Applies an operation to a table that was rebuilt.
-@return NULL on failure (mrec corruption) or when out of data;
-pointer to next record on success */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-const mrec_t*
-row_log_table_apply_op(
-/*===================*/
-	que_thr_t*		thr,		/*!< in: query graph */
-	ulint			new_trx_id_col,	/*!< in: position of
-						DB_TRX_ID in new index */
-	row_merge_dup_t*	dup,		/*!< in/out: for reporting
-						duplicate key errors */
-	dberr_t*		error,		/*!< out: DB_SUCCESS
-						or error code */
-	mem_heap_t*		offsets_heap,	/*!< in/out: memory heap
-						that can be emptied */
-	mem_heap_t*		heap,		/*!< in/out: memory heap */
-	const mrec_t*		mrec,		/*!< in: merge record */
-	const mrec_t*		mrec_end,	/*!< in: end of buffer */
-	ulint*			offsets)	/*!< in/out: work area
-						for parsing mrec */
-{
-	row_log_t*	log	= dup->index->online_log;
-	dict_index_t*	new_index = dict_table_get_first_index(log->table);
-	ulint		extra_size;
-	const mrec_t*	next_mrec;
-	dtuple_t*	old_pk;
-
-	ut_ad(dict_index_is_clust(dup->index));
-	ut_ad(dup->index->table != log->table);
-	ut_ad(log->head.total <= log->tail.total);
-
-	*error = DB_SUCCESS;
-
-	/* 3 = 1 (op type) + 1 (extra_size) + at least 1 byte payload */
-	if (mrec + 3 >= mrec_end) {
-		return(NULL);
-	}
-
-	const bool is_instant = log->is_instant(dup->index);
-	const mrec_t* const mrec_start = mrec;
-
-	switch (*mrec++) {
-	default:
-		ut_ad(0);
-		*error = DB_CORRUPTION;
-		return(NULL);
-	case ROW_T_INSERT:
-		extra_size = *mrec++;
-
-		if (extra_size >= 0x80) {
-			/* Read another byte of extra_size. */
-
-			extra_size = (extra_size & 0x7f) << 8;
-			extra_size |= *mrec++;
-		}
-
-		mrec += extra_size;
-
-		ut_ad(extra_size || !is_instant);
-
-		if (mrec > mrec_end) {
-			return(NULL);
-		}
-
-		rec_offs_set_n_fields(offsets, dup->index->n_fields);
-		rec_init_offsets_temp(mrec, dup->index, offsets,
-				      log->n_core_fields, log->non_core_fields,
-				      is_instant
-				      ? static_cast<rec_comp_status_t>(
-					      *(mrec - extra_size))
-				      : REC_STATUS_ORDINARY);
-
-		next_mrec = mrec + rec_offs_data_size(offsets);
-
-		if (next_mrec > mrec_end) {
-			return(NULL);
-		} else {
-			log->head.total += ulint(next_mrec - mrec_start);
-			*error = row_log_table_apply_insert(
-				thr, mrec, offsets, offsets_heap,
-				heap, dup);
-		}
-		break;
-
-	case ROW_T_DELETE:
-		/* 1 (extra_size) + at least 1 (payload) */
-		if (mrec + 2 >= mrec_end) {
-			return(NULL);
-		}
-
-		extra_size = *mrec++;
-		ut_ad(mrec < mrec_end);
-
-		/* We assume extra_size < 0x100 for the PRIMARY KEY prefix.
-		For fixed-length PRIMARY key columns, it is 0. */
-		mrec += extra_size;
-
-		/* The ROW_T_DELETE record was converted by
-		rec_convert_dtuple_to_temp() using new_index. */
-		ut_ad(!new_index->is_instant());
-		rec_offs_set_n_fields(offsets, new_index->first_user_field());
-		rec_init_offsets_temp(mrec, new_index, offsets);
-		next_mrec = mrec + rec_offs_data_size(offsets);
-		if (next_mrec > mrec_end) {
-			return(NULL);
-		}
-
-		log->head.total += ulint(next_mrec - mrec_start);
-
-		*error = row_log_table_apply_delete(
-			new_trx_id_col,
-			mrec, offsets, offsets_heap, heap, log);
-		break;
-
-	case ROW_T_UPDATE:
-		/* Logically, the log entry consists of the
-		(PRIMARY KEY,DB_TRX_ID) of the old value (converted
-		to the new primary key definition) followed by
-		the new value in the old table definition. If the
-		definition of the columns belonging to PRIMARY KEY
-		is not changed, the log will only contain
-		DB_TRX_ID,new_row. */
-
-		if (log->same_pk) {
-			ut_ad(new_index->n_uniq == dup->index->n_uniq);
-
-			extra_size = *mrec++;
-
-			if (extra_size >= 0x80) {
-				/* Read another byte of extra_size. */
-
-				extra_size = (extra_size & 0x7f) << 8;
-				extra_size |= *mrec++;
-			}
-
-			mrec += extra_size;
-
-			ut_ad(extra_size || !is_instant);
-
-			if (mrec > mrec_end) {
-				return(NULL);
-			}
-
-			rec_offs_set_n_fields(offsets, dup->index->n_fields);
-			rec_init_offsets_temp(mrec, dup->index, offsets,
-					      log->n_core_fields,
-					      log->non_core_fields,
-					      is_instant
-					      ? static_cast<rec_comp_status_t>(
-						      *(mrec - extra_size))
-					      : REC_STATUS_ORDINARY);
-
-			next_mrec = mrec + rec_offs_data_size(offsets);
-
-			if (next_mrec > mrec_end) {
-				return(NULL);
-			}
-
-			old_pk = dtuple_create(heap, new_index->n_uniq);
-			dict_index_copy_types(
-				old_pk, new_index, old_pk->n_fields);
-
-			/* Copy the PRIMARY KEY fields from mrec to old_pk. */
-			for (ulint i = 0; i < new_index->n_uniq; i++) {
-				const void*	field;
-				ulint		len;
-				dfield_t*	dfield;
-
-				ut_ad(!rec_offs_nth_extern(offsets, i));
-
-				field = rec_get_nth_field(
-					mrec, offsets, i, &len);
-				ut_ad(len != UNIV_SQL_NULL);
-
-				dfield = dtuple_get_nth_field(old_pk, i);
-				dfield_set_data(dfield, field, len);
-			}
-		} else {
-			/* We assume extra_size < 0x100
-			for the PRIMARY KEY prefix. */
-			mrec += *mrec + 1;
-
-			if (mrec > mrec_end) {
-				return(NULL);
-			}
-
-			/* Get offsets for PRIMARY KEY,
-			DB_TRX_ID, DB_ROLL_PTR. */
-			/* The old_pk prefix was converted by
-			rec_convert_dtuple_to_temp() using new_index. */
-			ut_ad(!new_index->is_instant());
-			rec_offs_set_n_fields(offsets,
-					      new_index->first_user_field());
-			rec_init_offsets_temp(mrec, new_index, offsets);
-
-			next_mrec = mrec + rec_offs_data_size(offsets);
-			if (next_mrec + 2 > mrec_end) {
-				return(NULL);
-			}
-
-			/* Copy the PRIMARY KEY fields and
-			DB_TRX_ID, DB_ROLL_PTR from mrec to old_pk. */
-			old_pk = dtuple_create(heap,
-					       new_index->first_user_field());
-			dict_index_copy_types(old_pk, new_index,
-					      old_pk->n_fields);
-
-			for (ulint i = 0; i < new_index->first_user_field();
-			     i++) {
-				const void*	field;
-				ulint		len;
-				dfield_t*	dfield;
-
-				ut_ad(!rec_offs_nth_extern(offsets, i));
-
-				field = rec_get_nth_field(
-					mrec, offsets, i, &len);
-				ut_ad(len != UNIV_SQL_NULL);
-
-				dfield = dtuple_get_nth_field(old_pk, i);
-				dfield_set_data(dfield, field, len);
-			}
-
-			mrec = next_mrec;
-
-			/* Fetch the new value of the row as it was
-			in the old table definition. */
-			extra_size = *mrec++;
-
-			if (extra_size >= 0x80) {
-				/* Read another byte of extra_size. */
-
-				extra_size = (extra_size & 0x7f) << 8;
-				extra_size |= *mrec++;
-			}
-
-			mrec += extra_size;
-
-			ut_ad(extra_size || !is_instant);
-
-			if (mrec > mrec_end) {
-				return(NULL);
-			}
-
-			rec_offs_set_n_fields(offsets, dup->index->n_fields);
-			rec_init_offsets_temp(mrec, dup->index, offsets,
-					      log->n_core_fields,
-					      log->non_core_fields,
-					      is_instant
-					      ? static_cast<rec_comp_status_t>(
-						      *(mrec - extra_size))
-					      : REC_STATUS_ORDINARY);
-
-			next_mrec = mrec + rec_offs_data_size(offsets);
-
-			if (next_mrec > mrec_end) {
-				return(NULL);
-			}
-		}
-
-		ut_ad(next_mrec <= mrec_end);
-		log->head.total += ulint(next_mrec - mrec_start);
-		dtuple_set_n_fields_cmp(old_pk, new_index->n_uniq);
-
-		*error = row_log_table_apply_update(
-			thr, new_trx_id_col,
-			mrec, offsets, offsets_heap, heap, dup, old_pk);
-		break;
-	}
-
-	ut_ad(log->head.total <= log->tail.total);
-	mem_heap_empty(offsets_heap);
-	mem_heap_empty(heap);
-	return(next_mrec);
-}
-
-#ifdef HAVE_PSI_STAGE_INTERFACE
-/** Estimate how much an ALTER TABLE progress should be incremented per
-one block of log applied.
-For the other phases of ALTER TABLE we increment the progress with 1 per
-page processed.
-@return amount of abstract units to add to work_completed when one block
-of log is applied.
-*/
-inline
-ulint
-row_log_progress_inc_per_block()
-{
-	/* We must increment the progress once per page (as in
-	univ_page_size, usually 16KiB). One block here is srv_sort_buf_size
-	(usually 1MiB). */
-	const ulint	pages_per_block = std::max<ulint>(
-		ulint(srv_sort_buf_size >> srv_page_size_shift), 1);
-
-	/* Multiply by an artificial factor of 6 to even the pace with
-	the rest of the ALTER TABLE phases, they process page_size amount
-	of data faster. */
-	return(pages_per_block * 6);
-}
-
-/** Estimate how much work is to be done by the log apply phase
-of an ALTER TABLE for this index.
-@param[in]	index	index whose log to assess
-@return work to be done by log-apply in abstract units
-*/
-ulint
-row_log_estimate_work(
-	const dict_index_t*	index)
-{
-	if (index == NULL || index->online_log == NULL) {
-		return(0);
-	}
-
-	const row_log_t*	l = index->online_log;
-	const ulint		bytes_left =
-		static_cast<ulint>(l->tail.total - l->head.total);
-	const ulint		blocks_left = bytes_left / srv_sort_buf_size;
-
-	return(blocks_left * row_log_progress_inc_per_block());
-}
-#else /* HAVE_PSI_STAGE_INTERFACE */
-inline
-ulint
-row_log_progress_inc_per_block()
-{
-	return(0);
-}
-#endif /* HAVE_PSI_STAGE_INTERFACE */
-
-/** Applies operations to a table was rebuilt.
-@param[in]	thr	query graph
-@param[in,out]	dup	for reporting duplicate key errors
-@param[in,out]	stage	performance schema accounting object, used by
-ALTER TABLE. If not NULL, then stage->inc() will be called for each block
-of log that is applied.
-@return DB_SUCCESS, or error code on failure */
-static MY_ATTRIBUTE((warn_unused_result))
-dberr_t
-row_log_table_apply_ops(
-	que_thr_t*		thr,
-	row_merge_dup_t*	dup,
-	ut_stage_alter_t*	stage)
-{
-	dberr_t		error;
-	const mrec_t*	mrec		= NULL;
-	const mrec_t*	next_mrec;
-	const mrec_t*	mrec_end	= NULL; /* silence bogus warning */
-	const mrec_t*	next_mrec_end;
-	mem_heap_t*	heap;
-	mem_heap_t*	offsets_heap;
-	ulint*		offsets;
-	bool		has_index_lock;
-	dict_index_t*	index		= const_cast<dict_index_t*>(
-		dup->index);
-	dict_table_t*	new_table	= index->online_log->table;
-	dict_index_t*	new_index	= dict_table_get_first_index(
-		new_table);
-	const ulint	i		= 1 + REC_OFFS_HEADER_SIZE
-		+ std::max<ulint>(index->n_fields,
-				  new_index->first_user_field());
-	const ulint	new_trx_id_col	= dict_col_get_clust_pos(
-		dict_table_get_sys_col(new_table, DATA_TRX_ID), new_index);
-	trx_t*		trx		= thr_get_trx(thr);
-
-	ut_ad(dict_index_is_clust(index));
-	ut_ad(dict_index_is_online_ddl(index));
-	ut_ad(trx->mysql_thd);
-	ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X));
-	ut_ad(!dict_index_is_online_ddl(new_index));
-	ut_ad(dict_col_get_clust_pos(
-		      dict_table_get_sys_col(index->table, DATA_TRX_ID), index)
-	      != ULINT_UNDEFINED);
-	ut_ad(new_trx_id_col > 0);
-	ut_ad(new_trx_id_col != ULINT_UNDEFINED);
-
-	UNIV_MEM_INVALID(&mrec_end, sizeof mrec_end);
-
-	offsets = static_cast<ulint*>(ut_malloc_nokey(i * sizeof *offsets));
-	offsets[0] = i;
-	offsets[1] = dict_index_get_n_fields(index);
-
-	heap = mem_heap_create(srv_page_size);
-	offsets_heap = mem_heap_create(srv_page_size);
-	has_index_lock = true;
-
-next_block:
-	ut_ad(has_index_lock);
-	ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X));
-	ut_ad(index->online_log->head.bytes == 0);
-
-	stage->inc(row_log_progress_inc_per_block());
-
-	if (trx_is_interrupted(trx)) {
-		goto interrupted;
-	}
-
-	if (index->is_corrupted()) {
-		error = DB_INDEX_CORRUPT;
-		goto func_exit;
-	}
-
-	ut_ad(dict_index_is_online_ddl(index));
-
-	error = index->online_log->error;
-
-	if (error != DB_SUCCESS) {
-		goto func_exit;
-	}
-
-	if (UNIV_UNLIKELY(index->online_log->head.blocks
-			  > index->online_log->tail.blocks)) {
-unexpected_eof:
-		ib::error() << "Unexpected end of temporary file for table "
-			<< index->table->name;
-corruption:
-		error = DB_CORRUPTION;
-		goto func_exit;
-	}
-
-	if (index->online_log->head.blocks
-	    == index->online_log->tail.blocks) {
-		if (index->online_log->head.blocks) {
-#ifdef HAVE_FTRUNCATE
-			/* Truncate the file in order to save space. */
-			if (index->online_log->fd > 0
-			    && ftruncate(index->online_log->fd, 0) == -1) {
-				ib::error()
-					<< "\'" << index->name + 1
-					<< "\' failed with error "
-					<< errno << ":" << strerror(errno);
-
-				goto corruption;
-			}
-#endif /* HAVE_FTRUNCATE */
-			index->online_log->head.blocks
-				= index->online_log->tail.blocks = 0;
-		}
-
-		next_mrec = index->online_log->tail.block;
-		next_mrec_end = next_mrec + index->online_log->tail.bytes;
-
-		if (next_mrec_end == next_mrec) {
-			/* End of log reached. */
-all_done:
-			ut_ad(has_index_lock);
-			ut_ad(index->online_log->head.blocks == 0);
-			ut_ad(index->online_log->tail.blocks == 0);
-			index->online_log->head.bytes = 0;
-			index->online_log->tail.bytes = 0;
-			error = DB_SUCCESS;
-			goto func_exit;
-		}
-	} else {
-		os_offset_t	ofs;
-
-		ofs = (os_offset_t) index->online_log->head.blocks
-			* srv_sort_buf_size;
-
-		ut_ad(has_index_lock);
-		has_index_lock = false;
-		rw_lock_x_unlock(dict_index_get_lock(index));
-
-		log_free_check();
-
-		ut_ad(dict_index_is_online_ddl(index));
-
-		if (!row_log_block_allocate(index->online_log->head)) {
-			error = DB_OUT_OF_MEMORY;
-			goto func_exit;
-		}
-
-		IORequest		request(IORequest::READ);
-		byte*			buf = index->online_log->head.block;
-
-		if (!os_file_read_no_error_handling(
-			    request, index->online_log->fd,
-			    buf, ofs, srv_sort_buf_size, 0)) {
-			ib::error()
-				<< "Unable to read temporary file"
-				" for table " << index->table->name;
-			goto corruption;
-		}
-
-		if (log_tmp_is_encrypted()) {
-			if (!log_tmp_block_decrypt(
-				    buf, srv_sort_buf_size,
-				    index->online_log->crypt_head,
-				    ofs, index->table->space->id)) {
-				error = DB_DECRYPTION_FAILED;
-				goto func_exit;
-			}
-
-			srv_stats.n_rowlog_blocks_decrypted.inc();
-			memcpy(buf, index->online_log->crypt_head,
-			       srv_sort_buf_size);
-		}
-
-#ifdef POSIX_FADV_DONTNEED
-		/* Each block is read exactly once.  Free up the file cache. */
-		posix_fadvise(index->online_log->fd,
-			      ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED);
-#endif /* POSIX_FADV_DONTNEED */
-
-		next_mrec = index->online_log->head.block;
-		next_mrec_end = next_mrec + srv_sort_buf_size;
-	}
-
-	/* This read is not protected by index->online_log->mutex for
-	performance reasons. We will eventually notice any error that
-	was flagged by a DML thread. */
-	error = index->online_log->error;
-
-	if (error != DB_SUCCESS) {
-		goto func_exit;
-	}
-
-	if (mrec) {
-		/* A partial record was read from the previous block.
-		Copy the temporary buffer full, as we do not know the
-		length of the record. Parse subsequent records from
-		the bigger buffer index->online_log->head.block
-		or index->online_log->tail.block. */
-
-		ut_ad(mrec == index->online_log->head.buf);
-		ut_ad(mrec_end > mrec);
-		ut_ad(mrec_end < (&index->online_log->head.buf)[1]);
-
-		memcpy((mrec_t*) mrec_end, next_mrec,
-		       ulint((&index->online_log->head.buf)[1] - mrec_end));
-		mrec = row_log_table_apply_op(
-			thr, new_trx_id_col,
-			dup, &error, offsets_heap, heap,
-			index->online_log->head.buf,
-			(&index->online_log->head.buf)[1], offsets);
-		if (error != DB_SUCCESS) {
-			goto func_exit;
-		} else if (UNIV_UNLIKELY(mrec == NULL)) {
-			/* The record was not reassembled properly. */
-			goto corruption;
-		}
-		/* The record was previously found out to be
-		truncated. Now that the parse buffer was extended,
-		it should proceed beyond the old end of the buffer. */
-		ut_a(mrec > mrec_end);
-
-		index->online_log->head.bytes = ulint(mrec - mrec_end);
-		next_mrec += index->online_log->head.bytes;
-	}
-
-	ut_ad(next_mrec <= next_mrec_end);
-	/* The following loop must not be parsing the temporary
-	buffer, but head.block or tail.block. */
-
-	/* mrec!=NULL means that the next record starts from the
-	middle of the block */
-	ut_ad((mrec == NULL) == (index->online_log->head.bytes == 0));
-
-#ifdef UNIV_DEBUG
-	if (next_mrec_end == index->online_log->head.block
-	    + srv_sort_buf_size) {
-		/* If tail.bytes == 0, next_mrec_end can also be at
-		the end of tail.block. */
-		if (index->online_log->tail.bytes == 0) {
-			ut_ad(next_mrec == next_mrec_end);
-			ut_ad(index->online_log->tail.blocks == 0);
-			ut_ad(index->online_log->head.blocks == 0);
-			ut_ad(index->online_log->head.bytes == 0);
-		} else {
-			ut_ad(next_mrec == index->online_log->head.block
-			      + index->online_log->head.bytes);
-			ut_ad(index->online_log->tail.blocks
-			      > index->online_log->head.blocks);
-		}
-	} else if (next_mrec_end == index->online_log->tail.block
-		   + index->online_log->tail.bytes) {
-		ut_ad(next_mrec == index->online_log->tail.block
-		      + index->online_log->head.bytes);
-		ut_ad(index->online_log->tail.blocks == 0);
-		ut_ad(index->online_log->head.blocks == 0);
-		ut_ad(index->online_log->head.bytes
-		      <= index->online_log->tail.bytes);
-	} else {
-		ut_error;
-	}
-#endif /* UNIV_DEBUG */
-
-	mrec_end = next_mrec_end;
-
-	while (!trx_is_interrupted(trx)) {
-		mrec = next_mrec;
-		ut_ad(mrec <= mrec_end);
-
-		if (mrec == mrec_end) {
-			/* We are at the end of the log.
-			   Mark the replay all_done. */
-			if (has_index_lock) {
-				goto all_done;
-			}
-		}
-
-		if (!has_index_lock) {
-			/* We are applying operations from a different
-			block than the one that is being written to.
-			We do not hold index->lock in order to
-			allow other threads to concurrently buffer
-			modifications. */
-			ut_ad(mrec >= index->online_log->head.block);
-			ut_ad(mrec_end == index->online_log->head.block
-			      + srv_sort_buf_size);
-			ut_ad(index->online_log->head.bytes
-			      < srv_sort_buf_size);
-
-			/* Take the opportunity to do a redo log
-			checkpoint if needed. */
-			log_free_check();
-		} else {
-			/* We are applying operations from the last block.
-			Do not allow other threads to buffer anything,
-			so that we can finally catch up and synchronize. */
-			ut_ad(index->online_log->head.blocks == 0);
-			ut_ad(index->online_log->tail.blocks == 0);
-			ut_ad(mrec_end == index->online_log->tail.block
-			      + index->online_log->tail.bytes);
-			ut_ad(mrec >= index->online_log->tail.block);
-		}
-
-		/* This read is not protected by index->online_log->mutex
-		for performance reasons. We will eventually notice any
-		error that was flagged by a DML thread. */
-		error = index->online_log->error;
-
-		if (error != DB_SUCCESS) {
-			goto func_exit;
-		}
-
-		next_mrec = row_log_table_apply_op(
-			thr, new_trx_id_col,
-			dup, &error, offsets_heap, heap,
-			mrec, mrec_end, offsets);
-
-		if (error != DB_SUCCESS) {
-			goto func_exit;
-		} else if (next_mrec == next_mrec_end) {
-			/* The record happened to end on a block boundary.
-			Do we have more blocks left? */
-			if (has_index_lock) {
-				/* The index will be locked while
-				applying the last block. */
-				goto all_done;
-			}
-
-			mrec = NULL;
-process_next_block:
-			rw_lock_x_lock(dict_index_get_lock(index));
-			has_index_lock = true;
-
-			index->online_log->head.bytes = 0;
-			index->online_log->head.blocks++;
-			goto next_block;
-		} else if (next_mrec != NULL) {
-			ut_ad(next_mrec < next_mrec_end);
-			index->online_log->head.bytes
-				+= ulint(next_mrec - mrec);
-		} else if (has_index_lock) {
-			/* When mrec is within tail.block, it should
-			be a complete record, because we are holding
-			index->lock and thus excluding the writer. */
-			ut_ad(index->online_log->tail.blocks == 0);
-			ut_ad(mrec_end == index->online_log->tail.block
-			      + index->online_log->tail.bytes);
-			ut_ad(0);
-			goto unexpected_eof;
-		} else {
-			memcpy(index->online_log->head.buf, mrec,
-			       ulint(mrec_end - mrec));
-			mrec_end += ulint(index->online_log->head.buf - mrec);
-			mrec = index->online_log->head.buf;
-			goto process_next_block;
-		}
-	}
-
-interrupted:
-	error = DB_INTERRUPTED;
-func_exit:
-	if (!has_index_lock) {
-		rw_lock_x_lock(dict_index_get_lock(index));
-	}
-
-	mem_heap_free(offsets_heap);
-	mem_heap_free(heap);
-	row_log_block_free(index->online_log->head);
-	ut_free(offsets);
-	return(error);
-}
-
-/** Apply the row_log_table log to a table upon completing rebuild.
-@param[in]	thr		query graph
-@param[in]	old_table	old table
-@param[in,out]	table		MySQL table (for reporting duplicates)
-@param[in,out]	stage		performance schema accounting object, used by
-ALTER TABLE. stage->begin_phase_log_table() will be called initially and then
-stage->inc() will be called for each block of log that is applied.
-@param[in]	new_table	Altered table
-@return DB_SUCCESS, or error code on failure */
-dberr_t
-row_log_table_apply(
-	que_thr_t*		thr,
-	dict_table_t*		old_table,
-	struct TABLE*		table,
-	ut_stage_alter_t*	stage,
-	dict_table_t*		new_table)
-{
-	dberr_t		error;
-	dict_index_t*	clust_index;
-
-	thr_get_trx(thr)->error_key_num = 0;
-	DBUG_EXECUTE_IF("innodb_trx_duplicates",
-			thr_get_trx(thr)->duplicates = TRX_DUP_REPLACE;);
-
-	stage->begin_phase_log_table();
-
-	ut_ad(!rw_lock_own(dict_operation_lock, RW_LOCK_S));
-	clust_index = dict_table_get_first_index(old_table);
-
-	if (clust_index->online_log->n_rows == 0) {
-		clust_index->online_log->n_rows = new_table->stat_n_rows;
-	}
-
-	rw_lock_x_lock(dict_index_get_lock(clust_index));
-
-	if (!clust_index->online_log) {
-		ut_ad(dict_index_get_online_status(clust_index)
-		      == ONLINE_INDEX_COMPLETE);
-		/* This function should not be called unless
-		rebuilding a table online. Build in some fault
-		tolerance. */
-		ut_ad(0);
-		error = DB_ERROR;
-	} else {
-		row_merge_dup_t	dup = {
-			clust_index, table,
-			clust_index->online_log->col_map, 0
-		};
-
-		error = row_log_table_apply_ops(thr, &dup, stage);
-
-		ut_ad(error != DB_SUCCESS
-		      || clust_index->online_log->head.total
-		      == clust_index->online_log->tail.total);
-	}
-
-	rw_lock_x_unlock(dict_index_get_lock(clust_index));
-	DBUG_EXECUTE_IF("innodb_trx_duplicates",
-			thr_get_trx(thr)->duplicates = 0;);
-
-	return(error);
-}
-
-/******************************************************//**
-Allocate the row log for an index and flag the index
-for online creation.
-@retval true if success, false if not */
-bool
-row_log_allocate(
-/*=============*/
-	const trx_t*	trx,	/*!< in: the ALTER TABLE transaction */
-	dict_index_t*	index,	/*!< in/out: index */
-	dict_table_t*	table,	/*!< in/out: new table being rebuilt,
-				or NULL when creating a secondary index */
-	bool		same_pk,/*!< in: whether the definition of the
-				PRIMARY KEY has remained the same */
-	const dtuple_t*	defaults,
-				/*!< in: default values of
-				added, changed columns, or NULL */
-	const ulint*	col_map,/*!< in: mapping of old column
-				numbers to new ones, or NULL if !table */
-	const char*	path,	/*!< in: where to create temporary file */
-	const TABLE*	old_table,	/*!< in: table definition before alter */
-	const bool	allow_not_null) /*!< in: allow null to not-null
-					conversion */
-{
-	row_log_t*	log;
-	DBUG_ENTER("row_log_allocate");
-
-	ut_ad(!dict_index_is_online_ddl(index));
-	ut_ad(dict_index_is_clust(index) == !!table);
-	ut_ad(!table || index->table != table);
-	ut_ad(same_pk || table);
-	ut_ad(!table || col_map);
-	ut_ad(!defaults || col_map);
-	ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X));
-	ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
-	ut_ad(trx->id);
-
-	log = static_cast<row_log_t*>(ut_malloc_nokey(sizeof *log));
-
-	if (log == NULL) {
-		DBUG_RETURN(false);
-	}
-
-	log->fd = OS_FILE_CLOSED;
-	mutex_create(LATCH_ID_INDEX_ONLINE_LOG, &log->mutex);
-
-	log->blobs = NULL;
-	log->table = table;
-	log->same_pk = same_pk;
-	log->defaults = defaults;
-	log->col_map = col_map;
 	log->error = DB_SUCCESS;
-	log->min_trx = trx->id;
 	log->max_trx = 0;
 	log->tail.blocks = log->tail.bytes = 0;
-	log->tail.total = 0;
 	log->tail.block = log->head.block = NULL;
 	log->crypt_tail = log->crypt_head = NULL;
 	log->head.blocks = log->head.bytes = 0;
-	log->head.total = 0;
 	log->path = path;
-	log->n_core_fields = index->n_core_fields;
-	ut_ad(!table || log->is_instant(index)
-	      == (index->n_core_fields < index->n_fields));
-	log->allow_not_null = allow_not_null;
 	log->old_table = old_table;
 	log->n_rows = 0;
 
-	if (table && index->is_instant()) {
-		const unsigned n = log->n_core_fields;
-		log->non_core_fields = UT_NEW_ARRAY_NOKEY(
-			dict_col_t::def_t, index->n_fields - n);
-		for (unsigned i = n; i < index->n_fields; i++) {
-			log->non_core_fields[i - n]
-				= index->fields[i].col->def_val;
-		}
-	} else {
-		log->non_core_fields = NULL;
-	}
-
 	dict_index_set_online_status(index, ONLINE_INDEX_CREATION);
 	index->online_log = log;
 
@@ -3256,8 +394,6 @@ row_log_free(
 {
 	MONITOR_ATOMIC_DEC(MONITOR_ONLINE_CREATE_INDEX);
 
-	UT_DELETE(log->blobs);
-	UT_DELETE_ARRAY(log->non_core_fields);
 	row_log_block_free(log->tail);
 	row_log_block_free(log->head);
 	row_merge_file_destroy_low(log->fd);
@@ -3690,7 +826,7 @@ row_log_apply_ops(
 	ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X));
 	ut_ad(index->online_log->head.bytes == 0);
 
-	stage->inc(row_log_progress_inc_per_block());
+	stage->inc(0/*FIXME*/);
 
 	if (trx_is_interrupted(trx)) {
 		goto interrupted;
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index fa369566d9a..ec8744c88ed 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1691,7 +1691,6 @@ stage->inc() will be called for each page read.
 @param[in,out]	crypt_block	crypted file buffer
 @param[in]	eval_table	mysql table used to evaluate virtual column
 				value, see innobase_get_computed_value().
-@param[in]	allow_not_null	allow null to not-null conversion
 @return DB_SUCCESS or error */
 static MY_ATTRIBUTE((warn_unused_result))
 dberr_t
@@ -1718,8 +1717,7 @@ row_merge_read_clustered_index(
 	ut_stage_alter_t*	stage,
 	double 			pct_cost,
 	row_merge_block_t*	crypt_block,
-	struct TABLE*		eval_table,
-	bool			allow_not_null)
+	struct TABLE*		eval_table)
 {
 	dict_index_t*		clust_index;	/* Clustered index */
 	mem_heap_t*		row_heap;	/* Heap memory to create
@@ -1970,14 +1968,6 @@ row_merge_read_clustered_index(
 				goto func_exit;
 			}
 
-			if (online && old_table != new_table) {
-				err = row_log_table_get_error(clust_index);
-				if (err != DB_SUCCESS) {
-					trx->error_key_num = 0;
-					goto func_exit;
-				}
-			}
-
 			/* Insert the cached spatial index rows. */
 			bool	mtr_committed = false;
 
@@ -2074,14 +2064,6 @@ row_merge_read_clustered_index(
 
 			/* Perform a REPEATABLE READ.
 
-			When rebuilding the table online,
-			row_log_table_apply() must not see a newer
-			state of the table when applying the log.
-			This is mainly to prevent false duplicate key
-			errors, because the log will identify records
-			by the PRIMARY KEY, and also to prevent unsafe
-			BLOB access.
-
 			When creating a secondary index online, this
 			table scan must not see records that have only
 			been inserted to the clustered index, but have
@@ -2203,16 +2185,9 @@ row_merge_read_clustered_index(
 					WARN_DATA_TRUNCATED, 1,
 					ulong(n_rows + 1));
 
-				if (!allow_not_null) {
-					err = DB_INVALID_NULL;
-					trx->error_key_num = 0;
-					goto func_exit;
-				}
-
-				const dfield_t& default_field
-					= defaults->fields[nonnull[i]];
-
-				*field = default_field;
+				err = DB_INVALID_NULL;
+				trx->error_key_num = 0;
+				goto func_exit;
 			}
 		}
 
@@ -3556,7 +3531,6 @@ row_merge_insert_index_tuples(
 	dtuple_t*		dtuple;
 	ib_uint64_t		inserted_rows = 0;
 	double			curr_progress = 0;
-	dict_index_t*		old_index = NULL;
 	const mrec_t*		mrec  = NULL;
 	ulint			n_ext = 0;
 	mtr_t			mtr;
@@ -3647,16 +3621,6 @@ row_merge_insert_index_tuples(
 				mrec, index, offsets, &n_ext, tuple_heap);
 		}
 
-		old_index	= dict_table_get_first_index(old_table);
-
-		if (dict_index_is_clust(index)
-		    && dict_index_is_online_ddl(old_index)) {
-			error = row_log_table_get_error(old_index);
-			if (error != DB_SUCCESS) {
-				break;
-			}
-		}
-
 		if (!n_ext) {
 			/* There are no externally stored columns. */
 		} else {
@@ -4575,7 +4539,6 @@ this function and it will be passed to other functions for further accounting.
 @param[in]	add_v		new virtual columns added along with indexes
 @param[in]	eval_table	mysql table used to evaluate virtual column
 				value, see innobase_get_computed_value().
-@param[in]	allow_not_null	allow the conversion from null to not-null
 @return DB_SUCCESS or error code */
 dberr_t
 row_merge_build_indexes(
@@ -4594,8 +4557,7 @@ row_merge_build_indexes(
 	bool			skip_pk_sort,
 	ut_stage_alter_t*	stage,
 	const dict_add_v_col_t*	add_v,
-	struct TABLE*		eval_table,
-	bool			allow_not_null)
+	struct TABLE*		eval_table)
 {
 	merge_file_t*		merge_files;
 	row_merge_block_t*	block;
@@ -4763,7 +4725,7 @@ row_merge_build_indexes(
 		fts_sort_idx, psort_info, merge_files, key_numbers,
 		n_indexes, defaults, add_v, col_map, add_autoinc,
 		sequence, block, skip_pk_sort, &tmpfd, stage,
-		pct_cost, crypt_block, eval_table, allow_not_null);
+		pct_cost, crypt_block, eval_table);
 
 	stage->end_phase_read_pk();
 
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 3a1690782ff..75dd0ea3f28 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -71,7 +71,6 @@ row_undo_ins_remove_clust_rec(
 	ulint		n_tries	= 0;
 	mtr_t		mtr;
 	dict_index_t*	index	= node->pcur.btr_cur.index;
-	bool		online;
 
 	ut_ad(dict_index_is_clust(index));
 	ut_ad(node->trx->in_rollback);
@@ -90,18 +89,8 @@ row_undo_ins_remove_clust_rec(
 	purged. However, we can log the removal out of sync with the
 	B-tree modification. */
 
-	online = dict_index_is_online_ddl(index);
-	if (online) {
-		ut_ad(node->trx->dict_operation_lock_mode
-		      != RW_X_LATCH);
-		ut_ad(node->table->id != DICT_INDEXES_ID);
-		mtr_s_lock(dict_index_get_lock(index), &mtr);
-	}
-
 	success = btr_pcur_restore_position(
-		online
-		? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
-		: BTR_MODIFY_LEAF, &node->pcur, &mtr);
+		BTR_MODIFY_LEAF, &node->pcur, &mtr);
 	ut_a(success);
 
 	btr_cur = btr_pcur_get_btr_cur(&node->pcur);
@@ -111,19 +100,10 @@ row_undo_ins_remove_clust_rec(
 	ut_ad(!rec_get_deleted_flag(
 		      btr_cur_get_rec(btr_cur),
 		      dict_table_is_comp(btr_cur->index->table)));
-
-	if (online && dict_index_is_online_ddl(index)) {
-		const rec_t*	rec	= btr_cur_get_rec(btr_cur);
-		mem_heap_t*	heap	= NULL;
-		const ulint*	offsets	= rec_get_offsets(
-			rec, index, NULL, true, ULINT_UNDEFINED, &heap);
-		row_log_table_delete(rec, index, offsets, NULL);
-		mem_heap_free(heap);
-	}
+	// FIXME: Add a callback to report the delete.
 
 	switch (node->table->id) {
 	case DICT_INDEXES_ID:
-		ut_ad(!online);
 		ut_ad(node->trx->dict_operation_lock_mode == RW_X_LATCH);
 		ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
 
@@ -145,7 +125,6 @@ row_undo_ins_remove_clust_rec(
 		reloaded after the dictionary operation has been
 		completed. At this point, any corresponding operation
 		to the metadata record will have been rolled back. */
-		ut_ad(!online);
 		ut_ad(node->trx->dict_operation_lock_mode == RW_X_LATCH);
 		ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
 		const rec_t* rec = btr_pcur_get_rec(&node->pcur);
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 8b68b277719..1ee9f794409 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -82,13 +82,6 @@ row_undo_mod_clust_low(
 	mem_heap_t**	offsets_heap,
 				/*!< in/out: memory heap that can be emptied */
 	mem_heap_t*	heap,	/*!< in/out: memory heap */
-	const dtuple_t**rebuilt_old_pk,
-				/*!< out: row_log_table_get_pk()
-				before the update, or NULL if
-				the table is not being rebuilt online or
-				the PRIMARY KEY definition does not change */
-	byte*		sys,	/*!< out: DB_TRX_ID, DB_ROLL_PTR
-				for row_log_table_delete() */
 	que_thr_t*	thr,	/*!< in: query thread */
 	mtr_t*		mtr,	/*!< in: mtr; must be committed before
 				latching any further pages */
@@ -117,15 +110,6 @@ row_undo_mod_clust_low(
 	      || node->update->info_bits == REC_INFO_METADATA_ADD
 	      || node->update->info_bits == REC_INFO_METADATA_ALTER);
 
-	if (mode != BTR_MODIFY_LEAF
-	    && dict_index_is_online_ddl(btr_cur_get_index(btr_cur))) {
-		*rebuilt_old_pk = row_log_table_get_pk(
-			btr_cur_get_rec(btr_cur),
-			btr_cur_get_index(btr_cur), NULL, sys, &heap);
-	} else {
-		*rebuilt_old_pk = NULL;
-	}
-
 	if (mode != BTR_MODIFY_TREE) {
 		ut_ad((mode & ulint(~BTR_ALREADY_S_LATCHED))
 		      == BTR_MODIFY_LEAF);
@@ -267,7 +251,6 @@ row_undo_mod_clust(
 	mtr_t		mtr;
 	dberr_t		err;
 	dict_index_t*	index;
-	bool		online;
 
 	ut_ad(thr_get_trx(thr) == node->trx);
 	ut_ad(node->trx->dict_operation_lock_mode);
@@ -287,26 +270,15 @@ row_undo_mod_clust(
 		index->set_modified(mtr);
 	}
 
-	online = dict_index_is_online_ddl(index);
-	if (online) {
-		ut_ad(node->trx->dict_operation_lock_mode != RW_X_LATCH);
-		mtr_s_lock(dict_index_get_lock(index), &mtr);
-	}
-
 	mem_heap_t*	heap		= mem_heap_create(1024);
 	mem_heap_t*	offsets_heap	= NULL;
 	ulint*		offsets		= NULL;
-	const dtuple_t*	rebuilt_old_pk;
-	byte		sys[DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN];
 
 	/* Try optimistic processing of the record, keeping changes within
 	the index page */
 
 	err = row_undo_mod_clust_low(node, &offsets, &offsets_heap,
-				     heap, &rebuilt_old_pk, sys,
-				     thr, &mtr, online
-				     ? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
-				     : BTR_MODIFY_LEAF);
+				     heap, thr, &mtr, BTR_MODIFY_LEAF);
 
 	if (err != DB_SUCCESS) {
 		btr_pcur_commit_specify_mtr(pcur, &mtr);
@@ -323,41 +295,11 @@ row_undo_mod_clust(
 
 		err = row_undo_mod_clust_low(
 			node, &offsets, &offsets_heap,
-			heap, &rebuilt_old_pk, sys,
-			thr, &mtr, BTR_MODIFY_TREE);
+			heap, thr, &mtr, BTR_MODIFY_TREE);
 		ut_ad(err == DB_SUCCESS || err == DB_OUT_OF_FILE_SPACE);
 	}
 
-	/* Online rebuild cannot be initiated while we are holding
-	dict_operation_lock and index->lock. (It can be aborted.) */
-	ut_ad(online || !dict_index_is_online_ddl(index));
-
-	if (err == DB_SUCCESS && online) {
-
-		ut_ad(rw_lock_own_flagged(
-				&index->lock,
-				RW_LOCK_FLAG_S | RW_LOCK_FLAG_X
-				| RW_LOCK_FLAG_SX));
-
-		switch (node->rec_type) {
-		case TRX_UNDO_DEL_MARK_REC:
-			row_log_table_insert(
-				btr_pcur_get_rec(pcur), index, offsets);
-			break;
-		case TRX_UNDO_UPD_EXIST_REC:
-			row_log_table_update(
-				btr_pcur_get_rec(pcur), index, offsets,
-				rebuilt_old_pk);
-			break;
-		case TRX_UNDO_UPD_DEL_REC:
-			row_log_table_delete(
-				btr_pcur_get_rec(pcur), index, offsets, sys);
-			break;
-		default:
-			ut_ad(0);
-			break;
-		}
-	}
+	// FIXME: Add trigger to log the insert/update/delete.
 
 	/**
 	* when scrubbing, and records gets cleared,
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index f9567de3c1f..c0d06a38869 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1537,16 +1537,7 @@ row_upd_replace_vcol(
 		/* If there is no index on the column, do not bother for
 		value update */
 		if (!col->m_col.ord_part) {
-			dict_index_t*	clust_index
-				= dict_table_get_first_index(table);
-
-			/* Skip the column if there is no online alter
-			table in progress or it is not being indexed
-			in new table */
-			if (!dict_index_is_online_ddl(clust_index)
-			    || !row_log_col_is_indexed(clust_index, col_no)) {
-				continue;
-			}
+			continue;
 		}
 
 		dfield = dtuple_get_nth_v_field(row, col_no);
@@ -2896,7 +2887,6 @@ row_upd_clust_rec(
 	btr_pcur_t*	pcur;
 	btr_cur_t*	btr_cur;
 	dberr_t		err;
-	const dtuple_t*	rebuilt_old_pk	= NULL;
 
 	ut_ad(node);
 	ut_ad(dict_index_is_clust(index));
@@ -2911,11 +2901,6 @@ row_upd_clust_rec(
 				    dict_table_is_comp(index->table)));
 	ut_ad(rec_offs_validate(btr_cur_get_rec(btr_cur), index, offsets));
 
-	if (dict_index_is_online_ddl(index)) {
-		rebuilt_old_pk = row_log_table_get_pk(
-			btr_cur_get_rec(btr_cur), index, offsets, NULL, &heap);
-	}
-
 	/* Try optimistic updating of the record, keeping changes within
 	the page; we do not check locks because we assume the x-lock on the
 	record to update */
@@ -2986,15 +2971,7 @@ row_upd_clust_rec(
 		DEBUG_SYNC_C("after_row_upd_extern");
 	}
 
-	if (err == DB_SUCCESS) {
 success:
-		if (dict_index_is_online_ddl(index)) {
-			row_log_table_update(
-				btr_cur_get_rec(btr_cur),
-				index, offsets, rebuilt_old_pk);
-		}
-	}
-
 	mtr_commit(mtr);
 func_exit:
 	if (heap) {
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index 94e5954c884..c4d31d9e303 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -668,11 +668,6 @@ row_log_apply(). */
 PSI_stage_info	srv_stage_alter_table_log_index
 	= {0, "alter table (log apply index)", PSI_FLAG_STAGE_PROGRESS};
 
-/** Performance schema stage event for monitoring ALTER TABLE progress
-row_log_table_apply(). */
-PSI_stage_info	srv_stage_alter_table_log_table
-	= {0, "alter table (log apply table)", PSI_FLAG_STAGE_PROGRESS};
-
 /** Performance schema stage event for monitoring ALTER TABLE progress
 row_merge_sort(). */
 PSI_stage_info	srv_stage_alter_table_merge_sort
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 03c91167484..e671699c9d7 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -222,7 +222,6 @@ static PSI_stage_info*	srv_stages[] =
 	&srv_stage_alter_table_flush,
 	&srv_stage_alter_table_insert,
 	&srv_stage_alter_table_log_index,
-	&srv_stage_alter_table_log_table,
 	&srv_stage_alter_table_merge_sort,
 	&srv_stage_alter_table_read_pk_internal_sort,
 	&srv_stage_buffer_pool_load,
-- 
2.19.1

