From 6203deb02fd04aab055c09bd1df7299ea2574ad7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= <marko.makela@mariadb.com>
Date: Thu, 3 Oct 2019 10:47:45 +0300
Subject: [PATCH] Stop buffering delete (purge) operations

The delete buffering was only used in the purge of history.
It could have been used in the rollback of INSERT operations, but it
never was. (Likewise, the delete-mark buffering is never attempted
on rollback, only on DELETE or the UPDATE of a PRIMARY KEY.)

A combination of two asynchronous, inherently nondeterministic operations
(purge and change buffering) is difficult to cover in tests or to reason
about. The purge buffering required a complex mechanism in the buffer
pool: the buffer pool watch. If we no longer buffer purge operations,
we can remove the watch as well.

MDEV-16260 will attempt to improve the performance of purge in a more
controlled fashion by scaling the effort according to the workload.

We will retain the code that merges buffered purge operations,
so that upgrades from older versions will be possible.

BTR_DELETE_OP, BTR_DELETE, BUF_BLOCK_POOL_WATCH,
BUF_GET_IF_IN_POOL_OR_WATCH, BUF_POOL_WATCH_SIZE,
ROW_NOT_DELETED_REF: Remove.

btr_cur_t::purge_node, buf_pool_t::watch: Remove.

ibuf_get_volume_buffered_hash(): Remove. It is no longer necessary
to estimate whether the page could become empty.
---
 storage/innobase/btr/btr0cur.cc       |  39 +--
 storage/innobase/buf/buf0buf.cc       | 453 ++------------------------
 storage/innobase/buf/buf0flu.cc       |   2 -
 storage/innobase/buf/buf0lru.cc       |   3 -
 storage/innobase/handler/ha_innodb.cc |   2 +-
 storage/innobase/ibuf/ibuf0ibuf.cc    | 221 ++-----------
 storage/innobase/include/btr0btr.h    |  12 +-
 storage/innobase/include/btr0cur.h    |   7 +-
 storage/innobase/include/buf0buf.h    |  54 +--
 storage/innobase/include/buf0buf.ic   |  27 +-
 storage/innobase/include/ibuf0ibuf.h  |   4 +-
 storage/innobase/include/row0row.h    |   6 +-
 storage/innobase/row/row0log.cc       |   3 -
 storage/innobase/row/row0purge.cc     |  15 +-
 storage/innobase/row/row0row.cc       |   4 -
 storage/innobase/row/row0uins.cc      |   5 +-
 storage/innobase/row/row0umod.cc      |  10 +-
 storage/innobase/row/row0upd.cc       |   3 -
 18 files changed, 88 insertions(+), 782 deletions(-)

diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 940590cd7de..3ffa611dd66 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -73,7 +73,6 @@ enum btr_op_t {
 	BTR_NO_OP = 0,			/*!< Not buffered */
 	BTR_INSERT_OP,			/*!< Insert, do not ignore UNIQUE */
 	BTR_INSERT_IGNORE_UNIQUE_OP,	/*!< Insert, ignoring UNIQUE */
-	BTR_DELETE_OP,			/*!< Purge a delete-marked record */
 	BTR_DELMARK_OP			/*!< Mark a record for deletion */
 };
 
@@ -1160,7 +1159,7 @@ btr_cur_search_to_nth_level_func(
 				PAGE_CUR_LE to search the position! */
 	ulint		latch_mode, /*!< in: BTR_SEARCH_LEAF, ..., ORed with
 				at most one of BTR_INSERT, BTR_DELETE_MARK,
-				BTR_DELETE, or BTR_ESTIMATE;
+				or BTR_ESTIMATE;
 				cursor->left_block is used to store a pointer
 				to the left neighbor page, in the cases
 				BTR_SEARCH_PREV and BTR_MODIFY_PREV;
@@ -1265,7 +1264,7 @@ btr_cur_search_to_nth_level_func(
 	with the latch mode for historical reasons. It's possible for
 	none of the flags to be set. */
 	switch (UNIV_EXPECT(latch_mode
-			    & (BTR_INSERT | BTR_DELETE | BTR_DELETE_MARK),
+			    & (BTR_INSERT | BTR_DELETE_MARK),
 			    0)) {
 	case 0:
 		btr_op = BTR_NO_OP;
@@ -1275,15 +1274,11 @@ btr_cur_search_to_nth_level_func(
 			? BTR_INSERT_IGNORE_UNIQUE_OP
 			: BTR_INSERT_OP;
 		break;
-	case BTR_DELETE:
-		btr_op = BTR_DELETE_OP;
-		ut_a(cursor->purge_node);
-		break;
 	case BTR_DELETE_MARK:
 		btr_op = BTR_DELMARK_OP;
 		break;
 	default:
-		/* only one of BTR_INSERT, BTR_DELETE, BTR_DELETE_MARK
+		/* only one of BTR_INSERT, BTR_DELETE_MARK
 		should be specified at a time */
 		ut_error;
 	}
@@ -1530,9 +1525,7 @@ btr_cur_search_to_nth_level_func(
 			/* Try to buffer the operation if the leaf
 			page is not in the buffer pool. */
 
-			buf_mode = btr_op == BTR_DELETE_OP
-				? BUF_GET_IF_IN_POOL_OR_WATCH
-				: BUF_GET_IF_IN_POOL;
+			buf_mode = BUF_GET_IF_IN_POOL;
 		}
 	}
 
@@ -1599,30 +1592,6 @@ btr_cur_search_to_nth_level_func(
 
 			break;
 
-		case BTR_DELETE_OP:
-			ut_ad(buf_mode == BUF_GET_IF_IN_POOL_OR_WATCH);
-			ut_ad(!dict_index_is_spatial(index));
-
-			if (!row_purge_poss_sec(cursor->purge_node,
-						index, tuple)) {
-
-				/* The record cannot be purged yet. */
-				cursor->flag = BTR_CUR_DELETE_REF;
-			} else if (ibuf_insert(IBUF_OP_DELETE, tuple,
-					       index, page_id, zip_size,
-					       cursor->thr)) {
-
-				/* The purge was buffered. */
-				cursor->flag = BTR_CUR_DELETE_IBUF;
-			} else {
-				/* The purge could not be buffered. */
-				buf_pool_watch_unset(page_id);
-				break;
-			}
-
-			buf_pool_watch_unset(page_id);
-			goto func_exit;
-
 		default:
 			ut_error;
 		}
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index d34fe4371cc..b3027ececc6 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -1805,7 +1805,6 @@ buf_chunk_not_freed(
 		ibool	ready;
 
 		switch (buf_block_get_state(block)) {
-		case BUF_BLOCK_POOL_WATCH:
 		case BUF_BLOCK_ZIP_PAGE:
 		case BUF_BLOCK_ZIP_DIRTY:
 			/* The uncompressed buffer pool should never
@@ -2002,13 +2001,6 @@ buf_pool_init_instance(
 		buf_pool->no_flush[i] = os_event_create(0);
 	}
 
-	buf_pool->watch = (buf_page_t*) ut_zalloc_nokey(
-		sizeof(*buf_pool->watch) * BUF_POOL_WATCH_SIZE);
-	for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
-		buf_pool->watch[i].buf_pool_index
-			= unsigned(buf_pool->instance_no);
-	}
-
 	/* All fields are initialized by ut_zalloc_nokey(). */
 
 	buf_pool->try_LRU_scan = TRUE;
@@ -2081,9 +2073,6 @@ buf_pool_free_instance(
 		}
 	}
 
-	ut_free(buf_pool->watch);
-	buf_pool->watch = NULL;
-
 	chunks = buf_pool->chunks;
 	chunk = chunks + buf_pool->n_chunks;
 
@@ -3322,10 +3311,8 @@ buf_relocate(
 	ut_ad(bpage->in_page_hash);
 	ut_ad(bpage == buf_page_hash_get_low(buf_pool, bpage->id));
 
-	ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
 #ifdef UNIV_DEBUG
 	switch (buf_page_get_state(bpage)) {
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
 	case BUF_BLOCK_FILE_PAGE:
@@ -3461,241 +3448,6 @@ LRUItr::start()
 	return(m_hp);
 }
 
-/** Determine if a block is a sentinel for a buffer pool watch.
-@param[in]	buf_pool	buffer pool instance
-@param[in]	bpage		block
-@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
-ibool
-buf_pool_watch_is_sentinel(
-	const buf_pool_t*	buf_pool,
-	const buf_page_t*	bpage)
-{
-	/* We must also own the appropriate hash lock. */
-	ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage));
-	ut_ad(buf_page_in_file(bpage));
-
-	if (bpage < &buf_pool->watch[0]
-	    || bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {
-
-		ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
-		      || bpage->zip.data != NULL);
-
-		return(FALSE);
-	}
-
-	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
-	ut_ad(!bpage->in_zip_hash);
-	ut_ad(bpage->in_page_hash);
-	ut_ad(bpage->zip.data == NULL);
-	return(TRUE);
-}
-
-/** Add watch for the given page to be read in. Caller must have
-appropriate hash_lock for the bpage. This function may release the
-hash_lock and reacquire it.
-@param[in]	page_id		page id
-@param[in,out]	hash_lock	hash_lock currently latched
-@return NULL if watch set, block if the page is in the buffer pool */
-static
-buf_page_t*
-buf_pool_watch_set(
-	const page_id_t		page_id,
-	rw_lock_t**		hash_lock)
-{
-	buf_page_t*	bpage;
-	ulint		i;
-	buf_pool_t*	buf_pool = buf_pool_get(page_id);
-
-	ut_ad(*hash_lock == buf_page_hash_lock_get(buf_pool, page_id));
-
-	ut_ad(rw_lock_own(*hash_lock, RW_LOCK_X));
-
-	bpage = buf_page_hash_get_low(buf_pool, page_id);
-
-	if (bpage != NULL) {
-page_found:
-		if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) {
-			/* The page was loaded meanwhile. */
-			return(bpage);
-		}
-
-		/* Add to an existing watch. */
-		bpage->fix();
-		return(NULL);
-	}
-
-	/* From this point this function becomes fairly heavy in terms
-	of latching. We acquire the buf_pool mutex as well as all the
-	hash_locks. buf_pool mutex is needed because any changes to
-	the page_hash must be covered by it and hash_locks are needed
-	because we don't want to read any stale information in
-	buf_pool->watch[]. However, it is not in the critical code path
-	as this function will be called only by the purge thread. */
-
-	/* To obey latching order first release the hash_lock. */
-	rw_lock_x_unlock(*hash_lock);
-
-	buf_pool_mutex_enter(buf_pool);
-	hash_lock_x_all(buf_pool->page_hash);
-
-	/* If not own buf_pool_mutex, page_hash can be changed. */
-	*hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
-
-	/* We have to recheck that the page
-	was not loaded or a watch set by some other
-	purge thread. This is because of the small
-	time window between when we release the
-	hash_lock to acquire buf_pool mutex above. */
-
-	bpage = buf_page_hash_get_low(buf_pool, page_id);
-	if (UNIV_LIKELY_NULL(bpage)) {
-		buf_pool_mutex_exit(buf_pool);
-		hash_unlock_x_all_but(buf_pool->page_hash, *hash_lock);
-		goto page_found;
-	}
-
-	/* The maximum number of purge threads should never exceed
-	BUF_POOL_WATCH_SIZE. So there is no way for purge thread
-	instance to hold a watch when setting another watch. */
-	for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
-		bpage = &buf_pool->watch[i];
-
-		ut_ad(bpage->access_time == 0);
-		ut_ad(bpage->newest_modification == 0);
-		ut_ad(bpage->oldest_modification == 0);
-		ut_ad(bpage->zip.data == NULL);
-		ut_ad(!bpage->in_zip_hash);
-
-		switch (bpage->state) {
-		case BUF_BLOCK_POOL_WATCH:
-			ut_ad(!bpage->in_page_hash);
-			ut_ad(bpage->buf_fix_count == 0);
-
-			/* bpage is pointing to buf_pool->watch[],
-			which is protected by buf_pool->mutex.
-			Normally, buf_page_t objects are protected by
-			buf_block_t::mutex or buf_pool->zip_mutex or both. */
-
-			bpage->state = BUF_BLOCK_ZIP_PAGE;
-			bpage->id = page_id;
-			bpage->buf_fix_count = 1;
-
-			ut_d(bpage->in_page_hash = TRUE);
-			HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
-				    page_id.fold(), bpage);
-
-			buf_pool_mutex_exit(buf_pool);
-			/* Once the sentinel is in the page_hash we can
-			safely release all locks except just the
-			relevant hash_lock */
-			hash_unlock_x_all_but(buf_pool->page_hash,
-						*hash_lock);
-
-			return(NULL);
-		case BUF_BLOCK_ZIP_PAGE:
-			ut_ad(bpage->in_page_hash);
-			ut_ad(bpage->buf_fix_count > 0);
-			break;
-		default:
-			ut_error;
-		}
-	}
-
-	/* Allocation failed.  Either the maximum number of purge
-	threads should never exceed BUF_POOL_WATCH_SIZE, or this code
-	should be modified to return a special non-NULL value and the
-	caller should purge the record directly. */
-	ut_error;
-
-	/* Fix compiler warning */
-	return(NULL);
-}
-
-/** Remove the sentinel block for the watch before replacing it with a
-real block. buf_page_watch_clear() or buf_page_watch_occurred() will notice
-that the block has been replaced with the real block.
-@param[in,out]	buf_pool	buffer pool instance
-@param[in,out]	watch		sentinel for watch
-@return reference count, to be added to the replacement block */
-static
-void
-buf_pool_watch_remove(
-	buf_pool_t*	buf_pool,
-	buf_page_t*	watch)
-{
-#ifdef UNIV_DEBUG
-	/* We must also own the appropriate hash_bucket mutex. */
-	rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, watch->id);
-	ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
-#endif /* UNIV_DEBUG */
-
-	ut_ad(buf_pool_mutex_own(buf_pool));
-
-	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, watch->id.fold(),
-		    watch);
-	ut_d(watch->in_page_hash = FALSE);
-	watch->buf_fix_count = 0;
-	watch->state = BUF_BLOCK_POOL_WATCH;
-}
-
-/** Stop watching if the page has been read in.
-buf_pool_watch_set(same_page_id) must have returned NULL before.
-@param[in]	page_id	page id */
-void buf_pool_watch_unset(const page_id_t page_id)
-{
-	buf_page_t*	bpage;
-	buf_pool_t*	buf_pool = buf_pool_get(page_id);
-
-	/* We only need to have buf_pool mutex in case where we end
-	up calling buf_pool_watch_remove but to obey latching order
-	we acquire it here before acquiring hash_lock. This should
-	not cause too much grief as this function is only ever
-	called from the purge thread. */
-	buf_pool_mutex_enter(buf_pool);
-
-	rw_lock_t*	hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
-	rw_lock_x_lock(hash_lock);
-
-	/* The page must exist because buf_pool_watch_set()
-	increments buf_fix_count. */
-	bpage = buf_page_hash_get_low(buf_pool, page_id);
-
-	if (bpage->unfix() == 0
-	    && buf_pool_watch_is_sentinel(buf_pool, bpage)) {
-		buf_pool_watch_remove(buf_pool, bpage);
-	}
-
-	buf_pool_mutex_exit(buf_pool);
-	rw_lock_x_unlock(hash_lock);
-}
-
-/** Check if the page has been read in.
-This may only be called after buf_pool_watch_set(same_page_id)
-has returned NULL and before invoking buf_pool_watch_unset(same_page_id).
-@param[in]	page_id	page id
-@return false if the given page was not read in, true if it was */
-bool buf_pool_watch_occurred(const page_id_t page_id)
-{
-	bool		ret;
-	buf_page_t*	bpage;
-	buf_pool_t*	buf_pool = buf_pool_get(page_id);
-	rw_lock_t*	hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
-
-	rw_lock_s_lock(hash_lock);
-
-	/* If not own buf_pool_mutex, page_hash can be changed. */
-	hash_lock = buf_page_hash_lock_s_confirm(hash_lock, buf_pool, page_id);
-
-	/* The page must exist because buf_pool_watch_set()
-	increments buf_fix_count. */
-	bpage = buf_page_hash_get_low(buf_pool, page_id);
-
-	ret = !buf_pool_watch_is_sentinel(buf_pool, bpage);
-	rw_lock_s_unlock(hash_lock);
-
-	return(ret);
-}
-
 /********************************************************************//**
 Moves a page to the start of the buffer pool LRU list. This high-level
 function can be used to prevent an important page from slipping out of
@@ -3756,7 +3508,6 @@ buf_page_t* buf_page_set_file_page_was_freed(const page_id_t page_id)
 
 	if (bpage) {
 		BPageMutex*	block_mutex = buf_page_get_mutex(bpage);
-		ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
 		mutex_enter(block_mutex);
 		rw_lock_s_unlock(hash_lock);
 		/* bpage->file_page_was_freed can already hold
@@ -3783,7 +3534,6 @@ buf_page_t* buf_page_reset_file_page_was_freed(const page_id_t page_id)
 	bpage = buf_page_hash_get_s_locked(buf_pool, page_id, &hash_lock);
 	if (bpage) {
 		BPageMutex*	block_mutex = buf_page_get_mutex(bpage);
-		ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
 		mutex_enter(block_mutex);
 		rw_lock_s_unlock(hash_lock);
 		bpage->file_page_was_freed = FALSE;
@@ -3850,7 +3600,6 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
 		bpage = buf_page_hash_get_s_locked(buf_pool, page_id,
 						   &hash_lock);
 		if (bpage) {
-			ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
 			break;
 		}
 
@@ -3880,8 +3629,6 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
 		return(NULL);
 	}
 
-	ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
-
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
@@ -4251,7 +3998,7 @@ buf_wait_for_read(
 @param[in]	buf_pool	buffer pool instance
 @param[in]	bpage		block which was read from file
 @param[in]	mode		BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
+BUF_PEEK_IF_IN_POOL, or BUF_GET_NO_LATCH
 @param[in]	file		file name
 @param[in]	line		line where called
 @param[out]	err		DB_SUCCESS or error code
@@ -4439,7 +4186,7 @@ buf_block_for_zip_page(
 @param[in]	rw_latch		RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
 @param[in]	guess			guessed block or NULL
 @param[in]	mode			BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
+BUF_PEEK_IF_IN_POOL, or BUF_GET_NO_LATCH
 @param[in]	file			file name
 @param[in]	line			line where called
 @param[in]	mtr			mini-transaction
@@ -4477,8 +4224,7 @@ buf_page_get_gen(
 	      || (rw_latch == RW_NO_LATCH));
 	ut_ad(!allow_ibuf_merge
 	      || mode == BUF_GET
-	      || mode == BUF_GET_IF_IN_POOL
-	      || mode == BUF_GET_IF_IN_POOL_OR_WATCH);
+	      || mode == BUF_GET_IF_IN_POOL);
 
 	if (err) {
 		*err = DB_SUCCESS;
@@ -4503,7 +4249,6 @@ buf_page_get_gen(
 		ut_ad(rw_latch == RW_NO_LATCH);
 		/* fall through */
 	case BUF_GET:
-	case BUF_GET_IF_IN_POOL_OR_WATCH:
 	case BUF_GET_POSSIBLY_FREED:
 		fil_space_t* s = fil_space_acquire_for_io(page_id.space());
 		ut_ad(s);
@@ -4547,62 +4292,13 @@ buf_page_get_gen(
 		block = (buf_block_t*) buf_page_hash_get_low(buf_pool, page_id);
 	}
 
-	if (!block || buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
+	if (!block) {
 		rw_lock_s_unlock(hash_lock);
-		block = NULL;
-	}
-
-	if (block == NULL) {
 
 		/* Page not in buf_pool: needs to be read from file */
 
-		if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
-			rw_lock_x_lock(hash_lock);
-
-			/* If not own buf_pool_mutex,
-			page_hash can be changed. */
-			hash_lock = buf_page_hash_lock_x_confirm(
-				hash_lock, buf_pool, page_id);
-
-			block = (buf_block_t*) buf_pool_watch_set(
-				page_id, &hash_lock);
-
-			if (block) {
-				/* We can release hash_lock after we
-				increment the fix count to make
-				sure that no state change takes place. */
-				fix_block = block;
-
-				if (fsp_is_system_temporary(page_id.space())) {
-					/* For temporary tablespace,
-					the mutex is being used for
-					synchronization between user
-					thread and flush thread,
-					instead of block->lock. See
-					buf_flush_page() for the flush
-					thread counterpart. */
-
-					BPageMutex*	fix_mutex
-						= buf_page_get_mutex(
-							&fix_block->page);
-					mutex_enter(fix_mutex);
-					fix_block->fix();
-					mutex_exit(fix_mutex);
-				} else {
-					fix_block->fix();
-				}
-
-				/* Now safe to release page_hash mutex */
-				rw_lock_x_unlock(hash_lock);
-				goto got_block;
-			}
-
-			rw_lock_x_unlock(hash_lock);
-		}
-
 		switch (mode) {
 		case BUF_GET_IF_IN_POOL:
-		case BUF_GET_IF_IN_POOL_OR_WATCH:
 		case BUF_PEEK_IF_IN_POOL:
 		case BUF_EVICT_IF_IN_POOL:
 			ut_ad(!rw_lock_own_flagged(
@@ -4710,7 +4406,6 @@ buf_page_get_gen(
 	/* Now safe to release page_hash mutex */
 	rw_lock_s_unlock(hash_lock);
 
-got_block:
 	switch (mode) {
 	default:
 		ut_ad(block->zip_size() == zip_size);
@@ -4799,7 +4494,6 @@ buf_page_get_gen(
 		ut_ad(zip_err == SUCCESS);
 		break;
 
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
 	case BUF_BLOCK_MEMORY:
@@ -4818,7 +4512,7 @@ buf_page_get_gen(
 
 #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
 
-	if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
+	if (mode == BUF_GET_IF_IN_POOL
 	    && (ibuf_debug || buf_debug_execute_is_force_flush())) {
 
 		/* Try to evict the block from the buffer pool, to use the
@@ -4848,25 +4542,15 @@ buf_page_get_gen(
 			hash_lock = buf_page_hash_lock_x_confirm(
 				hash_lock, buf_pool, page_id);
 
-			if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
-				/* Set the watch, as it would have
-				been set if the page were not in the
-				buffer pool in the first place. */
-				block = (buf_block_t*) buf_pool_watch_set(
-					page_id, &hash_lock);
-			} else {
-				block = (buf_block_t*) buf_page_hash_get_low(
-					buf_pool, page_id);
-			}
+			block = (buf_block_t*) buf_page_hash_get_low(
+				buf_pool, page_id);
 
 			rw_lock_x_unlock(hash_lock);
 
 			if (block != NULL) {
-				/* Either the page has been read in or
-				a watch was set on that in the window
-				where we released the buf_pool::mutex
-				and before we acquire the hash_lock
-				above. Try again. */
+				/* The page was read between us
+				invoking buf_pool_mutex_exit()
+				and acquiring hash_lock above. Try again. */
 				guess = block;
 
 				goto loop;
@@ -5259,8 +4943,6 @@ buf_page_try_get_func(
 		return(NULL);
 	}
 
-	ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
-
 	buf_page_mutex_enter(block);
 	rw_lock_s_unlock(hash_lock);
 
@@ -5349,8 +5031,6 @@ buf_page_init(
 	ulint			zip_size,
 	buf_block_t*		block)
 {
-	buf_page_t*	hash_page;
-
 	ut_ad(buf_pool == buf_pool_get(page_id));
 	ut_ad(buf_pool_mutex_own(buf_pool));
 
@@ -5380,35 +5060,7 @@ buf_page_init(
 	buf_page_init_low(&block->page);
 
 	/* Insert into the hash table of file pages */
-
-	hash_page = buf_page_hash_get_low(buf_pool, page_id);
-
-	if (hash_page == NULL) {
-		/* Block not found in hash table */
-	} else if (buf_pool_watch_is_sentinel(buf_pool, hash_page)) {
-		/* Preserve the reference count. */
-		ib_uint32_t	buf_fix_count = hash_page->buf_fix_count;
-
-		ut_a(buf_fix_count > 0);
-
-		block->page.buf_fix_count += buf_fix_count;
-
-		buf_pool_watch_remove(buf_pool, hash_page);
-	} else {
-
-		ib::error() << "Page " << page_id
-			<< " already found in the hash table: "
-			<< hash_page << ", " << block;
-
-		ut_d(buf_page_mutex_exit(block));
-		ut_d(buf_pool_mutex_exit(buf_pool));
-		ut_d(buf_print());
-		ut_d(buf_LRU_print());
-		ut_d(buf_validate());
-		ut_d(buf_LRU_validate());
-		ut_error;
-	}
-
+	DBUG_ASSERT(!buf_page_hash_get_low(buf_pool, page_id));
 	ut_ad(!block->page.in_zip_hash);
 	ut_ad(!block->page.in_page_hash);
 	ut_d(block->page.in_page_hash = TRUE);
@@ -5447,7 +5099,6 @@ buf_page_init_for_read(
 {
 	buf_block_t*	block;
 	buf_page_t*	bpage	= NULL;
-	buf_page_t*	watch_page;
 	rw_lock_t*	hash_lock;
 	mtr_t		mtr;
 	bool		lru	= false;
@@ -5489,10 +5140,8 @@ buf_page_init_for_read(
 	hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
 	rw_lock_x_lock(hash_lock);
 
-	watch_page = buf_page_hash_get_low(buf_pool, page_id);
-	if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) {
+	if (buf_page_hash_get_low(buf_pool, page_id)) {
 		/* The page is already in the buffer pool. */
-		watch_page = NULL;
 		rw_lock_x_unlock(hash_lock);
 		if (block) {
 			buf_page_mutex_enter(block);
@@ -5573,22 +5222,14 @@ buf_page_init_for_read(
 		/* If buf_buddy_alloc() allocated storage from the LRU list,
 		it released and reacquired buf_pool->mutex.  Thus, we must
 		check the page_hash again, as it may have been modified. */
-		if (UNIV_UNLIKELY(lru)) {
-
-			watch_page = buf_page_hash_get_low(buf_pool, page_id);
-
-			if (UNIV_UNLIKELY(watch_page
-			    && !buf_pool_watch_is_sentinel(buf_pool,
-							   watch_page))) {
-
-				/* The block was added by some other thread. */
-				rw_lock_x_unlock(hash_lock);
-				watch_page = NULL;
-				buf_buddy_free(buf_pool, data, zip_size);
+		if (UNIV_UNLIKELY(lru)
+		    && buf_page_hash_get_low(buf_pool, page_id)) {
+			/* The block was added by some other thread. */
+			rw_lock_x_unlock(hash_lock);
+			buf_buddy_free(buf_pool, data, zip_size);
 
-				bpage = NULL;
-				goto func_exit;
-			}
+			bpage = NULL;
+			goto func_exit;
 		}
 
 		bpage = buf_page_alloc_descriptor();
@@ -5618,21 +5259,6 @@ buf_page_init_for_read(
 
 		ut_d(bpage->in_page_hash = TRUE);
 
-		if (watch_page != NULL) {
-
-			/* Preserve the reference count. */
-			ib_uint32_t	buf_fix_count;
-
-			buf_fix_count = watch_page->buf_fix_count;
-
-			ut_a(buf_fix_count > 0);
-
-			bpage->buf_fix_count += buf_fix_count;
-
-			ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page));
-			buf_pool_watch_remove(buf_pool, watch_page);
-		}
-
 		HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
 			    bpage->id.fold(), bpage);
 
@@ -5699,8 +5325,7 @@ buf_page_create(
 	block = (buf_block_t*) buf_page_hash_get_low(buf_pool, page_id);
 
 	if (block
-	    && buf_page_in_file(&block->page)
-	    && !buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
+	    && buf_page_in_file(&block->page)) {
 		ut_d(block->page.file_page_was_freed = FALSE);
 
 		/* Page can be found in buf_pool */
@@ -6508,7 +6133,6 @@ buf_pool_validate_instance(
 			buf_page_mutex_enter(block);
 
 			switch (buf_block_get_state(block)) {
-			case BUF_BLOCK_POOL_WATCH:
 			case BUF_BLOCK_ZIP_PAGE:
 			case BUF_BLOCK_ZIP_DIRTY:
 				/* These should only occur on
@@ -6619,8 +6243,7 @@ buf_pool_validate_instance(
 		ut_a(b->oldest_modification);
 		n_flush++;
 
-		switch (buf_page_get_state(b)) {
-		case BUF_BLOCK_ZIP_DIRTY:
+		if (buf_page_get_state(b) == BUF_BLOCK_ZIP_DIRTY) {
 			n_lru++;
 			n_zip++;
 			switch (buf_page_get_io_fix(b)) {
@@ -6644,18 +6267,8 @@ buf_pool_validate_instance(
 				}
 				break;
 			}
-			break;
-		case BUF_BLOCK_FILE_PAGE:
-			/* uncompressed page */
-			break;
-		case BUF_BLOCK_POOL_WATCH:
-		case BUF_BLOCK_ZIP_PAGE:
-		case BUF_BLOCK_NOT_USED:
-		case BUF_BLOCK_READY_FOR_USE:
-		case BUF_BLOCK_MEMORY:
-		case BUF_BLOCK_REMOVE_HASH:
-			ut_error;
-			break;
+		} else {
+			ut_ad(buf_page_get_state(b) == BUF_BLOCK_FILE_PAGE);
 		}
 		ut_a(buf_page_hash_get_low(buf_pool, b->id) == b);
 	}
@@ -6894,25 +6507,15 @@ buf_get_latched_pages_number_instance(
 	for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
 	     b = UT_LIST_GET_NEXT(list, b)) {
 		ut_ad(b->in_flush_list);
-
-		switch (buf_page_get_state(b)) {
-		case BUF_BLOCK_ZIP_DIRTY:
+		ut_ad(buf_page_get_state(b) == BUF_BLOCK_ZIP_DIRTY
+		      || buf_page_get_state(b) == BUF_BLOCK_FILE_PAGE);
+		if (buf_page_get_state(b) == BUF_BLOCK_ZIP_DIRTY) {
 			if (b->buf_fix_count != 0
 			    || buf_page_get_io_fix(b) != BUF_IO_NONE) {
 				fixed_pages_number++;
 			}
-			break;
-		case BUF_BLOCK_FILE_PAGE:
-			/* uncompressed page */
-			break;
-		case BUF_BLOCK_POOL_WATCH:
-		case BUF_BLOCK_ZIP_PAGE:
-		case BUF_BLOCK_NOT_USED:
-		case BUF_BLOCK_READY_FOR_USE:
-		case BUF_BLOCK_MEMORY:
-		case BUF_BLOCK_REMOVE_HASH:
-			ut_error;
-			break;
+		} else {
+			ut_ad(buf_page_get_state(b) == BUF_BLOCK_FILE_PAGE);
 		}
 	}
 
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 658d023c9c7..db4e813617a 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -571,7 +571,6 @@ buf_flush_remove(
 	buf_pool->flush_hp.adjust(bpage);
 
 	switch (buf_page_get_state(bpage)) {
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_ZIP_PAGE:
 		/* Clean compressed pages should not be on the flush list */
 	case BUF_BLOCK_NOT_USED:
@@ -998,7 +997,6 @@ buf_flush_write_block_low(
 	}
 
 	switch (buf_page_get_state(bpage)) {
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_ZIP_PAGE: /* The page should be dirty. */
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 79fafa68c4c..9704fa7be5c 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -1976,7 +1976,6 @@ buf_LRU_block_remove_hashed(
 		ut_a(bpage->oldest_modification == 0);
 		UNIV_MEM_ASSERT_W(bpage->zip.data, bpage->zip_size());
 		break;
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_ZIP_DIRTY:
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
@@ -2099,7 +2098,6 @@ buf_LRU_block_remove_hashed(
 
 		return(true);
 
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_ZIP_DIRTY:
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
@@ -2333,7 +2331,6 @@ buf_LRU_validate_instance(
              bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
 
 		switch (buf_page_get_state(bpage)) {
-		case BUF_BLOCK_POOL_WATCH:
 		case BUF_BLOCK_NOT_USED:
 		case BUF_BLOCK_READY_FOR_USE:
 		case BUF_BLOCK_MEMORY:
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index e017f3977d0..6d37683c85c 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -442,7 +442,7 @@ static const char* innodb_change_buffering_names[] = {
 	"inserts",	/* IBUF_USE_INSERT */
 	"deletes",	/* IBUF_USE_DELETE_MARK */
 	"changes",	/* IBUF_USE_INSERT_DELETE_MARK */
-	"purges",	/* IBUF_USE_DELETE */
+	"purges",	/* IBUF_USE_DELETE (same as IBUF_USE_DELETE_MARK) */
 	"all",		/* IBUF_USE_ALL */
 	NullS
 };
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index e7d3d091dd4..e8612e412c8 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -2617,69 +2617,23 @@ ibuf_contract_after_insert(
 	} while (size > 0 && sum_sizes < entry_size);
 }
 
-/*********************************************************************//**
-Determine if an insert buffer record has been encountered already.
-@return TRUE if a new record, FALSE if possible duplicate */
-static
-ibool
-ibuf_get_volume_buffered_hash(
-/*==========================*/
-	const rec_t*	rec,	/*!< in: ibuf record in post-4.1 format */
-	const byte*	types,	/*!< in: fields */
-	const byte*	data,	/*!< in: start of user record data */
-	ulint		comp,	/*!< in: 0=ROW_FORMAT=REDUNDANT,
-				nonzero=ROW_FORMAT=COMPACT */
-	ulint*		hash,	/*!< in/out: hash array */
-	ulint		size)	/*!< in: number of elements in hash array */
-{
-	ulint		len;
-	ulint		fold;
-	ulint		bitmask;
-
-	len = ibuf_rec_get_size(
-		rec, types,
-		rec_get_n_fields_old(rec) - IBUF_REC_FIELD_USER, comp);
-	fold = ut_fold_binary(data, len);
-
-	hash += (fold / (CHAR_BIT * sizeof *hash)) % size;
-	bitmask = static_cast<ulint>(1) << (fold % (CHAR_BIT * sizeof(*hash)));
-
-	if (*hash & bitmask) {
-
-		return(FALSE);
-	}
-
-	/* We have not seen this record yet.  Insert it. */
-	*hash |= bitmask;
-
-	return(TRUE);
-}
-
 #ifdef UNIV_DEBUG
-# define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs)	\
-	ibuf_get_volume_buffered_count_func(mtr,rec,hash,size,n_recs)
+# define ibuf_get_volume_buffered_count(mtr, rec) \
+	ibuf_get_volume_buffered_count_func(mtr, rec)
 #else /* UNIV_DEBUG */
-# define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs)	\
-	ibuf_get_volume_buffered_count_func(rec,hash,size,n_recs)
+# define ibuf_get_volume_buffered_count(mtr, rec) \
+	ibuf_get_volume_buffered_count_func(rec)
 #endif /* UNIV_DEBUG */
 
-/*********************************************************************//**
-Update the estimate of the number of records on a page, and
-get the space taken by merging the buffered record to the index page.
+/** Determine the space taken by merging the buffered record to the index page.
+@param rec	change buffer record
 @return size of index record in bytes + an upper limit of the space
 taken in the page directory */
-static
-ulint
-ibuf_get_volume_buffered_count_func(
-/*================================*/
+static ulint ibuf_get_volume_buffered_count_func(
 #ifdef UNIV_DEBUG
-	mtr_t*		mtr,	/*!< in: mini-transaction owning rec */
-#endif /* UNIV_DEBUG */
-	const rec_t*	rec,	/*!< in: insert buffer record */
-	ulint*		hash,	/*!< in/out: hash array */
-	ulint		size,	/*!< in: number of elements in hash array */
-	lint*		n_recs)	/*!< in/out: estimated number of records
-				on the page that rec points to */
+	mtr_t* mtr, /*!< mini-transaction */
+#endif
+	const rec_t* rec)
 {
 	ulint		len;
 	ibuf_op_t	ibuf_op;
@@ -2715,21 +2669,12 @@ ibuf_get_volume_buffered_count_func(
 	default:
 		ut_error;
 	case 0:
-		/* This ROW_TYPE=REDUNDANT record does not include an
-		operation counter.  Exclude it from the *n_recs,
-		because deletes cannot be buffered if there are
-		old-style inserts buffered for the page. */
-
 		len = ibuf_rec_get_size(rec, types, n_fields, 0);
 
 		return(len
 		       + rec_get_converted_extra_size(len, n_fields, 0)
 		       + page_dir_calc_reserved_space(1));
 	case 1:
-		/* This ROW_TYPE=COMPACT record does not include an
-		operation counter.  Exclude it from the *n_recs,
-		because deletes cannot be buffered if there are
-		old-style inserts buffered for the page. */
 		goto get_volume_comp;
 
 	case IBUF_REC_INFO_SIZE:
@@ -2739,35 +2684,14 @@ ibuf_get_volume_buffered_count_func(
 
 	switch (ibuf_op) {
 	case IBUF_OP_INSERT:
-		/* Inserts can be done by updating a delete-marked record.
-		Because delete-mark and insert operations can be pointing to
-		the same records, we must not count duplicates. */
-	case IBUF_OP_DELETE_MARK:
-		/* There must be a record to delete-mark.
-		See if this record has been already buffered. */
-		if (n_recs && ibuf_get_volume_buffered_hash(
-			    rec, types + IBUF_REC_INFO_SIZE,
-			    types + len,
-			    types[IBUF_REC_OFFSET_FLAGS] & IBUF_REC_COMPACT,
-			    hash, size)) {
-			(*n_recs)++;
-		}
-
-		if (ibuf_op == IBUF_OP_DELETE_MARK) {
-			/* Setting the delete-mark flag does not
-			affect the available space on the page. */
-			return(0);
-		}
 		break;
+	case IBUF_OP_DELETE_MARK:
+		/* Setting the delete-mark flag does not
+		affect the available space on the page. */
 	case IBUF_OP_DELETE:
-		/* A record will be removed from the page. */
-		if (n_recs) {
-			(*n_recs)--;
-		}
 		/* While deleting a record actually frees up space,
 		we have to play it safe and pretend that it takes no
 		additional space (the record might not exist, etc.). */
-		return(0);
+		return 0;
 	default:
 		ut_error;
 	}
@@ -2810,9 +2734,6 @@ ibuf_get_volume_buffered(
 				or BTR_MODIFY_TREE */
 	ulint		space,	/*!< in: space id */
 	ulint		page_no,/*!< in: page number of an index page */
-	lint*		n_recs,	/*!< in/out: minimum number of records on the
-				page after the buffered changes have been
-				applied, or NULL to disable the counting */
 	mtr_t*		mtr)	/*!< in: mini-transaction of pcur */
 {
 	ulint		volume;
@@ -2822,8 +2743,6 @@ ibuf_get_volume_buffered(
 	const page_t*	prev_page;
 	ulint		next_page_no;
 	const page_t*	next_page;
-	/* bitmap of buffered recs */
-	ulint		hash_bitmap[128 / sizeof(ulint)];
 
 	ut_ad((pcur->latch_mode == BTR_MODIFY_PREV)
 	      || (pcur->latch_mode == BTR_MODIFY_TREE));
@@ -2833,10 +2752,6 @@ ibuf_get_volume_buffered(
 
 	volume = 0;
 
-	if (n_recs) {
-		memset(hash_bitmap, 0, sizeof hash_bitmap);
-	}
-
 	rec = btr_pcur_get_rec(pcur);
 	page = page_align(rec);
 	ut_ad(page_validate(page, ibuf.index));
@@ -2855,9 +2770,7 @@ ibuf_get_volume_buffered(
 			goto count_later;
 		}
 
-		volume += ibuf_get_volume_buffered_count(
-			mtr, rec,
-			hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+		volume += ibuf_get_volume_buffered_count(mtr, rec);
 	}
 
 	/* Look at the previous page */
@@ -2907,9 +2820,7 @@ ibuf_get_volume_buffered(
 			goto count_later;
 		}
 
-		volume += ibuf_get_volume_buffered_count(
-			mtr, rec,
-			hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+		volume += ibuf_get_volume_buffered_count(mtr, rec);
 	}
 
 count_later:
@@ -2927,9 +2838,7 @@ ibuf_get_volume_buffered(
 			return(volume);
 		}
 
-		volume += ibuf_get_volume_buffered_count(
-			mtr, rec,
-			hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+		volume += ibuf_get_volume_buffered_count(mtr, rec);
 	}
 
 	/* Look at the next page */
@@ -2977,9 +2886,7 @@ ibuf_get_volume_buffered(
 			return(volume);
 		}
 
-		volume += ibuf_get_volume_buffered_count(
-			mtr, rec,
-			hash_bitmap, UT_ARR_SIZE(hash_bitmap), n_recs);
+		volume += ibuf_get_volume_buffered_count(mtr, rec);
 	}
 }
 
@@ -3212,7 +3119,6 @@ ibuf_insert_low(
 	mem_heap_t*	heap;
 	ulint*		offsets		= NULL;
 	ulint		buffered;
-	lint		min_n_recs;
 	rec_t*		ins_rec;
 	ibool		old_bit_value;
 	page_t*		bitmap_page;
@@ -3231,7 +3137,7 @@ ibuf_insert_low(
 	ut_ad(dtuple_check_typed(entry));
 	ut_ad(!no_counter || op == IBUF_OP_INSERT);
 	ut_ad(page_id.space() == index->table->space_id);
-	ut_a(op < IBUF_OP_COUNT);
+	ut_ad(op == IBUF_OP_INSERT || op == IBUF_OP_DELETE_MARK);
 
 	do_merge = FALSE;
 
@@ -3303,45 +3209,12 @@ ibuf_insert_low(
 
 	/* Find out the volume of already buffered inserts for the same index
 	page */
-	min_n_recs = 0;
 	buffered = ibuf_get_volume_buffered(&pcur,
 					    page_id.space(),
-					    page_id.page_no(),
-					    op == IBUF_OP_DELETE
-					    ? &min_n_recs
-					    : NULL, &mtr);
+					    page_id.page_no(), &mtr);
 
 	const ulint physical_size = zip_size ? zip_size : srv_page_size;
 
-	if (op == IBUF_OP_DELETE
-	    && (min_n_recs < 2 || buf_pool_watch_occurred(page_id))) {
-		/* The page could become empty after the record is
-		deleted, or the page has been read in to the buffer
-		pool.  Refuse to buffer the operation. */
-
-		/* The buffer pool watch is needed for IBUF_OP_DELETE
-		because of latching order considerations.  We can
-		check buf_pool_watch_occurred() only after latching
-		the insert buffer B-tree pages that contain buffered
-		changes for the page.  We never buffer IBUF_OP_DELETE,
-		unless some IBUF_OP_INSERT or IBUF_OP_DELETE_MARK have
-		been previously buffered for the page.  Because there
-		are buffered operations for the page, the insert
-		buffer B-tree page latches held by mtr will guarantee
-		that no changes for the user page will be merged
-		before mtr_commit(&mtr).  We must not mtr_commit(&mtr)
-		until after the IBUF_OP_DELETE has been buffered. */
-
-fail_exit:
-		if (BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) {
-			mutex_exit(&ibuf_mutex);
-			mutex_exit(&ibuf_pessimistic_insert_mutex);
-		}
-
-		err = DB_STRONG_FAIL;
-		goto func_exit;
-	}
-
 	/* After this point, the page could still be loaded to the
 	buffer pool, but we do not have to care about it, since we are
 	holding a latch on the insert buffer leaf page that contains
@@ -3363,7 +3236,14 @@ ibuf_insert_low(
 					   page_id.page_no())) {
 
 		ibuf_mtr_commit(&bitmap_mtr);
-		goto fail_exit;
+fail_exit:
+		if (BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) {
+			mutex_exit(&ibuf_mutex);
+			mutex_exit(&ibuf_pessimistic_insert_mutex);
+		}
+
+		err = DB_STRONG_FAIL;
+		goto func_exit;
 	}
 
 	if (op == IBUF_OP_INSERT) {
@@ -3561,7 +3441,7 @@ ibuf_insert(
 		case IBUF_USE_INSERT:
 		case IBUF_USE_INSERT_DELETE_MARK:
 		case IBUF_USE_ALL:
-			goto check_watch;
+			goto mode_ok;
 		}
 		break;
 	case IBUF_OP_DELETE_MARK:
@@ -3574,22 +3454,10 @@ ibuf_insert(
 		case IBUF_USE_INSERT_DELETE_MARK:
 		case IBUF_USE_ALL:
 			ut_ad(!no_counter);
-			goto check_watch;
+			goto mode_ok;
 		}
 		break;
 	case IBUF_OP_DELETE:
-		switch (use) {
-		case IBUF_USE_NONE:
-		case IBUF_USE_INSERT:
-		case IBUF_USE_INSERT_DELETE_MARK:
-			DBUG_RETURN(false);
-		case IBUF_USE_DELETE_MARK:
-		case IBUF_USE_DELETE:
-		case IBUF_USE_ALL:
-			ut_ad(!no_counter);
-			goto skip_watch;
-		}
-		break;
 	case IBUF_OP_COUNT:
 		break;
 	}
@@ -3597,36 +3465,7 @@ ibuf_insert(
 	/* unknown op or use */
 	ut_error;
 
-check_watch:
-	/* If a thread attempts to buffer an insert on a page while a
-	purge is in progress on the same page, the purge must not be
-	buffered, because it could remove a record that was
-	re-inserted later.  For simplicity, we block the buffering of
-	all operations on a page that has a purge pending.
-
-	We do not check this in the IBUF_OP_DELETE case, because that
-	would always trigger the buffer pool watch during purge and
-	thus prevent the buffering of delete operations.  We assume
-	that the issuer of IBUF_OP_DELETE has called
-	buf_pool_watch_set(space, page_no). */
-
-	{
-		buf_pool_t*	buf_pool = buf_pool_get(page_id);
-		buf_page_t*	bpage
-			= buf_page_get_also_watch(buf_pool, page_id);
-
-		if (bpage != NULL) {
-			/* A buffer pool watch has been set or the
-			page has been read into the buffer pool.
-			Do not buffer the request.  If a purge operation
-			is being buffered, have this request executed
-			directly on the page in the buffer pool after the
-			buffered entries for this page have been merged. */
-			DBUG_RETURN(false);
-		}
-	}
-
-skip_watch:
+mode_ok:
 	entry_size = rec_get_converted_size(index, entry, 0);
 
 	if (entry_size
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index 1185ab0ee86..31a1472b503 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -76,7 +76,7 @@ enum btr_latch_mode {
 	/** Continue searching the entire B-tree. */
 	BTR_CONT_SEARCH_TREE = 38,
 
-	/* BTR_INSERT, BTR_DELETE and BTR_DELETE_MARK are mutually
+	/* BTR_INSERT and BTR_DELETE_MARK are mutually
 	exclusive. */
 	/** The search tuple will be inserted to the secondary index
 	at the searched position.  When the leaf page is not in the
@@ -88,10 +88,6 @@ enum btr_latch_mode {
 	not in the buffer pool. */
 	BTR_DELETE_MARK	= 4096,
 
-	/** Try to purge the record using the change buffer when the
-	secondary index leaf page is not in the buffer pool. */
-	BTR_DELETE = 8192,
-
 	/** The caller is already holding dict_index_t::lock S-latch. */
 	BTR_ALREADY_S_LATCHED = 16384,
 	/** Search and S-latch a leaf page, assuming that the
@@ -114,11 +110,10 @@ enum btr_latch_mode {
 	BTR_DELETE_MARK_LEAF_ALREADY_S_LATCHED = BTR_DELETE_MARK_LEAF
 	| BTR_ALREADY_S_LATCHED,
 	/** Attempt to purge a secondary index record. */
-	BTR_PURGE_LEAF = BTR_MODIFY_LEAF | BTR_DELETE,
+	BTR_PURGE_LEAF = BTR_MODIFY_LEAF,
 	/** Attempt to purge a secondary index record
 	while holding the dict_index_t::lock S-latch. */
-	BTR_PURGE_LEAF_ALREADY_S_LATCHED = BTR_PURGE_LEAF
-	| BTR_ALREADY_S_LATCHED,
+	BTR_PURGE_LEAF_ALREADY_S_LATCHED = BTR_MODIFY_LEAF_ALREADY_S_LATCHED,
 
 	/** In the case of BTR_MODIFY_TREE, the caller specifies
 	the intention to delete record only. It is used to optimize
@@ -159,7 +154,6 @@ record is in spatial index */
 				| BTR_DELETE_MARK		\
 				| BTR_RTREE_UNDO_INS		\
 				| BTR_RTREE_DELETE_MARK		\
-				| BTR_DELETE			\
 				| BTR_ESTIMATE			\
 				| BTR_IGNORE_SEC_UNIQUE		\
 				| BTR_ALREADY_S_LATCHED		\
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 9d25e7e583b..4170c855262 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -192,7 +192,7 @@ btr_cur_search_to_nth_level_func(
 				search the position! */
 	ulint		latch_mode, /*!< in: BTR_SEARCH_LEAF, ..., ORed with
 				at most one of BTR_INSERT, BTR_DELETE_MARK,
-				BTR_DELETE, or BTR_ESTIMATE;
+				or BTR_ESTIMATE;
 				cursor->left_block is used to store a pointer
 				to the left neighbor page, in the cases
 				BTR_SEARCH_PREV and BTR_MODIFY_PREV;
@@ -882,9 +882,8 @@ enum btr_cur_method {
 				the insert buffer */
 	BTR_CUR_DEL_MARK_IBUF,	/*!< performed the intended delete
 				mark in the insert/delete buffer */
-	BTR_CUR_DELETE_IBUF,	/*!< performed the intended delete in
+	BTR_CUR_DELETE_IBUF	/*!< performed the intended delete in
 				the insert/delete buffer */
-	BTR_CUR_DELETE_REF	/*!< row_purge_poss_sec() failed */
 };
 
 /** The tree cursor: the definition appears here only for the compiler
@@ -892,7 +891,6 @@ to know struct size! */
 struct btr_cur_t {
 	dict_index_t*	index;		/*!< index where positioned */
 	page_cur_t	page_cur;	/*!< page cursor */
-	purge_node_t*	purge_node;	/*!< purge node, for BTR_DELETE */
 	buf_block_t*	left_block;	/*!< this field is used to store
 					a pointer to the left neighbor
 					page, in the cases
@@ -962,7 +960,6 @@ struct btr_cur_t {
 	{
 		index = NULL;
 		memset(&page_cur, 0, sizeof page_cur);
-		purge_node = NULL;
 		left_block = NULL;
 		thr = NULL;
 		flag = btr_cur_method(0);
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 1916c0047a4..0c0e333887c 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -58,10 +58,6 @@ struct fil_addr_t;
 					it is error-prone programming
 					not to set a latch, and it
 					should be used with care */
-#define BUF_GET_IF_IN_POOL_OR_WATCH	15
-					/*!< Get the page only if it's in the
-					buffer pool, if not then set a watch
-					on the page. */
 #define BUF_GET_POSSIBLY_FREED		16
 					/*!< Like BUF_GET, but do not mind
 					if the file page has been freed. */
@@ -85,9 +81,6 @@ struct fil_addr_t;
 					/*!< The maximum number of buffer
 					pools that can be defined */
 
-#define BUF_POOL_WATCH_SIZE		(srv_n_purge_threads + 1)
-					/*!< Maximum number of concurrent
-					buffer pool watches */
 #define MAX_PAGE_HASH_LOCKS	1024	/*!< The maximum number of
 					page_hash locks */
 
@@ -111,8 +104,6 @@ extern my_bool	buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
 
 The enumeration values must be 0..7. */
 enum buf_page_state {
-	BUF_BLOCK_POOL_WATCH,		/*!< a sentinel for the buffer pool
-					watch, element of buf_pool->watch[] */
 	BUF_BLOCK_ZIP_PAGE,		/*!< contains a clean
 					compressed page */
 	BUF_BLOCK_ZIP_DIRTY,		/*!< contains a compressed
@@ -445,7 +436,7 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size);
 @param[in]	rw_latch		RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
 @param[in]	guess			guessed block or NULL
 @param[in]	mode			BUF_GET, BUF_GET_IF_IN_POOL,
-BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
+BUF_PEEK_IF_IN_POOL, or BUF_GET_NO_LATCH
 @param[in]	file			file name
 @param[in]	line			line where called
 @param[in]	mtr			mini-transaction
@@ -1257,17 +1248,14 @@ found, NULL otherwise. If NULL is passed then the hash_lock is released by
 this function.
 @param[in]	lock_mode	RW_LOCK_X or RW_LOCK_S. Ignored if
 lock == NULL
-@param[in]	watch		if true, return watch sentinel also.
-@return pointer to the bpage or NULL; if NULL, lock is also NULL or
-a watch sentinel. */
+@return pointer to the bpage or NULL; if NULL, lock is also NULL. */
 UNIV_INLINE
 buf_page_t*
 buf_page_hash_get_locked(
 	buf_pool_t*		buf_pool,
 	const page_id_t		page_id,
 	rw_lock_t**		lock,
-	ulint			lock_mode,
-	bool			watch = false);
+	ulint			lock_mode);
 
 /** Returns the control block of a file page, NULL if not found.
 If the block is found and lock is not NULL then the appropriate
@@ -1304,9 +1292,6 @@ buf_page_hash_get_low() function.
 	buf_page_hash_get_locked(b, page_id, l, RW_LOCK_X)
 #define buf_page_hash_get(b, page_id)				\
 	buf_page_hash_get_locked(b, page_id, NULL, 0)
-#define buf_page_get_also_watch(b, page_id)			\
-	buf_page_hash_get_locked(b, page_id, NULL, 0, true)
-
 #define buf_block_hash_get_s_locked(b, page_id, l)		\
 	buf_block_hash_get_locked(b, page_id, l, RW_LOCK_S)
 #define buf_block_hash_get_x_locked(b, page_id, l)		\
@@ -1314,29 +1299,6 @@ buf_page_hash_get_low() function.
 #define buf_block_hash_get(b, page_id)				\
 	buf_block_hash_get_locked(b, page_id, NULL, 0)
 
-/********************************************************************//**
-Determine if a block is a sentinel for a buffer pool watch.
-@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
-ibool
-buf_pool_watch_is_sentinel(
-/*=======================*/
-	const buf_pool_t*	buf_pool,	/*!< buffer pool instance */
-	const buf_page_t*	bpage)		/*!< in: block */
-	MY_ATTRIBUTE((nonnull, warn_unused_result));
-
-/** Stop watching if the page has been read in.
-buf_pool_watch_set(space,offset) must have returned NULL before.
-@param[in]	page_id	page id */
-void buf_pool_watch_unset(const page_id_t page_id);
-
-/** Check if the page has been read in.
-This may only be called after buf_pool_watch_set(space,offset)
-has returned NULL and before invoking buf_pool_watch_unset(space,offset).
-@param[in]	page_id	page id
-@return FALSE if the given page was not read in, TRUE if it was */
-bool buf_pool_watch_occurred(const page_id_t page_id)
-MY_ATTRIBUTE((warn_unused_result));
-
 /********************************************************************//**
 Get total buffer pool statistics. */
 void
@@ -1498,10 +1460,7 @@ class buf_page_t {
 	/* @} */
 	page_zip_des_t	zip;		/*!< compressed page; zip.data
 					(but not the data it points to) is
-					also protected by buf_pool->mutex;
-					state == BUF_BLOCK_ZIP_PAGE and
-					zip.data == NULL means an active
-					buf_pool->watch */
+					also protected by buf_pool->mutex */
 
 	ulint           write_size;	/* Write size is set when this
 					page is first time written and then
@@ -2243,11 +2202,6 @@ struct buf_pool_t{
 #endif
 	/* @} */
 
-	buf_page_t*			watch;
-					/*!< Sentinel records for buffer
-					pool watches. Protected by
-					buf_pool->mutex. */
-
 	/** Temporary memory for page_compressed and encrypted I/O */
 	struct io_buf_t {
 		/** number of elements in slots[] */
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 970119edd6e..2f5cd8e727a 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -217,7 +217,6 @@ buf_page_get_state(
 
 #ifdef UNIV_DEBUG
 	switch (state) {
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
 	case BUF_BLOCK_NOT_USED:
@@ -258,8 +257,6 @@ buf_get_state_name(
 	enum buf_page_state	state = buf_page_get_state(&block->page);
 
 	switch (state) {
-	case BUF_BLOCK_POOL_WATCH:
-		return (const char *) "BUF_BLOCK_POOL_WATCH";
 	case BUF_BLOCK_ZIP_PAGE:
 		return (const char *) "BUF_BLOCK_ZIP_PAGE";
 	case BUF_BLOCK_ZIP_DIRTY:
@@ -292,9 +289,6 @@ buf_page_set_state(
 	enum buf_page_state	old_state	= buf_page_get_state(bpage);
 
 	switch (old_state) {
-	case BUF_BLOCK_POOL_WATCH:
-		ut_error;
-		break;
 	case BUF_BLOCK_ZIP_PAGE:
 		ut_a(state == BUF_BLOCK_ZIP_DIRTY);
 		break;
@@ -362,9 +356,6 @@ buf_page_in_file(
 	const buf_page_t*	bpage)	/*!< in: pointer to control block */
 {
 	switch (buf_page_get_state(bpage)) {
-	case BUF_BLOCK_POOL_WATCH:
-		ut_error;
-		break;
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
 	case BUF_BLOCK_FILE_PAGE:
@@ -406,9 +397,6 @@ buf_page_get_mutex(
 	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
 
 	switch (buf_page_get_state(bpage)) {
-	case BUF_BLOCK_POOL_WATCH:
-		ut_error;
-		return(NULL);
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
 		return(&buf_pool->zip_mutex);
@@ -726,11 +714,9 @@ buf_block_get_frame(
 	}
 
 	switch (buf_block_get_state(block)) {
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
 	case BUF_BLOCK_NOT_USED:
-		ut_error;
 		break;
 	case BUF_BLOCK_FILE_PAGE:
 		ut_a(block->page.buf_fix_count > 0);
@@ -1071,17 +1057,14 @@ found, NULL otherwise. If NULL is passed then the hash_lock is released by
 this function.
 @param[in]	lock_mode	RW_LOCK_X or RW_LOCK_S. Ignored if
 lock == NULL
-@param[in]	watch		if true, return watch sentinel also.
-@return pointer to the bpage or NULL; if NULL, lock is also NULL or
-a watch sentinel. */
+@return pointer to the bpage or NULL; if NULL, lock is also NULL */
 UNIV_INLINE
 buf_page_t*
 buf_page_hash_get_locked(
 	buf_pool_t*		buf_pool,
 	const page_id_t		page_id,
 	rw_lock_t**		lock,
-	ulint			lock_mode,
-	bool			watch)
+	ulint			lock_mode)
 {
 	buf_page_t*	bpage = NULL;
 	rw_lock_t*	hash_lock;
@@ -1114,10 +1097,7 @@ buf_page_hash_get_locked(
 
 	bpage = buf_page_hash_get_low(buf_pool, page_id);
 
-	if (!bpage || buf_pool_watch_is_sentinel(buf_pool, bpage)) {
-		if (!watch) {
-			bpage = NULL;
-		}
+	if (!bpage) {
 		goto unlock_and_exit;
 	}
 
@@ -1239,7 +1219,6 @@ buf_page_release_zip(
 		reinterpret_cast<buf_block_t*>(bpage)->unfix();
 		return;
 
-	case BUF_BLOCK_POOL_WATCH:
 	case BUF_BLOCK_NOT_USED:
 	case BUF_BLOCK_READY_FOR_USE:
 	case BUF_BLOCK_MEMORY:
diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index 356e120a7bc..130cf43ab5f 100644
--- a/storage/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
@@ -41,6 +41,7 @@ ibuf_insert(). DO NOT CHANGE THE VALUES OF THESE, THEY ARE STORED ON DISK. */
 typedef enum {
 	IBUF_OP_INSERT = 0,
 	IBUF_OP_DELETE_MARK = 1,
+	/** No longer written; may still exist in the change buffer
+	after an upgrade from an earlier version */
 	IBUF_OP_DELETE = 2,
 
 	/* Number of different operation types. */
@@ -54,7 +55,8 @@ enum ibuf_use_t {
 	IBUF_USE_INSERT,	/* insert */
 	IBUF_USE_DELETE_MARK,	/* delete */
 	IBUF_USE_INSERT_DELETE_MARK,	/* insert+delete */
-	IBUF_USE_DELETE,	/* delete+purge */
+	/** same as IBUF_USE_DELETE_MARK */
+	IBUF_USE_DELETE,
 	IBUF_USE_ALL		/* insert+delete+purge */
 };
 
diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index 3fe6a6a82d7..870ab4bfb86 100644
--- a/storage/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
@@ -1,7 +1,7 @@
 /*****************************************************************************
 
 Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2017, MariaDB Corporation.
+Copyright (c) 2016, 2019, MariaDB Corporation.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -354,13 +354,11 @@ row_parse_int(
 enum row_search_result {
 	ROW_FOUND = 0,		/*!< the record was found */
 	ROW_NOT_FOUND,		/*!< record not found */
-	ROW_BUFFERED,		/*!< one of BTR_INSERT, BTR_DELETE, or
+	ROW_BUFFERED		/*!< one of BTR_INSERT or
 				BTR_DELETE_MARK was specified, the
 				secondary index leaf page was not in
 				the buffer pool, and the operation was
 				enqueued in the insert/delete buffer */
-	ROW_NOT_DELETED_REF	/*!< BTR_DELETE was specified, and
-				row_purge_poss_sec() failed */
 };
 
 /***************************************************************//**
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index caffeab6af0..dbae7d10594 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -1868,7 +1868,6 @@ row_log_table_apply_delete_low(
 			      pcur, mtr);
 #ifdef UNIV_DEBUG
 		switch (btr_pcur_get_btr_cur(pcur)->flag) {
-		case BTR_CUR_DELETE_REF:
 		case BTR_CUR_DEL_MARK_IBUF:
 		case BTR_CUR_DELETE_IBUF:
 		case BTR_CUR_INSERT_TO_IBUF:
@@ -1949,7 +1948,6 @@ row_log_table_apply_delete(
 		      &pcur, &mtr);
 #ifdef UNIV_DEBUG
 	switch (btr_pcur_get_btr_cur(&pcur)->flag) {
-	case BTR_CUR_DELETE_REF:
 	case BTR_CUR_DEL_MARK_IBUF:
 	case BTR_CUR_DELETE_IBUF:
 	case BTR_CUR_INSERT_TO_IBUF:
@@ -2095,7 +2093,6 @@ row_log_table_apply_update(
 		      BTR_MODIFY_TREE, &pcur, &mtr);
 #ifdef UNIV_DEBUG
 	switch (btr_pcur_get_btr_cur(&pcur)->flag) {
-	case BTR_CUR_DELETE_REF:
 	case BTR_CUR_DEL_MARK_IBUF:
 	case BTR_CUR_DELETE_IBUF:
 	case BTR_CUR_INSERT_TO_IBUF:
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 04dfcbad0d3..3b101a2a29d 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -434,10 +434,9 @@ row_purge_remove_sec_if_poss_tree(
 	case ROW_FOUND:
 		break;
 	case ROW_BUFFERED:
-	case ROW_NOT_DELETED_REF:
-		/* These are invalid outcomes, because the mode passed
+		/* This is invalid, because the mode passed
 		to row_search_index_entry() did not include any of the
-		flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+		flags BTR_INSERT or BTR_DELETE_MARK. */
 		ut_error;
 	}
 
@@ -556,16 +555,8 @@ row_purge_remove_sec_if_poss_leaf(
 			: BTR_PURGE_LEAF;
 	}
 
-	/* Set the purge node for the call to row_purge_poss_sec(). */
-	pcur.btr_cur.purge_node = node;
 	if (dict_index_is_spatial(index)) {
 		rw_lock_sx_lock(dict_index_get_lock(index));
-		pcur.btr_cur.thr = NULL;
-	} else {
-		/* Set the query thread, so that ibuf_insert_low() will be
-		able to invoke thd_get_trx(). */
-		pcur.btr_cur.thr = static_cast<que_thr_t*>(
-			que_node_get_parent(node));
 	}
 
 	search_result = row_search_index_entry(
@@ -654,8 +645,6 @@ row_purge_remove_sec_if_poss_leaf(
 		/* (The index entry is still needed,
 		or the deletion succeeded) */
 		/* fall through */
-	case ROW_NOT_DELETED_REF:
-		/* The index entry is still needed. */
 	case ROW_BUFFERED:
 		/* The deletion was buffered. */
 	case ROW_NOT_FOUND:
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 57e2c8e4fdb..2ec23fb65f0 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1314,10 +1314,6 @@ row_search_index_entry(
 	}
 
 	switch (btr_pcur_get_btr_cur(pcur)->flag) {
-	case BTR_CUR_DELETE_REF:
-		ut_a(mode & BTR_DELETE && !dict_index_is_spatial(index));
-		return(ROW_NOT_DELETED_REF);
-
 	case BTR_CUR_DEL_MARK_IBUF:
 	case BTR_CUR_DELETE_IBUF:
 	case BTR_CUR_INSERT_TO_IBUF:
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 88dea6c7995..bdeaabe8131 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -295,10 +295,9 @@ row_undo_ins_remove_sec_low(
 		break;
 
 	case ROW_BUFFERED:
-	case ROW_NOT_DELETED_REF:
-		/* These are invalid outcomes, because the mode passed
+		/* This is invalid, because the mode passed
 		to row_search_index_entry() did not include any of the
-		flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+		flags BTR_INSERT or BTR_DELETE_MARK. */
 		ut_error;
 	}
 
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 9820fc3b06e..4d3633487b3 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -580,10 +580,9 @@ row_undo_mod_del_mark_or_remove_sec_low(
 	case ROW_FOUND:
 		break;
 	case ROW_BUFFERED:
-	case ROW_NOT_DELETED_REF:
-		/* These are invalid outcomes, because the mode passed
+		/* This is invalid, because the mode passed
 		to row_search_index_entry() did not include any of the
-		flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+		flags BTR_INSERT or BTR_DELETE_MARK. */
 		ut_error;
 	}
 
@@ -758,10 +757,9 @@ row_undo_mod_del_unmark_sec_and_undo_update(
 		mem_heap_t*	offsets_heap;
 		ulint*		offsets;
 	case ROW_BUFFERED:
-	case ROW_NOT_DELETED_REF:
-		/* These are invalid outcomes, because the mode passed
+		/* This is invalid, because the mode passed
 		to row_search_index_entry() did not include any of the
-		flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
+		flags BTR_INSERT or BTR_DELETE_MARK. */
 		ut_error;
 	case ROW_NOT_FOUND:
 		/* For spatial index, if first search didn't find an
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index db39b486d0e..f9200132304 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -2388,9 +2388,6 @@ row_upd_sec_index_entry(
 	rec = btr_cur_get_rec(btr_cur);
 
 	switch (search_result) {
-	case ROW_NOT_DELETED_REF:	/* should only occur for BTR_DELETE */
-		ut_error;
-		break;
 	case ROW_BUFFERED:
 		/* Entry was delete marked already. */
 		break;
-- 
2.23.0

