/***************************************************************************
 * foxxll/mng/prefetch_pool.hpp
 *
 * Part of FOXXLL. See http://foxxll.org
 *
 * Copyright (C) 2003-2004 Roman Dementiev <[email protected]>
 * Copyright (C) 2009 Andreas Beckmann <[email protected]>
 *
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/
13 
14 #ifndef FOXXLL_MNG_PREFETCH_POOL_HEADER
15 #define FOXXLL_MNG_PREFETCH_POOL_HEADER
16 
#include <algorithm>
#include <list>
#include <unordered_map>
#include <utility>

#include <tlx/logger/core.hpp>

#include <foxxll/config.hpp>
#include <foxxll/mng/write_pool.hpp>
26 
27 namespace foxxll {
28 
29 //! \addtogroup foxxll_schedlayer
30 //! \{
31 
32 //! Implements dynamically resizable prefetching pool.
33 template <class BlockType>
35 {
36  constexpr static bool debug = false;
37 
38 public:
39  using block_type = BlockType;
40  using bid_type = typename block_type::bid_type;
41 
42 protected:
43  struct bid_hash
44  {
45  size_t operator () (const bid_type& bid) const noexcept
46  {
47  size_t result = size_t(bid.storage) +
48  size_t(bid.offset & 0xffffffff) +
49  size_t(bid.offset >> 32);
50  return result;
51  }
52 #if FOXXLL_MSVC
53  bool operator () (const bid_type& a, const bid_type& b) const noexcept
54  {
55  return (a.storage < b.storage) || (a.storage == b.storage && a.offset < b.offset);
56  }
57  enum
58  { // parameters for hash table
59  bucket_size = 4, // 0 < bucket_size
60  min_buckets = 8 // min_buckets = 2 ^^ N, 0 < N
61  };
62 #endif
63  };
64  using busy_entry = std::pair<block_type*, request_ptr>;
65  using unordered_map_type = typename std::unordered_map<bid_type, busy_entry, bid_hash>;
66  using free_blocks_iterator = typename std::list<block_type*>::iterator;
67  using busy_blocks_iterator = typename unordered_map_type::iterator;
68 
69  //! contains free prefetch blocks
70  std::list<block_type*> free_blocks;
71 
72  //! blocks that are in reading or already read but not retrieved by user
74 
75  //! count number of free blocks, since traversing the std::list is slow.
77 
78 public:
79  //! Constructs pool.
80  //! \param init_size initial number of blocks in the pool
81  explicit prefetch_pool(size_t init_size = 1)
82  : free_blocks_size(init_size)
83  {
84  size_t i = 0;
85  for ( ; i < init_size; ++i)
86  free_blocks.push_back(new block_type);
87  }
88 
89  //! non-copyable: delete copy-constructor
90  prefetch_pool(const prefetch_pool&) = delete;
91  //! non-copyable: delete assignment operator
92  prefetch_pool& operator = (const prefetch_pool&) = delete;
93 
94  void swap(prefetch_pool& obj)
95  {
96  std::swap(free_blocks, obj.free_blocks);
97  std::swap(busy_blocks, obj.busy_blocks);
98  std::swap(free_blocks_size, obj.free_blocks_size);
99  }
100 
101  //! Waits for completion of all ongoing read requests and frees memory.
102  virtual ~prefetch_pool()
103  {
104  while (!free_blocks.empty())
105  {
106  delete free_blocks.back();
107  free_blocks.pop_back();
108  }
109 
110  try
111  {
112  busy_blocks_iterator i2 = busy_blocks.begin();
113  for ( ; i2 != busy_blocks.end(); ++i2)
114  {
115  i2->second.second->wait();
116  delete i2->second.first;
117  }
118  }
119  catch (...)
120  { }
121  }
122 
123  //! Returns number of owned blocks.
124  size_t size() const
125  {
126  return free_blocks_size + busy_blocks.size();
127  }
128 
129  //! Returns the number of free prefetching blocks.
130  size_t free_size() const
131  {
132  return free_blocks_size;
133  }
134 
135  //! Returns the number of busy prefetching blocks.
136  size_t busy_size() const
137  {
138  return busy_blocks.size();
139  }
140 
141  //! Add a new block to prefetch pool, enlarges size of pool.
142  void add(block_type*& block)
143  {
144  free_blocks.push_back(block);
146  block = nullptr; // prevent caller from using the block any further
147  }
148 
149  //! Take out a block from the pool, one unhinted free block must be
150  //! available.
151  //! \return pointer to the block. Ownership of the block goes to the caller.
153  {
154  tlx_die_unless(!free_blocks.empty());
155 
156  block_type* p = free_blocks.back();
157  free_blocks.pop_back();
159  return p;
160  }
161 
162  /*!
163  * Gives a hint for prefetching a block, the block may or may not be read
164  * into a prefetch buffer.
165  *
166  * \param bid address of a block to be prefetched
167  * \return \c true if there was a free block to do prefetch and
168  * prefetching was scheduled, \c false otherwise
169  *
170  * \note If there are no free blocks available (all blocks are already in
171  * reading or read but not retrieved by user calling \c read method)
172  * calling \c hint function has no effect
173  */
174  bool hint(bid_type bid)
175  {
176  // if block is already hinted, no need to hint it again
177  if (in_prefetching(bid)) {
178  TLX_LOG << "prefetch_pool::hint2 bid=" << bid << " was already cached";
179  return true;
180  }
181 
182  if (free_blocks_size) // only if we have a free block
183  {
185  block_type* block = free_blocks.back();
186  free_blocks.pop_back();
187  TLX_LOG << "prefetch_pool::hint bid=" << bid << " => prefetching";
188  request_ptr req = block->read(bid);
189  busy_blocks[bid] = busy_entry(block, req);
190  return true;
191  }
192  TLX_LOG << "prefetch_pool::hint bid=" << bid << " => no free blocks for prefetching";
193  return false;
194  }
195 
196  /*!
197  * Gives a hint for prefetching a block, the block may or may not be read
198  * into a prefetch buffer. This variant checks if the write pool is
199  * currently writing said block.
200  *
201  * \param bid address of a block to be prefetched
202  * \return \c true if there was a free block to do prefetch and
203  * prefetching was scheduled, \c false otherwise
204  *
205  * \param w_pool The corresponding write pool; so the method can check
206  * if the block is maybe still in RAM.
207  *
208  * \note If there are no free blocks available (all blocks are already in
209  * reading or read but not retrieved by user calling \c read method)
210  * calling \c hint function has no effect
211  */
213  {
214  // if block is already hinted, no need to hint it again
215  if (in_prefetching(bid)) {
216  TLX_LOG << "prefetch_pool::hint2 bid=" << bid << " was already cached";
217  return true;
218  }
219 
220  if (free_blocks_size) // only if we have a free block
221  {
223  block_type* block = free_blocks.back();
224  free_blocks.pop_back();
225  if (w_pool.has_request(bid))
226  {
227  busy_entry wp_request = w_pool.steal_request(bid);
228  TLX_LOG << "prefetch_pool::hint2 bid=" << bid << " was in write cache at " << wp_request.first;
229  assert(wp_request.first != 0);
230  w_pool.add(block); //in exchange
231  busy_blocks[bid] = wp_request;
232  return true;
233  }
234  TLX_LOG << "prefetch_pool::hint2 bid=" << bid << " => prefetching";
235  request_ptr req = block->read(bid);
236  busy_blocks[bid] = busy_entry(block, req);
237  return true;
238  }
239  TLX_LOG << "prefetch_pool::hint2 bid=" << bid << " => no free blocks for prefetching";
240  return false;
241  }
242 
243  //! Cancel a hint request in case the block is no longer desired.
245  {
246  busy_blocks_iterator cache_el = busy_blocks.find(bid);
247  if (cache_el == busy_blocks.end())
248  return false;
249 
250  // cancel request if it is a read request, there might be
251  // write requests 'stolen' from a write_pool that may not be canceled
252  if (cache_el->second.second->op() == request::READ)
253  cache_el->second.second->cancel();
254  // finish the request
255  cache_el->second.second->wait();
257  free_blocks.push_back(cache_el->second.first);
258  busy_blocks.erase(cache_el);
259  return true;
260  }
261 
262  //! Checks if a block is in the hinted block set.
264  {
265  return (busy_blocks.find(bid) != busy_blocks.end());
266  }
267 
268  //! Returns the request pointer for a hinted block, or an invalid nullptr
269  //! request in case it was not requested due to lack of prefetch buffers.
271  {
272  busy_blocks_iterator cache_el = busy_blocks.find(bid);
273 
274  if (cache_el == busy_blocks.end())
275  return request_ptr(); // invalid pointer
276  else
277  return cache_el->second.second;
278  }
279 
280  //! Returns true if the blocks was hinted and the request is finished.
281  bool poll(bid_type bid)
282  {
283  request_ptr req = find(bid);
284  return req.valid() ? req->poll() : false;
285  }
286 
287  /*!
288  * Reads block. If this block is cached block is not read but passed from
289  * the cache.
290  *
291  * \param block block object, where data to be read to. If block was cached
292  * \c block 's ownership goes to the pool and block from cache is returned
293  * in \c block value.
294  *
295  * \param bid address of the block
296  *
297  * \warning \c block parameter must be allocated dynamically using \c new .
298  *
299  * \return request pointer object of read operation
300  */
302  {
303  busy_blocks_iterator cache_el = busy_blocks.find(bid);
304  if (cache_el == busy_blocks.end())
305  {
306  // not cached
307  TLX_LOG << "prefetch_pool::read bid=" << bid << " => no copy in cache, retrieving to " << block;
308  return block->read(bid);
309  }
310 
311  // cached
312  TLX_LOG << "prefetch_pool::read bid=" << bid << " => copy in cache exists";
314  free_blocks.push_back(block);
315  block = cache_el->second.first;
316  request_ptr result = cache_el->second.second;
317  busy_blocks.erase(cache_el);
318  return result;
319  }
320 
322  {
323  // try cache
324  busy_blocks_iterator cache_el = busy_blocks.find(bid);
325  if (cache_el != busy_blocks.end())
326  {
327  // cached
328  TLX_LOG << "prefetch_pool::read bid=" << bid << " => copy in cache exists";
330  free_blocks.push_back(block);
331  block = cache_el->second.first;
332  request_ptr result = cache_el->second.second;
333  busy_blocks.erase(cache_el);
334  return result;
335  }
336 
337  // try w_pool cache
338  if (w_pool.has_request(bid))
339  {
340  busy_entry wp_request = w_pool.steal_request(bid);
341  TLX_LOG << "prefetch_pool::read bid=" << bid << " was in write cache at " << wp_request.first;
342  assert(wp_request.first != 0);
343  w_pool.add(block); //in exchange
344  block = wp_request.first;
345  return wp_request.second;
346  }
347 
348  // not cached
349  TLX_LOG << "prefetch_pool::read bid=" << bid << " => no copy in cache, retrieving to " << block;
350  return block->read(bid);
351  }
352 
353  //! Resizes size of the pool.
354  //! \param new_size desired size of the pool. If some
355  //! blocks are used for prefetching, these blocks can't be freed.
356  //! Only free blocks (not in prefetching) can be freed by reducing
357  //! the size of the pool calling this method.
358  //! \return new size of the pool
359  size_t resize(size_t new_size)
360  {
361  int64_t diff = int64_t(new_size) - int64_t(size());
362  if (diff > 0)
363  {
364  free_blocks_size += diff;
365  while (--diff >= 0)
366  free_blocks.push_back(new block_type);
367 
368  return size();
369  }
370 
371  while (diff < 0 && free_blocks_size > 0)
372  {
373  ++diff;
375  delete free_blocks.back();
376  free_blocks.pop_back();
377  }
378  return size();
379  }
380 };
381 
382 //! \}
383 
384 } // namespace foxxll
385 
386 namespace std {
387 
388 template <class BlockType>
391 {
392  a.swap(b);
393 }
394 
395 } // namespace std
396 
397 #endif // !FOXXLL_MNG_PREFETCH_POOL_HEADER
398 
399 /**************************************************************************/
Implements dynamically resizable prefetching pool.
bool invalidate(bid_type bid)
Cancel a hint request in case the block is no longer desired.
typename std::list< block_type * >::iterator free_blocks_iterator
bool has_request(bid_type bid)
Definition: write_pool.hpp:195
request_ptr read(block_type *&block, bid_type bid)
Reads block.
size_t size() const
Returns number of owned blocks.
request_ptr find(bid_type bid)
tlx::counting_ptr< request > request_ptr
A reference counting pointer for request.
Definition: request.hpp:43
prefetch_pool & operator=(const prefetch_pool &)=delete
non-copyable: delete assignment operator
Implements dynamically resizable buffered writing pool.
Definition: write_pool.hpp:38
std::list< block_type * > free_blocks
contains free prefetch blocks
std::pair< block_type *, request_ptr > steal_request(bid_type bid)
Definition: write_pool.hpp:206
void add(block_type *&block)
Add a new block to prefetch pool, enlarges size of pool.
STL namespace.
bool valid() const noexcept
test for a non-nullptr pointer
unordered_map_type busy_blocks
blocks that are in reading or already read but not retrieved by user
typename unordered_map_type::iterator busy_blocks_iterator
static constexpr bool debug
size_t resize(size_t new_size)
bool in_prefetching(bid_type bid)
Checks if a block is in the hinted block set.
FOXXLL library namespace
typename block_type::bid_type bid_type
virtual ~prefetch_pool()
Waits for completion of all ongoing read requests and frees memory.
size_t busy_size() const
Returns the number of busy prefetching blocks.
request_ptr read(block_type *&block, bid_type bid, write_pool< block_type > &w_pool)
High-performance smart pointer used as a wrapping reference counting pointer.
void swap(prefetch_pool &obj)
typename std::unordered_map< bid_type, busy_entry, bid_hash > unordered_map_type
void add(block_type *&block)
Definition: write_pool.hpp:227
size_t free_blocks_size
count number of free blocks, since traversing the std::list is slow.
bool poll(bid_type bid)
Returns true if the blocks was hinted and the request is finished.
std::pair< block_type *, request_ptr > busy_entry
prefetch_pool(size_t init_size=1)
size_t free_size() const
Returns the number of free prefetching blocks.
bool hint(bid_type bid)
Gives a hint for prefetching a block, the block may or may not be read into a prefetch buffer...
size_t operator()(const bid_type &bid) const noexcept
bool hint(bid_type bid, write_pool< block_type > &w_pool)
Gives a hint for prefetching a block, the block may or may not be read into a prefetch buffer...
#define TLX_LOG
Default logging method: output if the local debug variable is true.
Definition: core.hpp:141
#define tlx_die_unless(X)
Definition: core.hpp:65