Thrill  0.1
block_scheduler.hpp
Go to the documentation of this file.
1 /***************************************************************************
2  * foxxll/mng/block_scheduler.hpp
3  *
4  * Part of FOXXLL. See http://foxxll.org
5  *
6  * Copyright (C) 2010-2011 Raoul Steffen <[email protected]>
7  *
8  * Distributed under the Boost Software License, Version 1.0.
9  * (See accompanying file LICENSE_1_0.txt or copy at
10  * http://www.boost.org/LICENSE_1_0.txt)
11  **************************************************************************/
12 
13 #ifndef FOXXLL_MNG_BLOCK_SCHEDULER_HEADER
14 #define FOXXLL_MNG_BLOCK_SCHEDULER_HEADER
15 
16 #include <cassert>
17 
18 #include <algorithm>
19 #include <deque>
20 #include <functional>
21 #include <limits>
22 #include <list>
23 #include <map>
24 #include <queue>
25 #include <set>
26 #include <stack>
27 #include <string>
28 #include <utility>
29 #include <vector>
30 
31 #include <tlx/logger/core.hpp>
32 #include <tlx/unused.hpp>
33 
36 
38 
39 namespace foxxll {
40 
41 //! Virtualization of a block of data.
42 //! Holds information for allocating and swapping. To use in cooperation with block_scheduler.
43 //!
44 //! A swappable_block can be uninitialized, i.e. it holds no data.
 45  //! When access is required, it has to be acquired first, and released afterwards, so it can be swapped in and out as required.
46 //! If the stored data is no longer needed, it can get uninitialized, freeing both internal and external memory.
47 //! \tparam ValueType type of contained objects (POD with no references to internal memory).
48 //! \tparam BlockSize Number of objects in one block.
49 //! BlockSize*sizeof(ValueType) must be divisible by 4096.
50 template <typename ValueType, size_t BlockSize>
52 {
53  constexpr static bool debug = false;
54 
55 protected:
56  static const size_t raw_block_size = BlockSize * sizeof(ValueType);
57 
58 public:
61 
62 protected:
63  external_block_type external_data; //!external_data.valid if no associated space on disk
64  internal_block_type* internal_data; //nullptr if there is no internal memory reserved
65  bool dirty;
67 
68  static size_t disk_allocation_offset;
69 
71  { block_manager::get_instance()->new_block(striping(), external_data, ++disk_allocation_offset); }
72 
74  {
75  block_manager::get_instance()->delete_block(external_data);
76  external_data = external_block_type(); // make invalid
77  }
78 
79 public:
80  //! Create in uninitialized state.
82  : external_data() /*!valid*/, internal_data(0), dirty(false), reference_count(0) { }
83 
85 
86  //! If it has an internal_block. The internal_block implicitly holds valid data.
87  bool is_internal() const
88  { return (internal_data != nullptr); }
89 
90  //! If the external_block does not hold valid data.
91  bool is_dirty() const
92  { return dirty; }
93 
94  //! If it has an external_block.
95  bool has_external_block() const
96  { return external_data.valid(); }
97 
98  //! If it has an external_block that holds valid data.
99  bool is_external() const
100  { return has_external_block() && ! is_dirty(); }
101 
102  //! If it is acquired.
103  bool is_acquired() const
104  { return reference_count > 0; }
105 
106  //! If it holds an internal_block but does not need it.
107  bool is_evictable() const
108  { return ! is_acquired() && is_internal(); }
109 
110  //! If it has some valid data (in- or external).
111  bool is_initialized() const
112  { return is_internal() || is_external(); }
113 
114  //! Invalidate external data if true.
115  //! \return is_dirty()
116  bool make_dirty_if(const bool make_dirty)
117  {
118  assert(is_acquired());
119  return dirty = make_dirty || dirty;
120  }
121 
122  //! Acquire the block, i.e. add a reference. Has to be internal.
123  //! \return A reference to the data-block.
125  {
126  assert(is_internal());
127  ++reference_count;
128  return *internal_data;
129  }
130 
131  //! Release the block, i.e. subduct a reference. Has to be acquired.
132  void release()
133  {
134  assert(is_acquired());
135  --reference_count;
136  }
137 
138  //! Get a reference to the data-block. Has to be acquired.
140  {
141  assert(is_acquired());
142  return *internal_data;
143  }
144 
145  //! Get a reference to the data-block. Has to be acquired.
147  {
148  assert(is_acquired());
149  return *internal_data;
150  }
151 
152  //! Fill block with default data, is supposed to be overwritten by subclass. Has to be internal.
153  void fill_default() { }
154 
 155  //! Read asynchronously from external_block to internal_block. Has to be internal and have an external_block.
156  //! \return A request pointer to the I/O.
158  {
159  assert(is_internal());
160  assert(has_external_block());
161  TLX_LOG << "reading block";
162  dirty = false;
163  return internal_data->read(external_data, on_cmpl);
164  }
165 
166  //! Read synchronously from external_block to internal_block. Has to be internal and have an external_block.
167  void read_sync()
168  { read_async()->wait(); }
169 
 170  //! Write asynchronously from internal_block to external_block if necessary.
171  //! \return A request pointer to the I/O, an invalid request pointer if not necessary.
173  {
174  if (! is_dirty())
175  return request_ptr();
176  if (! has_external_block())
178 
179  TLX_LOG << "writing block";
180 
181  dirty = false;
182  return internal_data->write(external_data, on_cmpl);
183  }
184 
185  //! Write synchronously from internal_block to external_block if necessary.
186  void clean_sync()
187  {
188  request_ptr rp = clean_async();
189  if (rp.valid())
190  rp->wait();
191  }
192 
193  //! Attach an internal_block, making the block internal. Has to be not internal.
195  {
196  assert(! is_internal());
197  internal_data = iblock;
198  }
199 
200  //! Detach the internal_block. Writes to external_block if necessary. Has to be evictable.
201  //! \return A pointer to the internal_block.
203  {
204  assert(is_evictable());
205  clean_sync();
207  internal_data = 0;
208  return iblock;
209  }
210 
211  //! Bring the block in uninitialized state, freeing external and internal memory.
212  //! Returns a pointer to the internal_block, nullptr if it had none.
213  //! \return A pointer to the freed internal_block, nullptr if it had none.
215  {
216  assert(! is_acquired());
217  dirty = false;
218  // free external_block (so that it becomes invalid and the disk-space can be used again)
219  if (has_external_block())
221  // free internal_block
223  internal_data = 0;
224  return iblock;
225  }
226 
227  //! Set the external_block that holds the swappable_block's data. The block gets initialized with it.
228  //! \param eblock The external_block holding initial data.
230  {
231  assert(! is_initialized());
232  external_data = eblock;
233  }
234 
235  //! Extract the swappable_blocks data in an external_block. The block gets uninitialized.
236  //! \return The external_block that holds the swappable_block's data.
238  {
239  assert(! is_internal());
241  external_data = external_block_type();
242  return eblock;
243  }
244 };
245 
246 template <typename ValueType, size_t BlockSize>
248 
249 template <class SwappableBlockType>
251 
252 template <class SwappableBlockType>
254 
255 //! Schedules swapping of blocks and provides blocks for temporary storage.
256 //!
257 //! In simple mode, it tries to save I/Os through caching only.
258 //! In simulation mode, it records access patterns into a prediction sequence.
259 //! The prediction sequence can then be used for prefetching in the (offline) execute mode.
260 //! This will only work for algorithms with deterministic, data oblivious access patterns.
261 //! In simulation mode, no I/O is performed; the data provided is accessible but undefined.
262 //! In execute mode, it does caching, prefetching, and possibly other optimizations.
263 //! \tparam SwappableBlockType Type of swappable_blocks to manage. Can be some specialized subclass.
264 template <class SwappableBlockType>
266 {
267  constexpr static bool debug = false;
268 
269 protected:
270  // tuning-parameter: To acquire blocks, internal memory has to be allocated.
271  // This constant limits the number of internal_blocks allocated at once.
273 
274  using time_type = size_t;
275 
276 public:
277  using internal_block_type = typename SwappableBlockType::internal_block_type;
278  using external_block_type = typename SwappableBlockType::external_block_type;
279  using swappable_block_identifier_type = typename std::vector<SwappableBlockType>::size_type;
280 
281  /*/! Mode the block scheduler currently works in
282  enum mode
283  {
284  online, //serve requests immediately, without any prediction, LRU caching
285  simulation, //record prediction sequence only, do not serve requests, (returned blocks must not be accessed)
286  offline_lfd, //serve requests based on prediction sequence, using longest-forward-distance caching
287  offline_lfd_prefetch //serve requests based on prediction sequence, using longest-forward-distance caching, and prefetching
288  };*/
289 
290 public:
291  // -------- prediction_sequence -------
293  {
300  op_extract_external_block
301  };
302 
304  {
308 
311  : op(op), id(id), time(time) { }
312  };
313 
314  using prediction_sequence_type = std::list<prediction_sequence_element>;
315  // ---- end prediction_sequence -------
316 
317 protected:
318  template <class SBT>
320 
321  const size_t max_internal_blocks;
323  //! Stores pointers to arrays of internal_blocks. Used to deallocate them only.
324  std::stack<internal_block_type*> internal_blocks_blocks;
325  //! holds free internal_blocks with attributes reset.
326  std::stack<internal_block_type*> free_internal_blocks;
327  //! temporary blocks that will not be needed after algorithm termination.
328  mutable std::vector<SwappableBlockType> swappable_blocks;
329  //! holds indices of free swappable_blocks with attributes reset.
330  std::priority_queue<swappable_block_identifier_type, std::vector<swappable_block_identifier_type>,
331  std::greater<swappable_block_identifier_type> > free_swappable_blocks;
334 
335  //! Get an internal_block from the freelist or a newly allocated one if available.
336  //! \return Pointer to the internal_block. nullptr if none available.
338  {
339  if (! free_internal_blocks.empty())
340  {
341  // => there are internal_blocks in the free-list
342  internal_block_type* iblock = free_internal_blocks.top();
343  free_internal_blocks.pop();
344  return iblock;
345  }
346  else if (remaining_internal_blocks > 0)
347  {
348  // => more internal_blocks can be allocated
349  size_t num_blocks = std::min(max_internal_blocks_alloc_at_once, remaining_internal_blocks);
350  remaining_internal_blocks -= num_blocks;
351  internal_block_type* iblocks = new internal_block_type[num_blocks];
352  internal_blocks_blocks.push(iblocks);
353  for (size_t i = num_blocks - 1; i > 0; --i)
354  free_internal_blocks.push(iblocks + i);
355  return iblocks;
356  }
357  else
358  {
359  // => no internal_block available
360  return 0;
361  }
362  }
363 
364  //! Return an internal_block to the freelist.
366  { free_internal_blocks.push(iblock); }
367 
368 public:
369  //! Create a block_scheduler with empty prediction sequence in simple mode.
370  //! \param max_internal_memory Amount of internal memory (in bytes) the scheduler is allowed to use for acquiring, prefetching and caching.
371  explicit block_scheduler(const size_t max_internal_memory)
372  : max_internal_blocks(div_ceil(max_internal_memory, sizeof(internal_block_type))),
373  remaining_internal_blocks(max_internal_blocks),
374  bm(block_manager::get_instance()),
375  algo(0)
376  {
378  }
379 
380  //! non-copyable: delete copy-constructor
381  block_scheduler(const block_scheduler&) = delete;
382  //! non-copyable: delete assignment operator
383  block_scheduler& operator = (const block_scheduler&) = delete;
384 
386  {
387  delete algo;
388  size_t num_freed_internal_blocks = 0;
389  if (free_swappable_blocks.size() != swappable_blocks.size())
390  {
391  // => not all swappable_blocks are free, at least deinitialize them
392  TLX_LOG1 << "not all swappable_blocks are free, those not acquired will be deinitialized";
393  // evictable_blocks would suffice
394  for (typename std::vector<SwappableBlockType>::iterator it = swappable_blocks.begin();
395  it != swappable_blocks.end(); ++it)
396  {
397  if (! it->is_acquired() && it->deinitialize())
398  // count internal_blocks that get freed
399  num_freed_internal_blocks++;
400  }
401  }
402  if (int64_t nlost = static_cast<int64_t>(max_internal_blocks - remaining_internal_blocks)
403  - static_cast<int64_t>(free_internal_blocks.size() + num_freed_internal_blocks)) {
404  TLX_LOG1 << nlost << " internal_blocks are lost. They will get deallocated.";
405  }
406  while (! internal_blocks_blocks.empty())
407  {
408  delete[] internal_blocks_blocks.top();
409  internal_blocks_blocks.pop();
410  }
411  }
412 
413  //! Acquire the given block.
414  //! Has to be in pairs with release. Pairs may be nested and interleaved.
415  //! \return Reference to the block's data.
416  //! param sbid Swappable block to acquire.
417  internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
418  { return algo->acquire(sbid, uninitialized); }
419 
420  //! Release the given block.
421  //! Has to be in pairs with acquire. Pairs may be nested and interleaved.
422  //! \param sbid Swappable block to release.
423  //! \param dirty If the data has been changed, invalidating possible data in external storage.
424  void release(const swappable_block_identifier_type sbid, const bool dirty)
425  { algo->release(sbid, dirty); }
426 
427  //! Drop all data in the given block, freeing in- and external memory.
429  { algo->deinitialize(sbid); }
430 
431  //! Initialize the swappable_block with the given external_block.
432  //!
433  //! It will use the the external_block for swapping and take care about
434  //! it's deallocation. Has to be uninitialized.
435  //! \param sbid identifier to the swappable_block
436  //! \param eblock external_block a.k.a. bid
438  { algo->initialize(sbid, eblock); }
439 
440  //! Deinitialize the swappable_block and return it's contents in an external_block.
441  //!
442  //! \param sbid identifier to the swappable_block
443  //! \return external_block a.k.a. bid
445  { return algo->extract_external_block(sbid); }
446 
447  //! check if the swappable_block is initialized.
448  //! \param sbid identifier to the swappable_block
449  //! \return if the swappable_block is initialized
451  { return algo->is_initialized(sbid); }
452 
 453  //! Record a timestep in the prediction sequence to separate consecutive
454  //! acquire rsp. release-operations. Has an effect only in simulation mode.
456  { algo->explicit_timestep(); }
457 
458  //! Get a const reference to given block's data. Block has to be already acquired.
459  //! \param sbid Swappable block to access.
461  { return swappable_blocks[sbid].get_internal_block(); }
462 
463  //! Allocate an uninitialized swappable_block.
464  //! \return An identifier of the block.
466  {
468  if (free_swappable_blocks.empty())
469  {
470  // create new swappable_block
471  sbid = swappable_blocks.size();
472  swappable_blocks.resize(sbid + 1);
473  algo->swappable_blocks_resize(sbid + 1);
474  }
475  else
476  {
477  // take swappable_block from freelist
478  sbid = free_swappable_blocks.top();
479  free_swappable_blocks.pop();
480  }
481  return sbid;
482  }
483 
484  //! Free given no longer used temporary swappable_block.
485  //! \param sbid Temporary swappable_block to free.
487  {
488  deinitialize(sbid);
489  free_swappable_blocks.push(sbid);
490  }
491 
492  //! Returns if simulation mode is on, i.e. if a prediction sequence is being recorded.
493  //! \return If simulation mode is on.
494  bool is_simulating() const
495  { return algo->is_simulating(); }
496 
497  //! Switch the used algorithm, e.g. to simulation etc..
498  //! \param new_algo Pointer to the new algorithm object. Has to be instantiated to the block scheduler (or the old algorithm object).
499  //! \return Pointer to the old algorithm object.
501  {
502  assert(&new_algo->bs == this);
504  algo = new_algo;
505  return old_algo;
506  }
507 
508  //! Return the current algorithm.
510  {
511  return algo;
512  }
513 
514  //! Get the prediction_sequence.
515  //! \return reference to the prediction_sequence
517  { return algo->get_prediction_sequence(); }
518 
519  void flush()
520  {
521  std::vector<request_ptr> requests;
522  while (! algo->evictable_blocks_empty())
523  {
525  request_ptr rq = swappable_blocks[sbid].clean_async();
526  if (rq.valid())
527  requests.push_back(rq);
528  return_free_internal_block(swappable_blocks[sbid].detach_internal_block());
529  }
530  for (typename std::vector<request_ptr>::reverse_iterator it = requests.rbegin();
531  it != requests.rend(); ++it)
532  {
533  (*it)->wait();
534  }
535  }
536 };
537 
538 template <class SwappableBlockType>
540 
541 //! Interface of a block scheduling algorithm.
542 template <class SwappableBlockType>
544 {
545 protected:
552 
553 public:
555 
556 protected:
557  std::vector<SwappableBlockType>& swappable_blocks;
559 
561  { return bs.algo; }
562 
563  //! Get an internal_block from the block_scheduler.
564  //! \return Pointer to the internal_block. nullptr if none available.
566  { return bs.get_free_internal_block(); }
567 
568  //! Return an internal_block to the block_scheduler.
570  { bs.return_free_internal_block(iblock); }
571 
572 public:
574  : bs(bs),
575  swappable_blocks(bs.swappable_blocks)
576  { }
577 
579  : bs(old->bs),
580  swappable_blocks(bs.swappable_blocks)
581  { }
582 
583  //! non-copyable: delete copy-constructor
585  //! non-copyable: delete assignment operator
586  block_scheduler_algorithm& operator = (const block_scheduler_algorithm&) = delete;
587 
589 
590  virtual bool evictable_blocks_empty() = 0;
591  virtual swappable_block_identifier_type evictable_blocks_pop() = 0;
593 
594  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false) = 0;
595  virtual void release(swappable_block_identifier_type sbid, const bool dirty) = 0;
596  virtual void deinitialize(swappable_block_identifier_type sbid) = 0;
597  virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock) = 0;
599 
600  virtual bool is_initialized(const swappable_block_identifier_type sbid) const
601  { return swappable_blocks[sbid].is_initialized(); }
602 
603  virtual void explicit_timestep() { }
604  virtual bool is_simulating() const
605  { return false; }
607  { return prediction_sequence; }
608 };
609 
610 //! Block scheduling algorithm caching via the least recently used policy (online).
611 template <class SwappableBlockType>
612 class block_scheduler_algorithm_online_lru : public block_scheduler_algorithm<SwappableBlockType>
613 {
614 protected:
620 
621  using block_scheduler_algorithm_type::bs;
622  using block_scheduler_algorithm_type::swappable_blocks;
623  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
624  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
625  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
626 
627  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
629 
631  {
632  // try to get a free internal_block
633  if (internal_block_type* iblock = get_free_internal_block_from_block_scheduler())
634  return iblock;
635  // evict block
636  assert(! evictable_blocks.empty()); // fails it there is not enough memory available
637  return swappable_blocks[evictable_blocks.pop()].detach_internal_block();
638  }
639 
641  { return_free_internal_block_to_block_scheduler(iblock); }
642 
643  void init()
644  {
645  if (get_algorithm_from_block_scheduler())
646  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
647  evictable_blocks.insert(get_algorithm_from_block_scheduler()->evictable_blocks_pop());
648  }
649 
650 public:
653  { init(); }
654 
657  { init(); }
658 
660  {
661  if (! evictable_blocks.empty())
662  TLX_LOG1 << "Destructing block_scheduler_algorithm_online that still holds evictable blocks. They get deinitialized.";
663  while (! evictable_blocks.empty())
664  {
665  SwappableBlockType& sblock = swappable_blocks[evictable_blocks.pop()];
666  if (internal_block_type* iblock = sblock.deinitialize())
667  return_free_internal_block(iblock);
668  }
669  }
670 
671  virtual bool evictable_blocks_empty()
672  { return evictable_blocks.empty(); }
673 
675  { return evictable_blocks.pop(); }
676 
677  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
678  {
679  SwappableBlockType& sblock = swappable_blocks[sbid];
680  /* acquired => internal -> increase reference count
681  internal but not acquired -> remove from evictable_blocks, increase reference count
682  not internal => uninitialized or external -> get internal_block, increase reference count
683  uninitialized -> fill with default value
684  external -> read */
685  if (sblock.is_internal())
686  {
687  if (! sblock.is_acquired())
688  // not acquired yet -> remove from evictable_blocks
689  evictable_blocks.erase(sbid);
690  sblock.acquire();
691  }
692  else if (sblock.is_initialized())
693  {
694  // => external but not internal
695  //get internal_block
696  sblock.attach_internal_block(get_free_internal_block());
697  if (! uninitialized)
698  //load block synchronously
699  sblock.read_sync();
700  sblock.acquire();
701  }
702  else
703  {
704  // => ! sblock.is_initialized()
705  //get internal_block
706  sblock.attach_internal_block(get_free_internal_block());
707  sblock.acquire();
708  //initialize new block
709  if (! uninitialized)
710  sblock.fill_default();
711  }
712  return sblock.get_internal_block();
713  }
714 
715  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
716  {
717  SwappableBlockType& sblock = swappable_blocks[sbid];
718  sblock.make_dirty_if(dirty);
719  sblock.release();
720  if (! sblock.is_acquired())
721  {
722  if (sblock.is_dirty() || sblock.is_external())
723  // => evictable, put in pq
724  evictable_blocks.insert(sbid);
725  else
726  // => uninitialized, release internal block and put it in freelist
727  return_free_internal_block(sblock.detach_internal_block());
728  }
729  }
730 
732  {
733  SwappableBlockType& sblock = swappable_blocks[sbid];
734  if (sblock.is_evictable())
735  evictable_blocks.erase(sbid);
736  if (internal_block_type* iblock = sblock.deinitialize())
737  return_free_internal_block(iblock);
738  }
739 
741  {
742  SwappableBlockType& sblock = swappable_blocks[sbid];
743  sblock.initialize(eblock);
744  }
745 
747  {
748  SwappableBlockType& sblock = swappable_blocks[sbid];
749  if (sblock.is_evictable())
750  evictable_blocks.erase(sbid);
751  if (sblock.is_internal())
752  return_free_internal_block(sblock.detach_internal_block());
753  return sblock.extract_external_block();
754  }
755 };
756 
757 //! Pseudo block scheduling algorithm only recording the request sequence.
758 template <class SwappableBlockType>
760 {
761 protected:
769 
770  using block_scheduler_algorithm_type::bs;
771  using block_scheduler_algorithm_type::prediction_sequence;
772  using block_scheduler_algorithm_type::swappable_blocks;
773  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
774  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
775  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
776 
777  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
778  std::stack<swappable_block_identifier_type> evictable_blocks;
781  std::vector<size_t> reference_counts;
783 
785  { return_free_internal_block_to_block_scheduler(iblock); }
786 
787  void init()
788  {
789  if (get_algorithm_from_block_scheduler())
790  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
791  evictable_blocks.push(get_algorithm_from_block_scheduler()->evictable_blocks_pop());
792  for (swappable_block_identifier_type i = 0; i < reference_counts.size(); ++i)
793  reference_counts[i] = swappable_blocks[i].is_initialized();
794  }
795 
796 public:
799  time_count(0),
800  last_op_release(false),
801  reference_counts(swappable_blocks.size())
802  { init(); }
803 
806  time_count(0),
807  last_op_release(false),
808  reference_counts(swappable_blocks.size())
809  { init(); }
810 
812  {
813  if (! evictable_blocks.empty())
814  TLX_LOG1 << "Destructing block_scheduler_algorithm_record_prediction_sequence that still holds evictable blocks. They get deinitialized.";
815  while (! evictable_blocks.empty())
816  {
817  SwappableBlockType& sblock = swappable_blocks[evictable_blocks.top()];
818  if (internal_block_type* iblock = sblock.deinitialize())
819  return_free_internal_block(iblock);
820  evictable_blocks.pop();
821  }
822  }
823 
824  virtual bool evictable_blocks_empty()
825  { return evictable_blocks.empty(); }
826 
828  {
829  swappable_block_identifier_type sbid = evictable_blocks.top();
830  evictable_blocks.pop();
831  return sbid;
832  }
833 
834  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
835  {
836  ++reference_counts[sbid];
837  last_op_release = false;
838  if (uninitialized)
839  prediction_sequence.push_back(
840  prediction_sequence_element_type(block_scheduler_type::op_acquire_uninitialized, sbid, time_count)
841  );
842  else
843  prediction_sequence.push_back(
844  prediction_sequence_element_type(block_scheduler_type::op_acquire, sbid, time_count)
845  );
846  return dummy_block;
847  }
848 
849  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
850  {
851  --reference_counts[sbid] += dirty;
852  time_count += ! last_op_release;
853  last_op_release = true;
854  if (dirty)
855  prediction_sequence.push_back(
856  prediction_sequence_element_type(block_scheduler_type::op_release_dirty, sbid, time_count)
857  );
858  else
859  prediction_sequence.push_back(
860  prediction_sequence_element_type(block_scheduler_type::op_release, sbid, time_count)
861  );
862  }
863 
865  {
866  reference_counts[sbid] = false;
867  prediction_sequence.push_back(
868  prediction_sequence_element_type(block_scheduler_type::op_deinitialize, sbid, time_count)
869  );
870  }
871 
873  {
874  reference_counts[sbid] = true;
875  prediction_sequence.push_back(
876  prediction_sequence_element_type(block_scheduler_type::op_initialize, sbid, time_count)
877  );
878  }
879 
881  {
882  reference_counts[sbid] = false;
883  prediction_sequence.push_back(
884  prediction_sequence_element_type(block_scheduler_type::op_extract_external_block, sbid, time_count)
885  );
886  return external_block_type();
887  }
888 
890  {
891  reference_counts.resize(size, 0);
892  }
893 
894  virtual bool is_initialized(const swappable_block_identifier_type sbid) const
895  {
896  return reference_counts[sbid] > 0;
897  }
898 
899  virtual void explicit_timestep()
900  { ++time_count; }
901 
902  virtual bool is_simulating() const
903  { return true; }
904 };
905 
906 //! Block scheduling algorithm caching via the longest forward distance policy (offline).
907 template <class SwappableBlockType>
909 {
910 protected:
918 
919  using block_scheduler_algorithm_type::bs;
920  using block_scheduler_algorithm_type::swappable_blocks;
921  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
922  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
923  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
924 
925  class priority
926  {
927  size_t p;
928 
929  public:
930  priority(const SwappableBlockType& sblock, const std::pair<bool, time_type>& t)
931  {
932  // p larger => evict earlier
933  if (t.first)
934  {
935  // most significant: next use
936  p = size_t(t.second) << 2;
937  // less significant: not dirty
938  p |= size_t(! sblock.is_dirty()) << 1;
939  // less significant: has external_block
940  p |= size_t(sblock.has_external_block()) << 0;
941  }
942  else
943  {
944  // most significant: next use
946  // less significant: next operation: extract > accessed no more > deinitialize
947  p |= size_t(t.second) << 0;
948  }
949  }
950 
951  // less => evict earlier
952  bool operator < (const priority& right) const
953  { return p > right.p; }
954  };
955 
956  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
958  /*!
959  * Stores for the sequence of releases extracted from the prediction_sequence:
960  * (true, timestamp of the blocks next acquire) if it is acquired next
961  * (false, 0) if it is deinitialized next
962  * (false, 1) if it is not accessed any more
963  * (false, 2) if it is extracted next
964  * (false, 3) if it is initialized next
965  */
966  std::deque<std::pair<bool, time_type> > next_use;
967 
969  {
970  // try to get a free internal_block
971  if (internal_block_type* iblock = get_free_internal_block_from_block_scheduler())
972  return iblock;
973  // evict block
974  assert(! evictable_blocks.empty()); // fails it there is not enough memory available
975  return swappable_blocks[evictable_blocks.pop()].detach_internal_block();
976  }
977 
979  { return_free_internal_block_to_block_scheduler(iblock); }
980 
982  {
983  std::vector<std::pair<bool, time_type> >
984  blocks_next_acquire(swappable_blocks.size(), std::make_pair(false, 1));
985  if (old_algo)
986  {
987  // precomputations for priorities: init next_acquires
988  const prediction_sequence_type& ps = old_algo->get_prediction_sequence();
989  for (typename prediction_sequence_type::const_reverse_iterator it = ps.rbegin(); it != ps.rend(); ++it)
990  {
991  switch (it->op)
992  {
993  case (block_scheduler_type::op_acquire):
994  case (block_scheduler_type::op_acquire_uninitialized):
995  blocks_next_acquire[it->id] = std::make_pair(true, it->time);
996  break;
997  case (block_scheduler_type::op_release):
998  case (block_scheduler_type::op_release_dirty):
999  next_use.push_front(blocks_next_acquire[it->id]);
1000  break;
1001  case (block_scheduler_type::op_deinitialize):
1002  blocks_next_acquire[it->id] = std::make_pair(false, 0);
1003  break;
1004  case (block_scheduler_type::op_initialize):
1005  blocks_next_acquire[it->id] = std::make_pair(false, 3);
1006  break;
1007  case (block_scheduler_type::op_extract_external_block):
1008  blocks_next_acquire[it->id] = std::make_pair(false, 2);
1009  break;
1010  }
1011  }
1012  }
1013  if (get_algorithm_from_block_scheduler())
1014  {
1015  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
1016  {
1017  // insert already evictable blocks with the right priority
1018  const swappable_block_identifier_type sbid = get_algorithm_from_block_scheduler()->evictable_blocks_pop();
1019  evictable_blocks.insert(sbid, priority(swappable_blocks[sbid], blocks_next_acquire[sbid]));
1020  }
1021  }
1022  }
1023 
1024 public:
1027  { init(get_algorithm_from_block_scheduler()); }
1028 
1029  // It is possible to keep an old simulation-algorithm object and reuse it's prediction sequence
1032  { init(old); }
1033 
// Destructor (declaration line lost in the extraction; presumably
// ~block_scheduler_algorithm_offline_lfd()). Any blocks still evictable are
// deinitialized and their internal blocks returned to the free list.
1035  {
1036  if (! evictable_blocks.empty())
1037  TLX_LOG1 << "Destructing block_scheduler_algorithm_offline_lfd that still holds evictable blocks. They get deinitialized.";
1038  while (! evictable_blocks.empty())
1039  {
      // deinitialize() returns the block's internal_block (if it held one),
      // which is recycled via the free list
1040  SwappableBlockType& sblock = swappable_blocks[evictable_blocks.pop()];
1041  if (internal_block_type* iblock = sblock.deinitialize())
1042  return_free_internal_block(iblock);
1043  }
1044  }
1045 
1046  virtual bool evictable_blocks_empty()
1047  { return evictable_blocks.empty(); }
1048 
// evictable_blocks_pop (declaration line lost in the extraction): remove and
// return the next block to evict from the priority queue.
1050  { return evictable_blocks.pop(); }
1051 
1052  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
1053  {
1054  SwappableBlockType& sblock = swappable_blocks[sbid];
1055  /* acquired => internal -> increase reference count
1056  internal but not acquired -> remove from evictable_blocks, increase reference count
1057  not intern => uninitialized or external -> get internal_block, increase reference count
1058  uninitialized -> fill with default value
1059  external -> read */
1060  if (sblock.is_internal())
1061  {
1062  if (! sblock.is_acquired())
1063  // not acquired yet -> remove from evictable_blocks
1064  evictable_blocks.erase(sbid);
1065  sblock.acquire();
1066  }
1067  else if (sblock.is_initialized())
1068  {
1069  // => external but not internal
1070  //get internal_block
1071  sblock.attach_internal_block(get_free_internal_block());
1072  if (! uninitialized)
1073  //load block synchronously
1074  sblock.read_sync();
1075  sblock.acquire();
1076  }
1077  else
1078  {
1079  // => ! sblock.is_initialized()
1080  //get internal_block
1081  sblock.attach_internal_block(get_free_internal_block());
1082  sblock.acquire();
1083  //initialize new block
1084  if (! uninitialized)
1085  sblock.fill_default();
1086  }
1087  return sblock.get_internal_block();
1088  }
1089 
// Release one reference to sbid; `dirty` marks the contents as modified.
// Consumes the front of next_use (the precomputed next-acquire time) to set
// the block's eviction priority; if the prediction sequence is exhausted,
// falls back to the online LRU algorithm.
1090  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
1091  {
1092  if (next_use.empty())
1093  {
1094  TLX_LOG1 << "block_scheduler_algorithm_offline_lfd got release-request but prediction sequence ended. Switching to block_scheduler_algorithm_online.";
1095  // switch algorithm
1096  block_scheduler_algorithm_type* new_algo, * old_algo;
      // NOTE(review): line 1097 (initialization of new_algo, presumably
      // `new_algo = new block_scheduler_algorithm_online_lru<...>(this);`)
      // was lost in the extraction — confirm against the original file.
1098  old_algo = bs.switch_algorithm_to(new_algo);
1099  // redirect call
1100  new_algo->release(sbid, dirty);
1101  // delete self
1102  delete old_algo;
1103  return;
1104  }
1105  SwappableBlockType& sblock = swappable_blocks[sbid];
1106  sblock.make_dirty_if(dirty);
1107  sblock.release();
1108  if (! sblock.is_acquired())
1109  {
1110  if (sblock.is_dirty() || sblock.is_external())
1111  // => evictable, put in pq
1112  evictable_blocks.insert(sbid, priority(swappable_blocks[sbid], next_use.front()));
1113  else
1114  // => uninitialized, release internal block and put it in freelist
1115  return_free_internal_block(sblock.detach_internal_block());
1116  }
1117  next_use.pop_front();
1118  }
1119 
// deinitialize (declaration line lost in the extraction; the class interface
// shows: virtual void deinitialize(swappable_block_identifier_type sbid)):
// drop the block's data, removing it from the eviction queue if present and
// recycling its internal block.
1121  {
1122  SwappableBlockType& sblock = swappable_blocks[sbid];
1123  if (sblock.is_evictable())
1124  evictable_blocks.erase(sbid);
1125  if (internal_block_type* iblock = sblock.deinitialize())
1126  return_free_internal_block(iblock);
1127  }
1128 
// initialize (declaration line lost in the extraction; interface shows:
// virtual void initialize(swappable_block_identifier_type sbid,
// external_block_type eblock)): associate the given external block with the
// swappable block.
1130  {
1131  SwappableBlockType& sblock = swappable_blocks[sbid];
1132  sblock.initialize(eblock);
1133  }
1134 
// extract_external_block (declaration line lost in the extraction):
// detach and return the block's external storage; any internal block is
// recycled and the block leaves the eviction queue.
1136  {
1137  SwappableBlockType& sblock = swappable_blocks[sbid];
1138  if (sblock.is_evictable())
1139  evictable_blocks.erase(sbid);
1140  if (sblock.is_internal())
1141  return_free_internal_block(sblock.detach_internal_block());
1142  return sblock.extract_external_block();
1143  }
1144 };
1145 
1146 //! Block scheduling algorithm caching via the least recently used policy
1147 //! (offline), and prefetching in addition.
1148 template <class SwappableBlockType>
1150 {
1151 protected:
1152  struct scheduled_block_meta;
1153  struct write_read_request;
1154 
1163  using swappable_blocks_iterator = typename std::vector<SwappableBlockType>::iterator;
1164 
1165  using scheduled_blocks_type = std::map<swappable_block_identifier_type, scheduled_block_meta>;
1166  using scheduled_blocks_iterator = typename scheduled_blocks_type::iterator;
1167  using scheduled_blocks_reference = typename scheduled_blocks_type::reference;
1168  using write_scheduled_blocks_type = std::map<swappable_block_identifier_type, write_read_request*>;
1169  using write_scheduled_blocks_iterator = typename write_scheduled_blocks_type::iterator;
1170  using write_scheduled_blocks_reference = typename write_scheduled_blocks_type::reference;
1171 
1172  using block_scheduler_algorithm_type::bs;
1173  using block_scheduler_algorithm_type::swappable_blocks;
1174  using block_scheduler_algorithm_type::get_algorithm_from_block_scheduler;
1175  using block_scheduler_algorithm_type::get_free_internal_block_from_block_scheduler;
1176  using block_scheduler_algorithm_type::return_free_internal_block_to_block_scheduler;
1177  using block_scheduler_algorithm_type::prediction_sequence;
1178 
1180  {
1181  bool write_done_soon; // set by read_after_write, checked by schedule_read()
1182  bool shall_read; // checked by read_after_write, set by schedule_read()
1183  swappable_blocks_iterator block_to_start_read; // used by read_after_write, set by schedule_read()
1184  scheduled_blocks_iterator taker; // read_req set by read_after_write
1185  request_ptr write_req; // completes with read_after_write
1186 
1188  : write_done_soon(false), shall_read(false), block_to_start_read(),
1189  taker(), write_req(nullptr) { }
1190  };
1191 
1193  {
1195 
1196  explicit read_after_write(write_read_request* write_read_req)
1197  : wrr(write_read_req) { }
1198 
1199  void operator () (request*, bool /* success */)
1200  {
1201  wrr->write_done_soon = true;
1202  if (wrr->shall_read)
1203  wrr->taker->second.read_req = wrr->block_to_start_read->read_async();
1204  }
1205  };
1206 
1208  {
1210  std::pair<bool, swappable_block_identifier_type> giver;
1212  std::deque<block_scheduler_operation> operations; // invariant: not empty; front: last scheduled operation, back: upcoming operation
1213 
1215  : reserved_iblock(0),
1216  giver(false, 0),
1217  read_req(nullptr),
1218  operations()
1219  { operations.push_front(op); }
1220  };
1221 
1222  //! Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired.
1223  std::set<swappable_block_identifier_type> free_evictable_blocks;
1224  std::set<swappable_block_identifier_type> scheduled_evictable_blocks;
1225 
1226  //! Holds not internal swappable_blocks, whose next access has already been scheduled.
1228 
1229  //! Holds swappable_blocks, whose internal block has been taken away but the clean did not finish yet.
1231  typename prediction_sequence_type::iterator next_op_to_schedule;
1232 
1233  //! Schedule an internal, possibly dirty swappable_block to write.
1234  //!
1235  //! The block becomes not dirty. if it was dirty, an entry in write_scheduled_blocks is made referencing the write_read_request.
1236  //! \param sbid block to write
1237  //! \return pointer to the write_read_request
// schedule_write (declaration line lost in the extraction; per the Doxygen
// comment above it takes `sbid` and returns write_read_request*): schedule
// the possibly dirty block for writing; returns 0 when no write was needed.
1239  {
1240  SwappableBlockType& sblock = swappable_blocks[sbid];
      // NOTE(review): line 1241 (allocation of wrr, presumably
      // `write_read_request* wrr = new write_read_request;`) was lost in
      // the extraction — confirm against the original file.
1242  wrr->write_req = sblock.clean_async(read_after_write(wrr));
1243  if (wrr->write_req.valid())
1244  {
      // register the pending write; a duplicate entry would be a logic error
1245  bool t = write_scheduled_blocks.insert(std::make_pair(sbid, wrr)).second;
1246  tlx::unused(t);
1247 
1248  assert(t);
1249  return wrr;
1250  }
1251  else
1252  {
      // block was clean: no request issued, discard the bookkeeping
1253  delete wrr;
1254  return 0;
1255  }
1256  }
1257 
1258  //! Try to interrupt a read scheduled in a write_read_request.
1259  //!
1260  //! side-effect: possibly erases entry from write_scheduled_blocks, so the iterator writing_block may become invalid
1261  //! \return if successful
// try_interrupt_read (declaration line lost in the extraction; takes the
// write_scheduled_blocks_iterator `writing_block`, returns bool):
// attempt to cancel the read chained behind a pending write; true iff the
// read was prevented from starting.
1263  {
1264  // stop read
1265  writing_block->second->shall_read = false;
1266  // check if stopped
1267  if (! writing_block->second->write_done_soon)
1268  return true;
1269  // => possibly to late
      // keep a copy of the taker: wait_on_write below erases writing_block
1270  scheduled_blocks_reference taker = *writing_block->second->taker;
1271  // wait
1272  wait_on_write(writing_block);
1273  // check if read started
1274  if (taker.second.read_req.valid())
1275  // => read started, to late
1276  return false;
1277  else
1278  // => just in time
1279  return true;
1280  }
1281 
1282  //! Schedule an internal and external block to read.
1283  //!
1284  //! If the giver is still writing, schedule read via its write_read_request.
// schedule_read (declaration line 1285 lost in the extraction; takes the
// scheduled_blocks_iterator `block_to_read`): schedule an asynchronous read
// for the block. If the block's data is still being written out by another
// block's "giver" write, the read is chained behind that write, or the two
// blocks simply swap internal blocks when possible.
1286  {
1287  // first check if block_to_read is still writing. do not read before write finished
1288  // wait_on_write(block_to_read->first);
1289 
1290  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(block_to_read->first);
1291  if (it != write_scheduled_blocks.end())
1292  {
1293  scheduled_blocks_iterator other_block_to_read = it->second->taker;
1294  // check if scheduled to read
1295  if (it->second->shall_read)
1296  {
1297  if (try_interrupt_read(it))
1298  {
1299  // => interrupted, swap internal_blocks
1300  std::swap(other_block_to_read->second.giver, block_to_read->second.giver);
      // after the swap, repoint each giver's pending write at its new taker
      // (the inner `it` intentionally shadows the outer iterator)
1301  if (other_block_to_read->second.giver.first)
1302  {
1303  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(other_block_to_read->second.giver.second);
1304  if (it != write_scheduled_blocks.end())
1305  it->second->taker = other_block_to_read;
1306  else
1307  other_block_to_read->second.giver.first = false;
1308  }
1309  if (block_to_read->second.giver.first)
1310  {
1311  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(block_to_read->second.giver.second);
1312  if (it != write_scheduled_blocks.end())
1313  it->second->taker = block_to_read;
1314  else
1315  block_to_read->second.giver.first = false;
1316  }
1317 
1318  internal_block_type* tmp_iblock = swappable_blocks[block_to_read->first].detach_internal_block();
1319  swappable_blocks[block_to_read->first].attach_internal_block(
1320  swappable_blocks[other_block_to_read->first].detach_internal_block()
1321  );
1322  swappable_blocks[other_block_to_read->first].attach_internal_block(tmp_iblock);
1323  // => this block has its internal_block back, no need to read
1324  // reschedule other
1325  schedule_read(other_block_to_read);
1326  return;
1327  }
1328  // else => read already started, but write done -> read this
1329  }
1330  else
1331  {
1332  // => no read scheduled, swap internal_blocks
1333  std::swap(other_block_to_read->second.giver, block_to_read->second.giver);
1334  if (other_block_to_read->second.giver.first)
1335  {
1336  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(other_block_to_read->second.giver.second);
1337  if (it != write_scheduled_blocks.end())
1338  it->second->taker = other_block_to_read;
1339  else
1340  other_block_to_read->second.giver.first = false;
1341  }
1342  if (block_to_read->second.giver.first)
1343  {
1344  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(block_to_read->second.giver.second);
1345  if (it != write_scheduled_blocks.end())
1346  it->second->taker = block_to_read;
1347  else
1348  block_to_read->second.giver.first = false;
1349  }
1350 
      // exchange this block's internal block with the other block's reserve
1351  internal_block_type* tmp_iblock = swappable_blocks[block_to_read->first].detach_internal_block();
1352  swappable_blocks[block_to_read->first].attach_internal_block(
1353  other_block_to_read->second.reserved_iblock
1354  );
1355  other_block_to_read->second.reserved_iblock = tmp_iblock;
1356  // => this block has its internal_block back, no need to read
1357  return;
1358  }
1359  }
1360 
1361  // schedule block_to_read to read
1362  if (block_to_read->second.giver.first)
1363  {
1364  write_scheduled_blocks_iterator writing_block = write_scheduled_blocks.find(block_to_read->second.giver.second);
1365  if (writing_block != write_scheduled_blocks.end())
1366  {
1367  // => there is a write scheduled
1368  // tell the completion handler that we want a read
1369  writing_block->second->block_to_start_read = swappable_blocks.begin() + block_to_read->first;
1370  writing_block->second->taker = block_to_read;
1371  writing_block->second->shall_read = true;
1372  // and check if it is not to late
1373  if (writing_block->second->write_done_soon)
1374  {
1375  // => the completion handler may have missed our wish to read
1376  // so wait for it to finish and check
1377  wait_on_write(writing_block);
1378  block_to_read->second.giver.first = false;
1379  if (block_to_read->second.read_req.valid())
1380  // read scheduled
1381  return;
1382  }
1383  else
1384  // read scheduled
1385  return;
1386  }
1387  else
1388  block_to_read->second.giver.first = false;
1389  }
1390  // => read could not be scheduled through the completion handler
1391  block_to_read->second.read_req = swappable_blocks[block_to_read->first].read_async();
1392  }
1393 
1394  //! Wait for the write to finish.
1395  //!
1396  //! side-effect: erases entry from write_scheduled_blocks
// wait_on_write (declaration line lost in the extraction; parameter is the
// write_scheduled_blocks_iterator `writing_block`): block until the write
// finishes, then free the write_read_request and erase its map entry.
1398  {
1399  writing_block->second->write_req->wait();
1400  delete writing_block->second;
1401  write_scheduled_blocks.erase(writing_block);
1402  }
1403 
1404  //! Wait for the write to finish.
1405  //!
1406  //! side-effect: erases entry from write_scheduled_blocks
// wait_on_write (declaration line lost in the extraction; parameter is the
// swappable block identifier): forward to the iterator overload if a write
// for this block is still pending; no-op otherwise.
1408  {
1409  write_scheduled_blocks_iterator it = write_scheduled_blocks.find(writing_block);
1410  if (it != write_scheduled_blocks.end())
1411  wait_on_write(it);
1412  }
1413 
1414  //! Wait for the write of the giver to finish.
1415  //!
1416  //! side-effect: erases entry from write_scheduled_blocks
1417  void wait_on_write(const scheduled_blocks_iterator& schedule_meta)
1418  {
1419  if (schedule_meta->second.giver.first)
1420  {
1421  wait_on_write(schedule_meta->second.giver.second);
1422  schedule_meta->second.giver.first = false;
1423  }
1424  }
1425 
1426  //! Wait for the read to finish.
1427  //!
1428  //! side-effect: erases entry for the write of the giver from write_scheduled_blocks
1429  void wait_on_read(const scheduled_blocks_iterator& schedule_meta)
1430  {
1431  wait_on_write(schedule_meta);
1432  if (schedule_meta->second.read_req.valid())
1433  {
1434  schedule_meta->second.read_req->wait();
1435  schedule_meta->second.read_req = 0;
1436  }
1437  }
1438 
1439  //! Wait for the write of the giver to finish and return reserved internal_block.
1440  //!
1441  //! side-effect: erases entry for the write of the giver from write_scheduled_blocks
// get_ready_block (declaration line lost in the extraction; per the Doxygen
// comment above it returns internal_block_type*): wait for the giver's write
// to finish, then hand out the reserved internal block and clear the reserve.
1443  {
1444  wait_on_write(schedule_meta);
1445  internal_block_type* r = schedule_meta->second.reserved_iblock;
1446  schedule_meta->second.reserved_iblock = 0;
1447  return r;
1448  }
1449 
1450  bool shall_keep_internal_block(const scheduled_blocks_iterator& schedule_meta, const bool ignore_first = true) const
1451  {
1452  // returns true iif there is an acquire or acquire_uninitialized scheduled or there is a deinitialize scheduled and the block is dirty
1453  for (typename std::deque<block_scheduler_operation>::reverse_iterator
1454  rit = schedule_meta->second.operations.rbegin() + ignore_first;
1455  rit != schedule_meta->second.operations.rend(); ++rit)
1456  {
1457  switch (*rit)
1458  {
1459  case block_scheduler_type::op_acquire:
1460  case block_scheduler_type::op_acquire_uninitialized:
1461  return true;
1462  break;
1463  case block_scheduler_type::op_release:
1464  case block_scheduler_type::op_release_dirty:
1465  break;
1466  case block_scheduler_type::op_deinitialize:
1467  if (swappable_blocks[schedule_meta->first].is_dirty())
1468  return true;
1469  break;
1470  case block_scheduler_type::op_initialize:
1471  case block_scheduler_type::op_extract_external_block:
1472  break;
1473  }
1474  }
1475  return false;
1476  }
1477 
1478  // assumes the current operation to be still in operations
1479  bool shall_be_cleaned(const scheduled_blocks_iterator& schedule_meta) const
1480  {
1481  // returns true iif there is an extract_external_block scheduled and no release_dirty, deinitialize or initialize before
1482  for (typename std::deque<block_scheduler_operation>::reverse_iterator
1483  rit = schedule_meta->second.operations.rbegin() + 1;
1484  rit != schedule_meta->second.operations.rend(); ++rit)
1485  {
1486  switch (*rit)
1487  {
1488  case block_scheduler_type::op_acquire:
1489  case block_scheduler_type::op_acquire_uninitialized:
1490  case block_scheduler_type::op_release:
1491  break;
1492  case block_scheduler_type::op_release_dirty:
1493  case block_scheduler_type::op_deinitialize:
1494  case block_scheduler_type::op_initialize:
1495  return false;
1496  break;
1497  case block_scheduler_type::op_extract_external_block:
1498  return true;
1499  break;
1500  }
1501  }
1502  return false;
1503  }
1504 
1505  bool shall_be_read(const scheduled_blocks_iterator& schedule_meta, const bool ignore_first = true) const
1506  {
1507  // returns true iif there is an acquire scheduled next and the block is initialized
1508  return swappable_blocks[schedule_meta->first].is_initialized()
1509  && schedule_meta->second.operations.rbegin() + ignore_first != schedule_meta->second.operations.rend()
1510  && *(schedule_meta->second.operations.rbegin() + ignore_first) == block_scheduler_type::op_acquire;
1511  }
1512 
// operation_done (declaration line lost in the extraction; takes the
// scheduled_blocks_iterator `schedule_meta`): pop the completed operation;
// once none remain the bookkeeping entry is removed entirely.
1514  {
1515  schedule_meta->second.operations.pop_back();
1516  if (schedule_meta->second.operations.empty())
1517  {
      // a block with no remaining operations must not still own a giver
1518  assert(! schedule_meta->second.giver.first);
1519  scheduled_blocks.erase(schedule_meta);
1520  }
1521  }
1522 
// Abandon offline scheduling (the prediction sequence diverged from the
// actual calls) and fall back to the online LRU algorithm; returns the new
// algorithm after deleting this one.
1523  block_scheduler_algorithm_type * give_up(std::string err_msg = "detected some error in the prediction sequence")
1524  {
1525  TLX_LOG1 << "block_scheduler_algorithm_offline_lru_prefetching: " << err_msg << ". Switching to block_scheduler_algorithm_online.";
1526  // switch algorithm
      // NOTE(review): lines 1527-1528 (construction of new_algo, presumably
      // `block_scheduler_algorithm_type* new_algo = new
      // block_scheduler_algorithm_online_lru<SwappableBlockType>(this);`)
      // were lost in the extraction — confirm against the original file.
1529  // and delete self
1530  delete bs.switch_algorithm_to(new_algo);
1531  return new_algo;
1532  }
1533 
// return_free_internal_block (declaration line lost in the extraction):
// hand a no-longer-needed internal block back to the block_scheduler.
1535  { return_free_internal_block_to_block_scheduler(iblock); }
1536 
// schedule_next_operations (declaration line 1537 lost in the extraction):
// consume the prediction sequence from next_op_to_schedule onward, reserving
// internal blocks and starting prefetch reads for upcoming acquires; stops
// early when no internal block can be obtained. Finally schedules writes for
// all free evictable blocks that are not being written yet.
1538  {
1539  while (next_op_to_schedule != prediction_sequence.end())
1540  {
1541  // list operation in scheduled_blocks
1542  std::pair<scheduled_blocks_iterator, bool> ins_res = scheduled_blocks.insert(
1543  std::make_pair(
1544  next_op_to_schedule->id,
1545  scheduled_block_meta(next_op_to_schedule->op)
1546  )
1547  );
1548  scheduled_blocks_iterator schedule_meta = ins_res.first;
1549  if (! ins_res.second)
1550  schedule_meta->second.operations.push_front(next_op_to_schedule->op);
1551  SwappableBlockType& sblock = swappable_blocks[next_op_to_schedule->id];
1552 
1553  // do appropriate preparations
1554  if (next_op_to_schedule->op == block_scheduler_type::op_acquire ||
1555  next_op_to_schedule->op == block_scheduler_type::op_acquire_uninitialized)
1556  {
1557  if (sblock.is_internal())
1558  {
      // resident block with a pending acquire: move it from the freely
      // evictable set to the scheduled-evictable set
1559  if (free_evictable_blocks.erase(next_op_to_schedule->id))
1560  scheduled_evictable_blocks.insert(next_op_to_schedule->id);
1561  }
1562  else
1563  {
1564  if (! schedule_meta->second.reserved_iblock)
1565  {
1566  // => needs internal_block -> try to get one
1567  // -> try to get one from block_scheduler
1568  schedule_meta->second.reserved_iblock = get_free_internal_block_from_block_scheduler();
1569  if (! schedule_meta->second.reserved_iblock)
1570  {
1571  // -> try to get one by evicting
1572  if (free_evictable_blocks.empty())
1573  {
1574  // => can not schedule acquire
1575  // remove operation from scheduled_blocks
1576  if (ins_res.second)
1577  scheduled_blocks.erase(ins_res.first);
1578  else
1579  schedule_meta->second.operations.pop_front();
1580  // stop scheduling
1581  return;
1582  }
1583  swappable_block_identifier_type giver = pop_begin(free_evictable_blocks);
1584  {
1585  assert(
1586  scheduled_blocks.find(giver) == scheduled_blocks.end() ||
1587  !shall_keep_internal_block(scheduled_blocks.find(giver), false)
1588  );
1589  }
      // evict the giver: write it out if dirty and steal its internal block
1590  write_read_request* wrr = schedule_write(giver);
1591  schedule_meta->second.giver.first = (wrr != nullptr);
1592  schedule_meta->second.giver.second = giver;
1593  schedule_meta->second.reserved_iblock = swappable_blocks[giver].detach_internal_block();
1594  if (wrr)
1595  wrr->taker = schedule_meta;
1596  }
1597  // read if desired
1598  if (shall_be_read(schedule_meta, false))
1599  {
1600  // => there is no operation scheduled for this block before this acquire and it is initialized
1601  // -> start prefetching now
1602  sblock.attach_internal_block(schedule_meta->second.reserved_iblock);
1603  schedule_meta->second.reserved_iblock = 0;
1604  scheduled_evictable_blocks.insert(next_op_to_schedule->id);
1605  schedule_read(schedule_meta);
1606  }
1607  }
1608  }
1609  }
1610  else if (next_op_to_schedule->op == block_scheduler_type::op_deinitialize)
1611  {
      // a dirty block about to be deinitialized must not be freely evicted
1612  if (sblock.is_dirty())
1613  if (free_evictable_blocks.erase(next_op_to_schedule->id))
1614  scheduled_evictable_blocks.insert(next_op_to_schedule->id);
1615  }
1616 
1617  ++next_op_to_schedule;
1618  }
1619  for (typename std::set<swappable_block_identifier_type>::iterator it = free_evictable_blocks.begin();
1620  it != free_evictable_blocks.end(); ++it)
1621  {
1622  if (! write_scheduled_blocks.count(*it))
1623  schedule_write(*it);
1624  }
1625  }
1626 
// init (declaration line lost in the extraction; takes the previous
// algorithm, may be nullptr): adopt the old algorithm's prediction sequence
// and evictable blocks, then start scheduling.
1628  {
1629  if (old_algo)
1630  // copy prediction sequence
1631  prediction_sequence = old_algo->get_prediction_sequence();
1632  next_op_to_schedule = prediction_sequence.begin();
      // take over all blocks the previous algorithm considered evictable
1633  if (get_algorithm_from_block_scheduler())
1634  while (! get_algorithm_from_block_scheduler()->evictable_blocks_empty())
1635  free_evictable_blocks.insert(get_algorithm_from_block_scheduler()->evictable_blocks_pop());
1636  schedule_next_operations();
1637  }
1638 
1639  void deinit()
1640  {
1641  // TODO remove
1642  if (! scheduled_blocks.empty())
1643  TLX_LOG1 << "deinit while scheduled_blocks not empty";
1644 
1645  if (! scheduled_evictable_blocks.empty())
1646  TLX_LOG1 << "deinit while scheduled_evictable_blocks not empty";
1647 
1648  // empty scheduled_blocks
1649  free_evictable_blocks.insert(scheduled_evictable_blocks.begin(), scheduled_evictable_blocks.end());
1650  //for (typename std::set<swappable_block_identifier_type>::iterator it = scheduled_evictable_blocks.begin();
1651  // it != scheduled_evictable_blocks.end(); ++it)
1652  // free_evictable_blocks.insert(*it);
1653  scheduled_evictable_blocks.clear();
1654  while (! scheduled_blocks.empty())
1655  {
1656  scheduled_blocks_iterator it = scheduled_blocks.begin();
1657  wait_on_read(it);
1658  if (it->second.reserved_iblock)
1659  return_free_internal_block(it->second.reserved_iblock);
1660  scheduled_blocks.erase(it);
1661  }
1662  while (! write_scheduled_blocks.empty())
1663  {
1664  write_scheduled_blocks_iterator it = write_scheduled_blocks.begin();
1665  wait_on_write(it);
1666  }
1667  }
1668 
1669 public:
1672  { init(get_algorithm_from_block_scheduler()); }
1673 
1674  // It is possible to keep an old simulation-algorithm object and reuse it's prediction sequence
1677  { init(old); }
1678 
// Destructor (declaration line lost in the extraction; presumably
// ~block_scheduler_algorithm_offline_lru_prefetching()): quiesce all I/O via
// deinit(), then deinitialize any blocks still evictable and recycle their
// internal blocks.
1680  {
1681  deinit();
1682  if (! free_evictable_blocks.empty())
1683  TLX_LOG1 << "Destructing block_scheduler_algorithm_offline_lru_prefetching that still holds evictable blocks. They get deinitialized.";
1684  while (! free_evictable_blocks.empty())
1685  {
1686  SwappableBlockType& sblock = swappable_blocks[pop_begin(free_evictable_blocks)];
1687  if (internal_block_type* iblock = sblock.deinitialize())
1688  return_free_internal_block(iblock);
1689  }
1690  }
1691 
1692  virtual bool evictable_blocks_empty()
1693  {
1694  deinit();
1695  return free_evictable_blocks.empty();
1696  }
1697 
// evictable_blocks_pop (declaration line lost in the extraction): remove and
// return the first freely evictable block's identifier.
1699  { return pop_begin(free_evictable_blocks); }
1700 
1701  virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized = false)
1702  {
1703  assert(! prediction_sequence.empty());
1704  assert(
1705  prediction_sequence.front().op ==
1706  ((uninitialized) ? block_scheduler_type::op_acquire_uninitialized : block_scheduler_type::op_acquire)
1707  );
1708  assert(prediction_sequence.front().id == sbid);
1709  prediction_sequence.pop_front();
1710  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1711  assert(schedule_meta != scheduled_blocks.end()); // acquire not scheduled or out of internal_blocks (i.e. not enough internal memory)
1712  assert(
1713  schedule_meta->second.operations.back() ==
1714  ((uninitialized) ? block_scheduler_type::op_acquire_uninitialized : block_scheduler_type::op_acquire)
1715  ); // acquire not scheduled or out of internal_blocks (i.e. not enough internal memory)
1716 
1717  SwappableBlockType& sblock = swappable_blocks[sbid];
1718  /* acquired => internal -> increase reference count
1719  internal but not acquired -> remove from scheduled_evictable_blocks, increase reference count
1720  not internal => uninitialized or external -> get internal_block, increase reference count
1721  uninitialized -> fill with default value
1722  external -> read */
1723  if (sblock.is_internal())
1724  {
1725  if (! sblock.is_acquired())
1726  {
1727  // not acquired yet -> remove from scheduled_evictable_blocks
1728  size_t t = scheduled_evictable_blocks.erase(sbid);
1729  tlx::unused(t);
1730 
1731  assert(t != 0);
1732  wait_on_read(schedule_meta);
1733  }
1734  sblock.acquire();
1735  }
1736  else
1737  {
1738  assert(uninitialized || ! sblock.is_initialized()); // initialized blocks should be scheduled to read and thus internal
1739  //get internal_block
1740  sblock.attach_internal_block(get_ready_block(schedule_meta));
1741  sblock.acquire();
1742  //initialize new block
1743  if (! uninitialized)
1744  sblock.fill_default();
1745  }
1746 
1747  operation_done(schedule_meta);
1748  return sblock.get_internal_block();
1749  }
1750 
// Release one reference to sbid (which must match the prediction sequence);
// `dirty` marks the contents modified. Once unacquired, the block is either
// kept resident for a scheduled future use or handed back to the scheduler,
// and a write is scheduled where needed.
1751  virtual void release(swappable_block_identifier_type sbid, const bool dirty)
1752  {
1753  assert(! prediction_sequence.empty());
1754  assert(
1755  prediction_sequence.front().op ==
1756  ((dirty) ? block_scheduler_type::op_release_dirty : block_scheduler_type::op_release)
1757  );
1758  assert(prediction_sequence.front().id == sbid);
1759  prediction_sequence.pop_front();
1760  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1761  assert(schedule_meta != scheduled_blocks.end());
1762  assert(
1763  schedule_meta->second.operations.back() ==
1764  ((dirty) ? block_scheduler_type::op_release_dirty : block_scheduler_type::op_release)
1765  );
1766 
1767  SwappableBlockType& sblock = swappable_blocks[sbid];
1768  sblock.make_dirty_if(dirty);
1769  sblock.release();
1770  if (! sblock.is_acquired())
1771  {
1772  if (sblock.is_dirty() || sblock.is_external())
1773  {
1774  // => evictable
1775  if (shall_keep_internal_block(schedule_meta))
1776  {
1777  // => swappable_block shall keep its internal_block
1778  scheduled_evictable_blocks.insert(sbid);
1779  if (shall_be_cleaned(schedule_meta))
1780  schedule_write(sbid);
1781  }
1782  else
1783  {
1784  // give block to scheduler
1785  free_evictable_blocks.insert(sbid);
      // a freed internal block may unblock further scheduling
1786  if (next_op_to_schedule != prediction_sequence.end())
1787  schedule_next_operations();
1788  else {
1789  if (! write_scheduled_blocks.count(sbid))
1790  schedule_write(sbid);
1791  }
1792  }
1793  }
1794  else
1795  {
1796  // => uninitialized
1797  if (shall_keep_internal_block(schedule_meta))
1798  // => swappable_block shall keep its internal_block
1799  schedule_meta->second.reserved_iblock = sblock.detach_internal_block();
1800  else
1801  {
1802  // release internal block and give it to prefetcher
1803  return_free_internal_block(sblock.detach_internal_block());
1804  if (next_op_to_schedule != prediction_sequence.end())
1805  schedule_next_operations();
1806  }
1807  }
1808  }
1809  operation_done(schedule_meta);
1810  }
1811 
// deinitialize (declaration line lost in the extraction; takes `sbid`, which
// must match the prediction sequence): drop the block's data; its internal
// block is either kept as this entry's reserve for a scheduled future use or
// returned to the free pool.
1813  {
1814  assert(! prediction_sequence.empty());
1815  assert(prediction_sequence.front().op == block_scheduler_type::op_deinitialize);
1816  assert(prediction_sequence.front().id == sbid);
1817  prediction_sequence.pop_front();
1818  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1819  assert(schedule_meta != scheduled_blocks.end());
1820  assert(schedule_meta->second.operations.back() == block_scheduler_type::op_deinitialize);
1821 
1822  SwappableBlockType& sblock = swappable_blocks[sbid];
1823  if (sblock.is_evictable())
1824  {
      // the block must currently sit in exactly one of the two evictable sets
1825  size_t t;
1826  if (shall_keep_internal_block(schedule_meta, false))
1827  {
1828  t = scheduled_evictable_blocks.erase(sbid);
1829  if (t == 0) {
1830  TLX_LOG1 << "dirty block not scheduled on deinitialize";
1831  t = free_evictable_blocks.erase(sbid);
1832  }
1833  }
1834  else
1835  t = free_evictable_blocks.erase(sbid);
1836  assert(t != 0);
1837  }
1838  if (internal_block_type* iblock = sblock.deinitialize())
1839  {
1840  if (shall_keep_internal_block(schedule_meta))
1841  // => swappable_block shall keep its internal_block
1842  schedule_meta->second.reserved_iblock = iblock;
1843  else
1844  {
1845  // release internal block and give it to prefetcher
1846  return_free_internal_block(iblock);
1847  if (next_op_to_schedule != prediction_sequence.end())
1848  schedule_next_operations();
1849  }
1850  }
1851  operation_done(schedule_meta);
1852  }
1853 
// initialize (declaration line lost in the extraction; takes `sbid` and an
// external_block_type `eblock`, which must match the prediction sequence):
// associate the external block and, if the next scheduled operation is an
// acquire, start prefetching immediately using the reserved internal block.
1855  {
1856  assert(! prediction_sequence.empty());
1857  assert(prediction_sequence.front().op == block_scheduler_type::op_initialize);
1858  assert(prediction_sequence.front().id == sbid);
1859  prediction_sequence.pop_front();
1860  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1861  assert(schedule_meta != scheduled_blocks.end());
1862  assert(schedule_meta->second.operations.back() == block_scheduler_type::op_initialize);
1863 
1864  SwappableBlockType& sblock = swappable_blocks[sbid];
1865  sblock.initialize(eblock);
1866  if (shall_be_read(schedule_meta))
1867  {
1868  sblock.attach_internal_block(schedule_meta->second.reserved_iblock);
1869  schedule_meta->second.reserved_iblock = 0;
1870  scheduled_evictable_blocks.insert(sbid);
1871  schedule_read(schedule_meta);
1872  }
1873  operation_done(schedule_meta);
1874  }
1875 
// extract_external_block (declaration line lost in the extraction; takes
// `sbid`, which must match the prediction sequence): wait until any pending
// write of the block has finished, then detach and return its external
// storage.
1877  {
1878  assert(! prediction_sequence.empty());
1879  assert(prediction_sequence.front().op == block_scheduler_type::op_extract_external_block);
1880  assert(prediction_sequence.front().id == sbid);
1881  prediction_sequence.pop_front();
1882  scheduled_blocks_iterator schedule_meta = scheduled_blocks.find(sbid);
1883  assert(schedule_meta != scheduled_blocks.end());
1884  assert(schedule_meta->second.operations.back() == block_scheduler_type::op_extract_external_block);
1885 
1886  SwappableBlockType& sblock = swappable_blocks[sbid];
      // the external block must be fully written before it is handed out
1887  wait_on_write(sbid);
1888  operation_done(schedule_meta);
1889  return sblock.extract_external_block();
1890  }
1891 };
1892 
1893 } // namespace foxxll
1894 
1895 #endif // !FOXXLL_MNG_BLOCK_SCHEDULER_HEADER
1896 
1897 /**************************************************************************/
write_scheduled_blocks_type write_scheduled_blocks
Holds swappable_blocks, whose internal block has been taken away but the clean did not finish yet...
block_scheduler_algorithm_offline_lfd(block_scheduler_type &bs)
virtual void deinitialize(swappable_block_identifier_type sbid)
priority(const SwappableBlockType &sblock, const std::pair< bool, time_type > &t)
internal_block_type & get_internal_block()
Get a reference to the data-block. Has to be acquired.
virtual void initialize(swappable_block_identifier_type sbid, external_block_type)
virtual swappable_block_identifier_type evictable_blocks_pop()
static uint_pair max()
return an uint_pair instance containing the largest value possible
Definition: uint_types.hpp:226
internal_block_type * internal_data
external_data.valid if no associated space on disk
typename SwappableBlockType::internal_block_type internal_block_type
void init(block_scheduler_algorithm_type *old_algo)
static size_t disk_allocation_offset
addressable_fifo_queue< swappable_block_identifier_type > evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
void wait_on_read(const scheduled_blocks_iterator &schedule_meta)
tlx::counting_ptr< request > request_ptr
A reference counting pointer for request.
Definition: request.hpp:43
scheduled_blocks_type scheduled_blocks
Holds not internal swappable_blocks, whose next access has already been scheduled.
void wait_on_write(const write_scheduled_blocks_iterator &writing_block)
internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
std::map< swappable_block_identifier_type, scheduled_block_meta > scheduled_blocks_type
internal_block_type & get_internal_block(const swappable_block_identifier_type sbid) const
bool is_external() const
If it has an external_block that holds valid data.
bool is_evictable() const
If it holds an internal_block but does not need it.
virtual void swappable_blocks_resize(swappable_block_identifier_type)
typename block_scheduler_type::internal_block_type internal_block_type
void release()
Release the block, i.e. subduct a reference. Has to be acquired.
static constexpr bool debug
void attach_internal_block(internal_block_type *iblock)
Attach an internal_block, making the block internal. Has to be not internal.
block_scheduler_algorithm_online_lru(block_scheduler_type &bs)
std::set< swappable_block_identifier_type > scheduled_evictable_blocks
external_block_type external_data
prediction_sequence_element(block_scheduler_operation op, swappable_block_identifier_type id, time_type time)
prediction_sequence_type prediction_sequence
Block scheduling algorithm caching via the longest forward distance policy (offline).
std::stack< internal_block_type * > free_internal_blocks
holds free internal_blocks with attributes reset.
typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
internal_block_type & acquire()
void fill_default()
Fill block with default data, is supposed to be overwritten by subclass. Has to be internal...
void operation_done(scheduled_blocks_iterator &schedule_meta)
void read_sync()
Read synchronously from external_block to internal_block. Has to be internal and have an external_blo...
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)=0
bool valid() const noexcept
test for a non-nullptr pointer
void initialize(const swappable_block_identifier_type sbid, external_block_type eblock)
typename write_scheduled_blocks_type::iterator write_scheduled_blocks_iterator
block_scheduler_algorithm_offline_lfd(block_scheduler_algorithm_type *old)
block_scheduler_algorithm< SwappableBlockType > * switch_algorithm_to(block_scheduler_algorithm< SwappableBlockType > *new_algo)
Pseudo block scheduling algorithm only recording the request sequence.
void delete_block(const BID< BlockSize > &bid)
void return_free_internal_block(internal_block_type *iblock)
void return_free_internal_block_to_block_scheduler(internal_block_type *iblock)
Return an internal_block to the block_scheduler.
block_scheduler_algorithm_online_lru(block_scheduler_algorithm_type *old)
Container::value_type pop_begin(Container &c)
Definition: utils.hpp:133
internal_block_type * get_free_internal_block()
typename write_scheduled_blocks_type::reference write_scheduled_blocks_reference
std::pair< handle, bool > insert(const KeyType &e, const PriorityType o)
virtual swappable_block_identifier_type evictable_blocks_pop()
block_scheduler(const size_t max_internal_memory)
request_ptr read_async(completion_handler on_cmpl=completion_handler())
block_scheduler_algorithm< SwappableBlockType > * get_current_algorithm() const
Return the current algorithm.
virtual void deinitialize(swappable_block_identifier_type sbid)
typename block_scheduler_type::block_scheduler_operation block_scheduler_operation
std::stack< swappable_block_identifier_type > evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
bool shall_keep_internal_block(const scheduled_blocks_iterator &schedule_meta, const bool ignore_first=true) const
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
external_block_type extract_external_block()
virtual void deinitialize(swappable_block_identifier_type sbid)=0
typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
virtual void deinitialize(swappable_block_identifier_type sbid)
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
bool is_initialized(const swappable_block_identifier_type sbid) const
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)
void unused(Types &&...)
Definition: unused.hpp:20
virtual bool is_initialized(const swappable_block_identifier_type sbid) const
void return_free_internal_block(internal_block_type *iblock)
Return an internal_block to the freelist.
const internal_block_type & get_internal_block() const
Get a reference to the data-block. Has to be acquired.
tlx::delegate< void(request *r, bool success)> completion_handler
completion handler
Definition: request.hpp:46
typename internal_block_type::bid_type external_block_type
typename block_scheduler_type::external_block_type external_block_type
void clean_sync()
Write synchronously from internal_block to external_block if necessary.
typename scheduled_blocks_type::reference scheduled_blocks_reference
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
virtual void release(swappable_block_identifier_type sbid, const bool dirty)=0
swappable_block_identifier_type allocate_swappable_block()
std::stack< internal_block_type * > internal_blocks_blocks
Stores pointers to arrays of internal_blocks. Used to deallocate them only.
void initialize(external_block_type eblock)
block_scheduler_algorithm_simulation(block_scheduler_algorithm_type *old)
void swap(CountingPtr< A, D > &a1, CountingPtr< A, D > &a2) noexcept
std::remove_const< Integral >::type div_ceil(Integral n, Integral2 d)
Definition: utils.hpp:64
block_scheduler_algorithm * get_algorithm_from_block_scheduler()
swappable_block()
Create in uninitialized state.
static const size_t raw_block_size
external_block_type extract_external_block(const swappable_block_identifier_type sbid)
bool is_acquired() const
If it is acquired.
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
void release(const swappable_block_identifier_type sbid, const bool dirty)
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
virtual swappable_block_identifier_type evictable_blocks_pop()
typename block_scheduler_algorithm_type::time_type time_type
virtual bool evictable_blocks_empty()=0
std::list< prediction_sequence_element > prediction_sequence_type
typename block_scheduler_type::prediction_sequence_type prediction_sequence_type
FOXXLL library namespace
std::basic_string< char, std::char_traits< char >, Allocator< char > > string
string with Manager tracking
Definition: allocator.hpp:220
write_read_request * schedule_write(const swappable_block_identifier_type sbid)
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)=0
static instance_pointer get_instance()
return instance or create base instance if empty
Definition: singleton.hpp:41
internal_block_type * get_free_internal_block_from_block_scheduler()
block_scheduler_algorithm_type * give_up(std::string err_msg="detected some error in the prediction sequence")
void init(block_scheduler_algorithm_type *old_algo)
block_scheduler_algorithm(block_scheduler_type &bs)
High-performance smart pointer used as a wrapping reference counting pointer.
typename SwappableBlockType::external_block_type external_block_type
void return_free_internal_block(internal_block_type *iblock)
virtual swappable_block_identifier_type evictable_blocks_pop()
request_ptr clean_async(completion_handler on_cmpl=completion_handler())
addressable_priority_queue< swappable_block_identifier_type, priority > evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
std::vector< SwappableBlockType > & swappable_blocks
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)
void deinitialize(const swappable_block_identifier_type sbid)
Drop all data in the given block, freeing in- and external memory.
Block scheduling algorithm caching via the least recently used policy (online).
block_scheduler_algorithm< SwappableBlockType > * algo
Block manager class.
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
bool shall_be_cleaned(const scheduled_blocks_iterator &schedule_meta) const
static uint_pair min()
return an uint_pair instance containing the smallest value possible
Definition: uint_types.hpp:217
typename std::vector< SwappableBlockType >::size_type swappable_block_identifier_type
virtual swappable_block_identifier_type evictable_blocks_pop()=0
bool is_dirty() const
If the external_block does not hold valid data.
void wait_on_write(const scheduled_blocks_iterator &schedule_meta)
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
std::map< swappable_block_identifier_type, write_read_request * > write_scheduled_blocks_type
internal_block_type * deinitialize()
void free_swappable_block(const swappable_block_identifier_type sbid)
const prediction_sequence_type & get_prediction_sequence() const
std::priority_queue< swappable_block_identifier_type, std::vector< swappable_block_identifier_type >, std::greater< swappable_block_identifier_type > > free_swappable_blocks
holds indices of free swappable_blocks with attributes reset.
block_scheduler_algorithm(block_scheduler_algorithm *old)
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)=0
std::pair< handle, bool > insert(const KeyType &e)
typename std::vector< SwappableBlockType >::iterator swappable_blocks_iterator
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
void new_block(const DiskAssignFunctor &functor, BID< BlockSize > &bid, size_t alloc_offset=0)
Allocates a new block according to the strategy given by functor and stores the block identifier to b...
void return_free_internal_block(internal_block_type *iblock)
typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
request_ptr write(const bid_type &bid, completion_handler on_complete=completion_handler())
Writes block to the disk(s).
typename block_scheduler_type::time_type time_type
Request object encapsulating basic properties like file and offset.
Definition: request.hpp:49
internal_block_type * get_ready_block(const scheduled_blocks_iterator &schedule_meta)
#define TLX_LOG1
Definition: core.hpp:145
BID< raw_size > bid_type
typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
bool is_initialized() const
If it has some valid data (in- or external).
bool make_dirty_if(const bool make_dirty)
std::set< swappable_block_identifier_type > free_evictable_blocks
Holds swappable blocks, whose internal block can be freed, i.e. that are internal but unacquired...
typename block_scheduler_type::prediction_sequence_element prediction_sequence_element_type
virtual void initialize(swappable_block_identifier_type sbid, external_block_type eblock)
void wait_on_write(const swappable_block_identifier_type &writing_block)
typename scheduled_blocks_type::iterator scheduled_blocks_iterator
bool shall_be_read(const scheduled_blocks_iterator &schedule_meta, const bool ignore_first=true) const
std::deque< std::pair< bool, time_type > > next_use
Stores for the sequence of releases extracted from the prediction_sequence: (true, timestamp of the blocks next acquire) if it is acquired next (false, 0) if it is deinitialized next (false, 1) if it is not accessed any more (false, 2) if it is extracted next (false, 3) if it is initialized next.
bool operator<(const uint_pair &b) const
less-than comparison operator
Definition: uint_types.hpp:187
bool has_external_block() const
If it has an external_block.
virtual void swappable_blocks_resize(swappable_block_identifier_type size)
virtual const prediction_sequence_type & get_prediction_sequence() const
request_ptr read(const bid_type &bid, completion_handler on_complete=completion_handler())
Reads block from the disk(s).
block_scheduler_algorithm_simulation(block_scheduler_type &bs)
typename block_scheduler_type::internal_block_type internal_block_type
static const size_t max_internal_blocks_alloc_at_once
typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type
bool try_interrupt_read(const write_scheduled_blocks_iterator &writing_block)
virtual void release(swappable_block_identifier_type sbid, const bool dirty)
virtual bool is_initialized(const swappable_block_identifier_type sbid) const
Interface of a block scheduling algorithm.
internal_block_type * detach_internal_block()
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)
void schedule_read(scheduled_blocks_iterator block_to_read)
std::vector< SwappableBlockType > swappable_blocks
temporary blocks that will not be needed after algorithm termination.
virtual internal_block_type & acquire(const swappable_block_identifier_type sbid, const bool uninitialized=false)
bool is_internal() const
If it has an internal_block. The internal_block implicitly holds valid data.
#define TLX_LOG
Default logging method: output if the local debug variable is true.
Definition: core.hpp:141
block_scheduler_algorithm_offline_lru_prefetching(block_scheduler_algorithm_type *old)
virtual void deinitialize(swappable_block_identifier_type sbid)
virtual external_block_type extract_external_block(swappable_block_identifier_type sbid)