Commit bcb9817f authored by neverlord's avatar neverlord

performance tests

parent 920472e6
# Build configuration for the queue performance tests (queue_test).
CXX = /usr/bin/g++-4.6
CXXFLAGS = -std=c++0x -pedantic -Wall -Wextra -O2
INCLUDES = -I./
LIBS = -pthread
# cache line size is injected here so the headers need no config file
FLAGS = -DCACHE_LINE_SIZE=64
EXECUTABLE = queue_test
HEADERS = blocking_cached_stack2.hpp blocking_cached_stack.hpp blocking_sutter_list.hpp cached_stack.hpp defines.hpp intrusive_sutter_list.hpp lockfree_list.hpp sutter_list.hpp
SOURCES = main.cpp
OBJECTS = $(SOURCES:.cpp=.o)

# every object depends on all headers: the queues are header-only templates
%.o : %.cpp $(HEADERS)
	$(CXX) $(CXXFLAGS) $(INCLUDES) $(FLAGS) -c $< -o $@

$(EXECUTABLE) : $(OBJECTS) $(HEADERS)
	$(CXX) $(LIBS) $(OBJECTS) -o $(EXECUTABLE)

all : $(EXECUTABLE)
......
#ifndef BLOCKING_CACHED_STACK_HPP
#define BLOCKING_CACHED_STACK_HPP

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

#include <boost/thread.hpp>

#include "defines.hpp"
......@@ -11,115 +11,115 @@ template<typename T>
// Blocking multi-producer queue with a consumer-side cache.
//
// Producers push onto a lock-free LIFO (m_stack); the consumer drains the
// whole LIFO with a single CAS and reverses it into a private singly linked
// list (m_head), which yields overall FIFO pop order. The mutex/condition
// variable pair is only touched when the queue might be empty, so the
// common path stays lock-free.
//
// Requirements on T: a public "T* next" member. Elements are heap-allocated
// and owned by the queue once pushed (the destructor deletes leftovers).
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 // fallback; the Makefile passes -DCACHE_LINE_SIZE=64
#endif
template<typename T>
class blocking_cached_stack
{

    // singly linked list, serves as consumer-private cache (FIFO order)
    T* m_head;
    char m_pad1[CACHE_LINE_SIZE - sizeof(T*)]; // pad to avoid false sharing

    // modified by producers (LIFO order); drained by the consumer
    std::atomic<T*> m_stack;
    char m_pad2[CACHE_LINE_SIZE - sizeof(std::atomic<T*>)];

    // locked on enqueue/dequeue operations to/from an empty list
    std::mutex m_mtx;
    std::condition_variable m_cv;

    typedef std::unique_lock<std::mutex> lock_type;

    // read all elements of m_stack, convert them to FIFO order and store
    // them in m_head
    // precondition: m_head == nullptr
    // returns false if there was nothing to consume
    bool consume_stack()
    {
        T* e = m_stack.load();
        while (e)
        {
            if (m_stack.compare_exchange_weak(e, 0))
            {
                // m_stack is now empty (m_stack == nullptr);
                // reverse the LIFO chain into m_head
                while (e)
                {
                    T* next = e->next;
                    e->next = m_head;
                    m_head = e;
                    e = next;
                }
                return true;
            }
            // compare_exchange_weak reloaded e on failure; retry
        }
        // nothing to consume
        return false;
    }

    // blocks the calling (consumer) thread until data is available
    void wait_for_data()
    {
        if (!m_head && !(m_stack.load()))
        {
            lock_type lock(m_mtx);
            while (!(m_stack.load())) m_cv.wait(lock);
        }
    }

 public:

    blocking_cached_stack() : m_head(0)
    {
        m_stack = 0;
    }

    ~blocking_cached_stack()
    {
        do
        {
            while (m_head)
            {
                T* next = m_head->next;
                delete m_head;
                m_head = next;
            }
        }
        // repeat if m_stack is not empty
        while (consume_stack());
    }

    // enqueues what; takes ownership
    void push(T* what)
    {
        T* e = m_stack.load();
        for (;;)
        {
            what->next = e;
            if (!e)
            {
                // queue might be empty: publish under the lock so a
                // sleeping consumer cannot miss the notification
                lock_type lock(m_mtx);
                if (m_stack.compare_exchange_weak(e, what))
                {
                    m_cv.notify_one();
                    return;
                }
            }
            // compare_exchange_weak stores the
            // new value to e if the operation fails
            else if (m_stack.compare_exchange_weak(e, what)) return;
        }
    }

    // dequeues one element in FIFO order; returns nullptr if empty
    T* try_pop()
    {
        if (m_head || consume_stack())
        {
            T* result = m_head;
            m_head = m_head->next;
            return result;
        }
        return 0;
    }

    // blocks until an element could be dequeued
    T* pop()
    {
        wait_for_data();
        return try_pop();
    }

};
......
......@@ -11,129 +11,129 @@ template<typename T>
// Variant of blocking_cached_stack that swaps in a pre-allocated dummy
// element (instead of nullptr) when draining the stack, so producers that
// race with the drain still see a non-empty stack and skip the mutex path,
// reducing lock operations.
//
// Requirements on T: default-constructible (for the dummy) and a public
// "T* next" member. Elements are owned by the queue once pushed.
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 // fallback; the Makefile passes -DCACHE_LINE_SIZE=64
#endif
template<typename T>
class blocking_cached_stack2
{

    // singly linked list, serves as consumer-private cache (FIFO order)
    T* m_head;
    char m_pad1[CACHE_LINE_SIZE - sizeof(T*)]; // pad to avoid false sharing

    // modified by producers (LIFO order); drained by the consumer
    std::atomic<T*> m_stack;
    char m_pad2[CACHE_LINE_SIZE - sizeof(std::atomic<T*>)];

    // sentinel marking the end of a drained stack
    T* m_dummy;
    // pad for the pointer member (was sizeof(T), which over- or under-pads
    // and breaks for sizeof(T) > CACHE_LINE_SIZE)
    char m_pad3[CACHE_LINE_SIZE - sizeof(T*)];

    // locked on enqueue/dequeue operations to/from an empty list
    std::mutex m_mtx;
    std::condition_variable m_cv;

    typedef std::unique_lock<std::mutex> lock_type;

    // read all elements of m_stack, convert them to FIFO order and store
    // them in m_head
    // precondition: m_head == nullptr
    void consume_stack()
    {
        T* e = m_stack.load();
        while (e)
        {
            // enqueue dummy instead of nullptr to reduce
            // lock operations
            if (m_stack.compare_exchange_weak(e, m_dummy))
            {
                // m_stack is now empty (m_stack == m_dummy)
                // m_dummy marks always the end of the stack
                while (e && e != m_dummy)
                {
                    T* next = e->next;
                    // enqueue to m_head
                    e->next = m_head;
                    m_head = e;
                    // next iteration
                    e = next;
                }
                return;
            }
        }
        // nothing to consume
    }

    // blocks the calling (consumer) thread until data is available
    void wait_for_data()
    {
        if (!m_head)
        {
            T* e = m_stack.load();
            // reset a stale dummy to nullptr so producers take the
            // notifying (locked) push path again
            while (e == m_dummy)
            {
                if (m_stack.compare_exchange_weak(e, 0)) e = 0;
            }
            if (!e)
            {
                lock_type lock(m_mtx);
                while (!(m_stack.load())) m_cv.wait(lock);
            }
            consume_stack();
        }
    }

    // deletes all elements cached in m_head
    void delete_head()
    {
        while (m_head)
        {
            T* next = m_head->next;
            delete m_head;
            m_head = next;
        }
    }

 public:

    blocking_cached_stack2() : m_head(0)
    {
        m_stack = 0;
        m_dummy = new T;
    }

    ~blocking_cached_stack2()
    {
        delete_head();
        T* e = m_stack.load();
        if (e && e != m_dummy)
        {
            consume_stack();
            delete_head();
        }
        delete m_dummy;
    }

    // enqueues what; takes ownership
    void push(T* what)
    {
        T* e = m_stack.load();
        for (;;)
        {
            what->next = e;
            if (!e)
            {
                // queue might be empty: publish under the lock so a
                // sleeping consumer cannot miss the notification
                lock_type lock(m_mtx);
                if (m_stack.compare_exchange_weak(e, what))
                {
                    m_cv.notify_one();
                    return;
                }
            }
            // compare_exchange_weak stores the
            // new value to e if the operation fails
            else if (m_stack.compare_exchange_weak(e, what)) return;
        }
    }

    // blocks until an element could be dequeued (FIFO order)
    T* pop()
    {
        wait_for_data();
        T* result = m_head;
        m_head = m_head->next;
        return result;
    }

};
......
......@@ -15,99 +15,99 @@ template<typename T>
// Blocking unbounded FIFO queue based on Herb Sutter's two-lock queue
// ("Writing Lock-Free Code: A Corrected Queue"): a dummy head node,
// a spin lock serializing producers, and a mutex/condition variable used
// only when a consumer may have to sleep on an empty queue.
// The queue owns the T objects handed to push().
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 // fallback; the Makefile passes -DCACHE_LINE_SIZE=64
#endif
template<typename T>
class blocking_sutter_list
{

    struct node
    {
        node(T* val = 0) : value(val), next(0) { }
        T* value;
        std::atomic<node*> next;
        // pad so adjacent nodes do not share a cache line
        char pad[CACHE_LINE_SIZE - sizeof(T*) - sizeof(std::atomic<node*>)];
    };

    // accessed by one consumer at a time
    node* m_first;
    char m_pad1[CACHE_LINE_SIZE - sizeof(node*)];

    // accessed by one producer at a time
    node* m_last;
    char m_pad2[CACHE_LINE_SIZE - sizeof(node*)];

    // shared among producers (spin lock)
    std::atomic<bool> m_producer_lock;
    char m_pad3[CACHE_LINE_SIZE - sizeof(std::atomic<bool>)];

    // locked on enqueue/dequeue operations to/from an empty list
    std::mutex m_mtx;
    std::condition_variable m_cv;

    typedef std::unique_lock<std::mutex> lock_type;

 public:

    blocking_sutter_list()
    {
        // start with a dummy node: m_first == m_last <=> queue is empty
        m_first = m_last = new node;
        m_producer_lock = false;
    }

    ~blocking_sutter_list()
    {
        while (m_first)
        {
            node* tmp = m_first;
            m_first = tmp->next;
            // also release the payload of un-consumed elements
            // (nullptr for the dummy and for already-popped nodes)
            delete tmp->value;
            delete tmp;
        }
    }

    // takes ownership of what
    void push(T* what)
    {
        bool consumer_might_sleep = 0;
        node* tmp = new node(what);
        // acquire exclusivity among producers
        while (m_producer_lock.exchange(true))
        {
            std::this_thread::yield();
        }
        // do we have to wakeup a sleeping consumer?
        // this is a sufficient condition because m_last->value is 0
        // if and only if m_head == m_tail
        consumer_might_sleep = (m_last->value == 0);
        // publish & swing last forward
        m_last->next = tmp;
        m_last = tmp;
        // release exclusivity
        m_producer_lock = false;
        // wakeup consumer if needed
        if (consumer_might_sleep)
        {
            lock_type lock(m_mtx);
            m_cv.notify_one();
        }
    }

    // blocks until an element was dequeued
    T* pop()
    {
        node* first = m_first;
        node* next = m_first->next;
        if (!next)
        {
            // queue looks empty: sleep until a producer publishes
            lock_type lock(m_mtx);
            while (!(next = m_first->next))
            {
                m_cv.wait(lock);
            }
        }
        T* result = next->value; // take it out
        next->value = 0;         // of the node
        // swing first forward; next becomes the new dummy
        m_first = next;
        // delete old dummy
        delete first;
        // done
        return result;
    }

};
......
......@@ -95,7 +95,7 @@ class cached_stack
T* result = try_pop();
while (!result)
{
boost::this_thread::yield();
std::this_thread::yield();
result = try_pop();
}
return result;
......
......@@ -62,7 +62,7 @@ class intrusive_sutter_list
// acquire exclusivity
while (m_producer_lock.exchange(true))
{
boost::this_thread::yield();
std::this_thread::yield();
}
// publish & swing last forward
m_last->next = tmp;
......@@ -97,7 +97,7 @@ class intrusive_sutter_list
T result;
while (!try_pop(result))
{
boost::this_thread::yield();
std::this_thread::yield();
}
return result;
}
......
......@@ -90,7 +90,7 @@ class lockfree_list
T result;
while (!try_pop(result))
{
boost::this_thread::yield();
std::this_thread::yield();
}
return result;
}
......
This diff is collapsed.
......@@ -59,7 +59,7 @@ class sutter_list
// acquire exclusivity
while (m_producer_lock.exchange(true))
{
boost::this_thread::yield();
std::this_thread::yield();
}
// publish & swing last forward
m_last->next = tmp;
......@@ -96,7 +96,7 @@ class sutter_list
T* result = try_pop();
while (!result)
{
boost::this_thread::yield();
std::this_thread::yield();
result = try_pop();
}
return result;
......
......@@ -9,6 +9,19 @@
#include "cppa/util/single_reader_queue.hpp"
//#define DEBUG_RESULTS
// "config"
namespace {
const size_t slave_messages = 1000000;
const size_t trials = 10;
} // namespace <anonymous>
using cppa::util::single_reader_queue;
using std::cout;
......@@ -107,7 +120,7 @@ class locked_queue
}
}
void push(element_type* new_element)
void push_back(element_type* new_element)
{
lock_type guard(m_mtx);
if (m_pub.empty())
......@@ -150,43 +163,75 @@ void slave(Queue& q, size_t from, size_t to)
}
template<typename Queue, size_t num_slaves, size_t num_slave_msgs>
void master(Queue& q)
void master()
{
static const size_t num_msgs = (num_slaves) * (num_slave_msgs);
static const size_t calc_result = ((num_msgs)*(num_msgs + 1)) / 2;
boost::timer t0;
for (size_t i = 0; i < num_slaves; ++i)
{
size_t from = (i * num_slave_msgs) + 1;
size_t to = from + num_slave_msgs;
boost::thread(slave<Queue>, boost::ref(q), from, to).detach();
}
size_t result = 0;
size_t min_val = calc_result;
size_t max_val = 0;
for (size_t i = 0; i < num_msgs; ++i)
//cout << num_slaves << " workers; running test";
//cout.flush();
double elapsed[trials];
for (size_t i = 0; i < trials; ++i)
{
queue_element* e = q.pop();
result += e->value;
min_val = std::min(min_val, e->value);
max_val = std::max(max_val, e->value);
delete e;
//cout << " ... " << (i + 1);
//cout.flush();
Queue q;
boost::timer t0;
for (size_t j = 0; j < num_slaves; ++j)
{
size_t from = (j * num_slave_msgs) + 1;
size_t to = from + num_slave_msgs;
boost::thread(slave<Queue>, boost::ref(q), from, to).detach();
}
size_t result = 0;
# ifdef DEBUG_RESULTS
size_t min_val = calc_result;
size_t max_val = 0;
# endif
for (size_t j = 0; j < num_msgs; ++j)
{
queue_element* e = q.pop();
result += e->value;
# ifdef DEBUG_RESULTS
min_val = std::min(min_val, e->value);
max_val = std::max(max_val, e->value);
# endif
delete e;
}
if (result != calc_result)
{
cerr << "ERROR: result = " << result
<< " (should be: " << calc_result << ")"
# ifdef DEBUG_RESULTS
<< endl << "min: " << min_val
<< endl << "max: " << max_val
# endif
<< endl;
}
elapsed[i] = t0.elapsed();
//cout << t0.elapsed() << " " << num_slaves << endl;
}
if (result != calc_result)
//cout << endl;
double sum = 0;
//cout << "runtimes = { ";
for (size_t i = 0; i < trials; ++i)
{
cerr << "ERROR: result = " << result
<< " (should be: " << calc_result << ")" << endl
<< "min: " << min_val << endl
<< "max: " << max_val << endl;
//cout << (i == 0 ? "" : ", ") << elapsed[i];
sum += elapsed[i];
}
cout << t0.elapsed() << " " << num_slaves << endl;
//cout << " }" << endl;
//cout << "AVG = " << (sum / trials) << endl;
cout << (sum / trials) << " " << num_slaves << endl;
}
namespace { const size_t slave_messages = 1000000; }
template<size_t Pos, size_t Max, size_t Step,
template<size_t> class Stmt>
struct static_for
......@@ -222,8 +267,7 @@ struct test_step
template<typename QueueToken>
static inline void _(QueueToken)
{
typename QueueToken::type q;
boost::thread t0(master<typename QueueToken::type, NumThreads, slave_messages>, boost::ref(q));
boost::thread t0(master<typename QueueToken::type, NumThreads, slave_messages>);
t0.join();
}
};
......@@ -237,8 +281,13 @@ void test_q_impl()
void test__queue_performance()
{
cout << "Format: "
"(average value of 10 runs) "
// "(standard deviation) "
"(number of worker threads)"
<< endl;
cout << "locked_queue:" << endl;
// test_q_impl<locked_queue<queue_element>>();
test_q_impl<locked_queue<queue_element>>();
cout << endl;
cout << "single_reader_queue:" << endl;
test_q_impl<single_reader_queue<queue_element>>();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment