C++ multithreaded parallel computation

Notes while reading "C++ Concurrency in Action". The example below is the book's parallel_accumulate: it splits the input range into blocks, accumulates each block on its own thread, and sums the partial results on the calling thread.

#include <iostream>
#include <algorithm>
#include <numeric>
#include <thread>
#include <vector>
#include <functional>
#include <iterator>   // std::distance, std::advance
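
// Callable used as each thread's entry point: accumulates [first,last) into result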
template<class Iterator,class T>
struct accumulate_block {
    void operator()(Iterator first,Iterator last,T &result) {
        result = std::accumulate(first,last,result);
    }
};

template<class Iterator,class T>
T parallel_accumulate(Iterator first,Iterator last,T init) {
    unsigned long const length = std::distance(first,last);
    if(!length)return init;   // empty range: nothing to accumulate
    // Minimum number of elements each thread should handle, so that a
    // small range does not spawn an excessive number of threads
    unsigned long const min_per_thread = 25;
    // Cap the thread count by the data size: ceiling of length / min_per_thread
    unsigned long const max_threads = 
        (length + min_per_thread - 1) / min_per_thread;
    unsigned long const hardware_threads = 
        std::thread::hardware_concurrency();
    // Use the smaller of the hardware concurrency (fall back to 2 if it is unknown)
    // and the data-derived cap
    unsigned long const num_threads = 
        std::min(hardware_threads != 0 ? hardware_threads : 2,max_threads);
    // Number of elements each thread processes
    unsigned long const block_size = length / num_threads;
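    // One partial-result slot per thread; the calling thread handles the final
    // block itself, so only num_threads - 1 worker threads are needed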
    std::vector<T> result(num_threads);
    std::vector<std::thread> threads(num_threads - 1);
    Iterator block_start = first;
    for(unsigned long i = 0;i < num_threads - 1;i ++) {
        Iterator block_end = block_start;
        std::advance(block_end,block_size);
        // Launch a worker that accumulates [block_start, block_end) into result[i]
        threads[i] = std::thread {
            accumulate_block<Iterator,T>(),
            block_start,
            block_end,
            std::ref(result[i])
        };
        block_start = block_end;
    }
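    // The calling thread accumulates the final block, which also absorbs any
    // elements left over by the integer division above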
    accumulate_block<Iterator,T>()(
        block_start,last,result[num_threads - 1]
    );
    for(auto &&th:threads)
        th.join();
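    // Combine the per-thread partial sums with the caller's initial value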
    return std::accumulate(result.begin(),result.end(),init);
}

int main() {
    std::vector<int> ans {1,2,3,4,5};
    auto res = parallel_accumulate(ans.begin(),ans.end(),0);
    std::cout << res << "\n";
    return 0;
}
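
With only five elements, the main above ends up computing everything on the calling thread (max_threads works out to 1), so a larger input is needed to actually exercise the worker threads. Below is a minimal sketch that replaces the toy main, reusing parallel_accumulate and the includes from the listing; the data size is an arbitrary choice, and the std::reduce line is an optional C++17 comparison that needs <execution> and, on GCC/libstdc++, typically linking with -ltbb. Build with something like g++ -std=c++17 -pthread.

#include <execution>   // std::execution::par (C++17, optional comparison)

int main() {
    std::vector<long long> data(1'000'000);
    std::iota(data.begin(),data.end(),1LL);   // 1, 2, ..., 1'000'000

    long long expected = std::accumulate(data.begin(),data.end(),0LL);
    long long parallel = parallel_accumulate(data.begin(),data.end(),0LL);
    // Standard-library parallel reduction, for comparison only
    long long reduced  = std::reduce(std::execution::par,data.begin(),data.end(),0LL);

    std::cout << expected << " " << parallel << " " << reduced << "\n";
    return 0;
}

All three results should agree; if the parallel execution policy is unavailable on your toolchain, simply drop the std::reduce line.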

Original article: https://www.cnblogs.com/xianning7/p/14771459.html
