【Rust】Using a semaphore in Rust

I'd rather call this from a function instead of doing everything in main…

use std_semaphore::Semaphore;
use std::thread;
use std::time::Duration;

static TOTAL_SPOTS: u32 = 3;

fn main() {
    let sem = Semaphore::new(TOTAL_SPOTS.try_into().unwrap());

    let mut parked_cars: Vec<u32> = Vec::new();

    let car: u32 = 1;

    // enter
    sem.acquire();
    parked_cars.push(car);
    thread::sleep(Duration::from_millis(500));
    println!("{:?}", parked_cars);

    // exit
    parked_cars.clear();
    sem.release();

    println!("{:?}", parked_cars);
}

Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.17s
Running `target/debug/parallel`
[1]
[]
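
A minimal sketch of pulling this out of main: the same std_semaphore::Semaphore is shared across threads with an Arc, and a park_car function (illustrative name, not from the listing above) does the acquire/release, with the parked_cars list behind a Mutex so the threads can share it:

use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use std_semaphore::Semaphore;

static TOTAL_SPOTS: u32 = 3;

// Illustrative helper: a car waits for a free spot, parks for a while, then leaves.
fn park_car(sem: &Semaphore, parked_cars: &Mutex<Vec<u32>>, car: u32) {
    sem.acquire(); // blocks while all spots are taken
    {
        let mut spots = parked_cars.lock().unwrap();
        spots.push(car);
        println!("car {} parked: {:?}", car, *spots);
    }
    thread::sleep(Duration::from_millis(500));
    parked_cars.lock().unwrap().retain(|&c| c != car);
    println!("car {} leaving", car);
    sem.release(); // frees the spot for the next car
}

fn main() {
    let sem = Arc::new(Semaphore::new(TOTAL_SPOTS as isize));
    let parked_cars = Arc::new(Mutex::new(Vec::new()));

    let handles: Vec<_> = (1..=10)
        .map(|car| {
            let sem = Arc::clone(&sem);
            let parked_cars = Arc::clone(&parked_cars);
            thread::spawn(move || park_car(&sem, &parked_cars, car))
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}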

【Parallel Processing】Semaphore and Mutex Lock

import typing as T
import time
import random
from threading import Thread, Semaphore, Lock

TOTAL_SPOTS = 3

class Garage:

    def __init__(self) -> None:
        self.semaphore = Semaphore(TOTAL_SPOTS)
        self.cars_lock = Lock()
        self.parked_cars: T.List[str] = []

    def count_parked_cars(self) -> int:
        return len(self.parked_cars)

    def enter(self, car_name: str) -> None:
        self.semaphore.acquire()
        self.cars_lock.acquire()
        self.parked_cars.append(car_name)
        print(f"{car_name} parked")
        self.cars_lock.release()

    def exit(self, car_name: str) -> None:
        self.cars_lock.acquire()
        self.parked_cars.remove(car_name)
        print(f"{car_name} leaving")
        self.semaphore.release()
        self.cars_lock.release()

def park_car(garage: Garage, car_name: str) -> None:
    garage.enter(car_name)
    time.sleep(random.uniform(1, 2))
    garage.exit(car_name)

def test_garage(garage: Garage, number_of_cars: int = 10) -> None:
    threads = []
    for car_num in range(number_of_cars):
        t = Thread(target=park_car,
            args=(garage, f"Car #{car_num}"))
        threads.append(t)
        t.start()

    for thread in threads:
        thread.join()

if __name__ == "__main__":
    number_of_cars = 10
    garage = Garage()
    test_garage(garage, number_of_cars)

    print("Number of parked cars after a busy day:")
    print(f"Actual: {garage.count_parked_cars()}\nExpected: 0")

$ python3 semaphore.py
Car #0 parked
Car #1 parked
Car #2 parked
Car #1 leaving
Car #3 parked
Car #2 leaving
Car #4 parked
Car #0 leaving
Car #5 parked
Car #3 leaving
Car #6 parked
Car #4 leaving
Car #7 parked
Car #5 leaving
Car #8 parked
Car #6 leaving
Car #9 parked
Car #7 leaving
Car #8 leaving
Car #9 leaving
Number of parked cars after a busy day:
Actual: 0
Expected: 0

Parallel processing with Fork/Join

The key point seems to be chunking the data so that each worker gets its own slice.

import typing as T
import random
from multiprocessing.pool import ThreadPool

Summary = T.Mapping[int, int]

def process_votes(pile: T.List[int], worker_count: int = 4) -> Summary:
    vote_count = len(pile)
    vpw = vote_count // worker_count

    vote_piles = [
        pile[i * vpw:(i + 1) * vpw]
        for i in range(worker_count)
    ]

    with ThreadPool(worker_count) as pool:
        worker_summaries = pool.map(process_pile, vote_piles)
    
    total_summary = {}
    for worker_summary in worker_summaries:
        print(f"Votes from staff member: {worker_summary}")
        for candidate, count in worker_summary.items():
            if candidate in total_summary:
                total_summary[candidate] += count
            else:
                total_summary[candidate] = count
    
    return total_summary

def process_pile(pile: T.List[int]) -> Summary:
    summary = {}
    for vote in pile:
        if vote in summary:
            summary[vote] += 1
        else:
            summary[vote] = 1
    return summary

if __name__ == "__main__":
    num_candidates = 3
    num_voters = 100
    pile = [random.randint(1, num_candidates) for _ in range(num_voters)]
    counts = process_votes(pile)
    print(f"Total number of votes: {counts}")

$ python3 count_votes_sequential.py
Votes from staff member: {3: 9, 2: 13, 1: 3}
Votes from staff member: {1: 5, 3: 8, 2: 12}
Votes from staff member: {3: 10, 2: 5, 1: 10}
Votes from staff member: {1: 10, 3: 8, 2: 7}
Total number of votes: {3: 35, 2: 37, 1: 28}

This is great… let me write it in Rust.

use rand::Rng;
use std::collections::HashMap;
use std::sync::Mutex;

static Summaries: Mutex<Vec<HashMap<u32, u32>>> = Mutex::new(Vec::new());

fn process_vote(pile: Vec<u32>, worker_count: u32) {
    let vote_count = pile.len();
    let vpw = vote_count / worker_count as usize;
    println!("{}", vpw);
    let mut vote_piles : Vec<Vec<u32>> = Vec::new();
    for i in 0..worker_count {
        let chunk: Vec<u32> = pile[i as usize * vpw..(i + 1) as usize * vpw].to_vec();
        vote_piles.push(chunk)
    }
    println!("{:?}", vote_piles);

    let pool = rayon::ThreadPoolBuilder::new().num_threads(worker_count as usize).build().unwrap();
    for vote_pile in vote_piles {
        pool.install(move || {
            let result = process_pile(vote_pile); 
            Summaries.lock().unwrap().push(result);
        });
    }
    println!("{:?}", Summaries);
    let mut total_summary = HashMap::new();
    for summary in Summaries.lock().unwrap().clone().into_iter() {
        for (candidate, count) in summary {
            if total_summary.get(&candidate) != None {
                let n = total_summary.get(&candidate).unwrap();
                total_summary.insert(candidate, count + n);
            } else {
                total_summary.insert(candidate, count);
            }
        }
    }
    println!("{:?}", total_summary);
}

fn process_pile(pile: Vec<u32>) -> HashMap<u32, u32> {
    let mut summary = HashMap::new();
    for vote in pile {
        if summary.get(&vote) != None {
            let count = summary.get(&vote).unwrap();
            summary.insert(vote, count + 1);
        } else {
            summary.insert(vote, 1);
        }
    }
    summary
}

fn main(){    
    
    let num_voters = 500;
    let num_candidate = 3;
    
    let mut pile: Vec<u32> = Vec::new();
    let mut rnd = rand::thread_rng();
    for _ in 0..num_voters {
        pile.push(rnd.gen_range(1..(num_candidate + 1)))
    }
    let summary = process_pile(pile.clone());
    println!("{:?}", summary);

    process_vote(pile, 4);
}

Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.41s
Running `target/debug/parallel`
{2: 172, 1: 172, 3: 156}
125
Mutex { data: [{1: 49, 2: 39, 3: 37}, {2: 49, 1: 38, 3: 38}, {2: 43, 1: 39, 3: 43}, {2: 41, 1: 46, 3: 38}], poisoned: false, .. }
{2: 172, 3: 156, 1: 172}

Whoa, this is actually pretty amazing!
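
One caveat: pool.install blocks the calling thread until its closure returns, so the loop above actually processes the piles one after another on the pool rather than in parallel. A rough sketch of a more idiomatic fork/join, reusing process_pile from the listing above and assuming `use rayon::prelude::*;` on top of the existing imports — chunks() replaces the manual slicing (and keeps any leftover votes), and collect() gathers the per-worker summaries so the global Mutex isn't needed:

fn process_votes_parallel(pile: Vec<u32>, worker_count: usize) -> HashMap<u32, u32> {
    // Assumes worker_count <= pile.len(); chunks(0) would panic.
    let vpw = pile.len() / worker_count;
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(worker_count)
        .build()
        .unwrap();

    // Fork: each chunk is counted on a pool thread.
    // Join: collect() gathers the per-worker summaries back into a Vec.
    let worker_summaries: Vec<HashMap<u32, u32>> = pool.install(|| {
        pile.chunks(vpw)
            .collect::<Vec<_>>()
            .par_iter()
            .map(|chunk| process_pile(chunk.to_vec()))
            .collect()
    });

    // Merge the partial summaries sequentially.
    let mut total_summary = HashMap::new();
    for summary in worker_summaries {
        for (candidate, count) in summary {
            *total_summary.entry(candidate).or_insert(0) += count;
        }
    }
    total_summary
}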

【Rust】String search with a ThreadPool

It's not any faster at all though. Am I using it wrong?

use std::env;
use std::fs;
use std::path::Path;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::time::Instant;
use threadpool::ThreadPool;

fn search_file(filename: String, search_string: String) -> Result<bool, Box<dyn std::error::Error>> {
    for line in BufReader::new(File::open(filename.clone())?).lines() {
        if line.unwrap().contains(&search_string) {
            return Ok(true)
        }
    }
    Ok(false)
}

fn get_files(dirname: &str) -> io::Result<Vec<String>> {
    let mut entries: Vec<String> = Vec::new();
    let dir = Path::new(dirname);
    if dir.is_dir(){
        for entry in fs::read_dir(dirname)? {
            let e = entry?;
            let p = e.path().file_name().unwrap().to_string_lossy().into_owned();
            entries.push(p);
        }
    }
    Ok(entries)
}

fn search_files_sequentially(file_locations: String, search_string: String) {
    let entries: Vec<String> = get_files(&file_locations).unwrap();
    for entry in entries {
        let path = format!("{}{}", file_locations, entry);
        if search_file(path, search_string.clone()).unwrap() {
            println!("Found word in file: {}", entry);
        }
    }
}

fn search_files_concurrent(file_locations: String, search_string: String) {
    let entries: Vec<String> = get_files(&file_locations).unwrap();
    let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap();
    for entry in entries {
        let path = format!("{}{}", file_locations, entry);
        let needle = search_string.clone();
        pool.install(move || {
            if search_file(path, needle).unwrap() {
                println!("Found word in file: {}", entry);
            }
        });
    }
}

fn main(){    
    let now = Instant::now();
    search_files_sequentially("./src/".to_string(), "queue".to_string());
    println!("{:?}", now.elapsed());

    let now = Instant::now();
    search_files_concurrent("./src/".to_string(), "queue".to_string());
    println!("{:?}", now.elapsed());
}

Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.02s
Running `target/debug/parallel queue`
Found word in file: pipeline.rs
Found word in file: main.rs
422.486µs
Found word in file: pipeline.rs
Found word in file: main.rs
701.462µs
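
Part of the answer seems to be that pool.install blocks until its closure finishes, so the "concurrent" version above still searches the files one at a time and just adds thread-pool overhead on top; with only a few small files there isn't much to gain anyway. A sketch of a variant that actually overlaps the file reads, reusing get_files and search_file from above and using the pool's scope/spawn instead (illustrative, untimed):

fn search_files_concurrent_spawned(file_locations: String, search_string: String) {
    let entries: Vec<String> = get_files(&file_locations).unwrap();
    let pool = rayon::ThreadPoolBuilder::new().num_threads(4).build().unwrap();

    // scope() waits for every spawned task to finish, but spawn() itself returns
    // immediately, so the files are searched in parallel on the pool threads.
    pool.scope(|s| {
        for entry in entries {
            let path = format!("{}{}", file_locations, entry);
            let needle = search_string.clone();
            s.spawn(move |_| {
                if search_file(path, needle).unwrap() {
                    println!("Found word in file: {}", entry);
                }
            });
        }
    });
}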

【Parallel Processing】Running loop iterations on threads

import os
import time
import glob
import typing as T
from multiprocessing.pool import ThreadPool

def search_file(file_location: str, search_string: str) -> bool:
    with open(file_location, "r", encoding="utf8") as file:
        return search_string in file.read()

def search_files_concurrently(file_locations: T.List[str], search_string: str) -> None:
    with ThreadPool() as pool:
        results = pool.starmap(search_file,
            ((file_location, search_string) for file_location in file_locations))
        for result, file_name in zip(results, file_locations):
            if result:
                print(f"Found string in file: `{file_name}`")

def search_files_sequentially(file_locations: T.List[str], search_string: str) -> None:
    for file_name in file_locations:
        result = search_file(file_name, search_string)
        if result:
            print(f"Found word in file: `{file_name}`")

if __name__ == "__main__":
    file_locations = list(
        glob.glob(f"{os.path.abspath(os.getcwd())}/*.py"))
    search_string = input("what word are you trying to find?: ")

    start_time = time.perf_counter()
    search_files_concurrently(file_locations, search_string)
    process_time = time.perf_counter() - start_time
    print(f"PROCESS TIME: {process_time}")

vagrant@vagrant:~/dev/rust/parallel/python$ python3 find_files_sequential.py
what word are you trying to find?: queue
Found word in file: `/home/vagrant/dev/rust/parallel/python/thread_pool.py`
Found word in file: `/home/vagrant/dev/rust/parallel/python/pipeline.py`
Found word in file: `/home/vagrant/dev/rust/parallel/python/message_queue.py`
PROCESS TIME: 0.004676494048908353
vagrant@vagrant:~/dev/rust/parallel/python$ python3 find_files_concurrent.py
what word are you trying to find?: queue
Found string in file: `/home/vagrant/dev/rust/parallel/python/thread_pool.py`
Found string in file: `/home/vagrant/dev/rust/parallel/python/pipeline.py`
Found string in file: `/home/vagrant/dev/rust/parallel/python/message_queue.py`
PROCESS TIME: 0.011579621117562056

Speed-wise the threaded version actually seems slower. I wonder why… Most likely the files here are so few and so small that spinning up the thread pool costs more than any overlap gains, and since CPython's GIL lets only one thread run Python bytecode at a time, there isn't much to win for such a light I/O workload anyway.

【Rust】Running multiple looping threads at the same time【Parallel Processing】

It doesn't look pretty at all, but thread::spawn lets the threads run at the same time.
Each thread waits in a loop and breaks out once its goal has been reached.

use std::collections::VecDeque;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;

static Washer_queue: Mutex<VecDeque<u32>> = Mutex::new(VecDeque::new());
static Dryer_queue: Mutex<VecDeque<u32>> = Mutex::new(VecDeque::new());
static Folder_queue: Mutex<VecDeque<u32>> = Mutex::new(VecDeque::new());
static Done_queue: Mutex<VecDeque<u32>> = Mutex::new(VecDeque::new());

fn assemble_laundry(n: u32) {
    for i in 1..(n+1) {
        Washer_queue.lock().unwrap().push_back(i);
    }
}

fn washer() {
    let w = Washer_queue.lock().unwrap().pop_front();
    if w != None {
        println!("washing {:?}...", w.unwrap());
        thread::sleep(Duration::from_millis(300));
        Dryer_queue.lock().unwrap().push_back(w.unwrap());
    }
}

fn dryer() {
    let d = Dryer_queue.lock().unwrap().pop_front();
    if d != None {
        println!("Drying {:?}...", d.unwrap());
        thread::sleep(Duration::from_millis(200));
        Folder_queue.lock().unwrap().push_back(d.unwrap());
    }
}

fn folder() {
    let f = Folder_queue.lock().unwrap().pop_front();
    if f != None {
        println!("Folding {:?}...", f.unwrap());
        thread::sleep(Duration::from_millis(100));
        Done_queue.lock().unwrap().push_back(f.unwrap());
    }
}

fn main() {    
    assemble_laundry(4);
    println!("{:?}", Washer_queue);
    let wash_handle = thread::spawn(|| {
        loop {
            if Washer_queue.lock().unwrap().len() == 0 {
                break;
            }
            washer();
        }
    });
    let dry_handle = thread::spawn(|| {
        loop {
            if Done_queue.lock().unwrap().len() == 4 {
                break;
            }
            dryer();
        }
    });
    let fold_handle = thread::spawn(|| {
        loop {
            if Done_queue.lock().unwrap().len() == 4{
                break;
            }
            folder();
        }
    });
    wash_handle.join().unwrap();
    dry_handle.join().unwrap();
    fold_handle.join().unwrap();
    println!("Washer {:?}", Washer_queue);
    println!("Dryer {:?}", Dryer_queue);
    println!("Folder {:?}", Folder_queue);
    println!("All work finished");
}

Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.20s
Running `target/debug/parallel`
Mutex { data: [1, 2, 3, 4], poisoned: false, .. }
washing 1…
washing 2…
Drying 1…
Folding 1…
washing 3…
Drying 2…
Folding 2…
washing 4…
Drying 3…
Folding 3…
Drying 4…
Folding 4…
Mutex { data: [], poisoned: false, .. }
Mutex { data: [], poisoned: false, .. }
Mutex { data: [], poisoned: false, .. }
All work finished

I'd like to write this a bit more elegantly, but it does do what I set out to do…
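
One possible cleaner shape (just a sketch, not the code that produced the output above): replace the shared VecDeques and busy loops with std::sync::mpsc channels. Each stage simply iterates over its receiver and stops when the previous stage drops its sender, which is essentially what the Python Queue pipeline in the next section does:

use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (wash_tx, wash_rx) = mpsc::channel::<u32>();
    let (dry_tx, dry_rx) = mpsc::channel::<u32>();
    let (fold_tx, fold_rx) = mpsc::channel::<u32>();

    let washer = thread::spawn(move || {
        for item in wash_rx {
            println!("washing {}...", item);
            thread::sleep(Duration::from_millis(300));
            dry_tx.send(item).unwrap();
        } // dry_tx is dropped here, which closes the dryer's input
    });
    let dryer = thread::spawn(move || {
        for item in dry_rx {
            println!("drying {}...", item);
            thread::sleep(Duration::from_millis(200));
            fold_tx.send(item).unwrap();
        }
    });
    let folder = thread::spawn(move || {
        for item in fold_rx {
            println!("folding {}...", item);
            thread::sleep(Duration::from_millis(100));
        }
    });

    for i in 1..=4 {
        wash_tx.send(i).unwrap();
    }
    drop(wash_tx); // closing the first channel lets the whole pipeline drain and stop

    washer.join().unwrap();
    dryer.join().unwrap();
    folder.join().unwrap();
    println!("All work finished");
}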

【Parallel Processing】A pipeline where each thread works off a Queue

Three threads run at the same time, each waiting on its queue in a while loop. Hmm, this is getting fairly complex.

import time
from queue import Queue
from threading import Thread

Washload = str

class Washer(Thread):
    def __init__(self, in_queue: Queue[Washload],
        out_queue: Queue[Washload]):
        super().__init__()
        self.in_queue = in_queue
        self.out_queue = out_queue

    def run(self) -> None:
        while True:
            washload = self.in_queue.get()
            print(f"Washer: washing{washload}...")
            time.sleep(4)
            self.out_queue.put(f'{washload}')
            self.in_queue.task_done()

class Dryer(Thread):
    def __init__(self, in_queue: Queue[Washload],
        out_queue: Queue[Washload]):
        super().__init__()
        self.in_queue = in_queue
        self.out_queue = out_queue

    def run(self) -> None:
        while True:
            washload = self.in_queue.get()
            print(f"Dryer: dying {washload} ...")
            time.sleep(2)
            self.out_queue.put(f'{washload}')
            self.in_queue.task_done()

class Folder(Thread):
    def __init__(self, in_queue: Queue[Washload]):
        super().__init__()
        self.in_queue = in_queue

    def run(self) -> None:
        while True:
            washload = self.in_queue.get()
            print(f"Folder: folding {washload}...")
            time.sleep(1)
            print(f"Folder: {washload} done!")
            self.in_queue.task_done()

class Pipeline:
    def assemble_laundry_for_washing(self) -> Queue[Washload]:
        washload_count = 4
        washloads_in: Queue[Washload] = Queue(washload_count)
        for washload_num in range(washload_count):
            washloads_in.put(f'Washload #{washload_num}')
        return washloads_in

    def run_concurrently(self) -> None:
        to_be_washed = self.assemble_laundry_for_washing()
        to_be_dried: Queue[Washload] = Queue()
        to_be_folded: Queue[Washload] = Queue()

        Washer(to_be_washed, to_be_dried).start()
        Dryer(to_be_dried, to_be_folded).start()
        Folder(to_be_folded).start()

        to_be_washed.join()
        to_be_dried.join()
        to_be_folded.join()

        print("All done!")

if __name__ == "__main__":
    pipeline = Pipeline()
    pipeline.run_concurrently()

$ python3 pipeline.py
Washer: washing Washload #0...
Washer: washing Washload #1...
Dryer: drying Washload #0...
Folder: folding Washload #0...
Folder: Washload #0 done!
Washer: washing Washload #2...
Dryer: drying Washload #1...
Folder: folding Washload #1...
Folder: Washload #1 done!
Washer: washing Washload #3...
Dryer: drying Washload #2...
Folder: folding Washload #2...
Folder: Washload #2 done!
Dryer: drying Washload #3...
Folder: folding Washload #3...
Folder: Washload #3 done!
All done!

【Parallel Processing】Preemptive multitasking

Time sharing gives the impression that threads are running simultaneously even on a single core.
Almost every modern OS has preemptive multitasking built in.

import typing as T
from threading import Thread, Event, Timer

from pacman import get_user_input, compute_game_world, render_next_screen

processor_free = Event()
processor_free.set()
TIME_SLICE = 0.5

class Task(Thread):
    def __init__(self, func: T.Callable[..., None]):
        super().__init__()
        self.func = func

    def run(self) -> None:
        while True:
            processor_free.wait()
            processor_free.clear()
            self.func()

class InterruptService(Timer):
    def __init__(self):
        super().__init__(TIME_SLICE, lambda: None)

    def run(self):
        while not self.finished.wait(self.interval):
            print("Tick!")
            processor_free.set()

def arcade_machine() -> None:
    get_user_input_task = Task(get_user_input)
    compute_game_world_task = Task(compute_game_world)
    render_next_screen_task = Task(render_next_screen)

    InterruptService().start()
    get_user_input_task.start()
    compute_game_world_task.start()
    render_next_screen_task.start()

if __name__ == "__main__":
    arcade_machine()

【Parallel Processing】Learning about multitasking

An application's work broadly falls into two categories: CPU-bound (computation on the CPU) and I/O-bound (reading from disk, waiting on input/output, and so on).
CPU-bound workloads can potentially see their performance improve through parallelization.
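
A tiny, hand-wavy illustration of the two categories (the file path is only a placeholder):

use std::fs;

fn main() {
    // CPU-bound: pure computation, limited by how fast the core can crunch numbers.
    let cpu_heavy: u64 = (1..=10_000_000u64).map(|n| n * n % 1_000_003).sum();
    println!("cpu-bound result: {}", cpu_heavy);

    // I/O-bound: the thread mostly blocks waiting on the operating system / disk.
    let bytes = fs::read("/etc/hosts").map(|b| b.len()).unwrap_or(0);
    println!("i/o-bound result: read {} bytes", bytes);
}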

Running a separate task in a loop on each thread

import typing as T
from threading import Thread, Event

from pacman import get_user_input, compute_game_world, render_next_screen

processor_free = Event()
# processor_free.set()

class Task(Thread):
    def __init__(self, func: T.Callable[..., None]):
        super().__init__()
        self.func = func

    def run(self) -> None:
        while True:
            processor_free.wait()
            processor_free.clear()
            self.func()

def arcade_machine() -> None:
    get_user_input_task = Task(get_user_input)
    compute_game_world_task = Task(compute_game_world)
    render_next_screen_task = Task(render_next_screen)

    get_user_input_task.start()
    compute_game_world_task.start()
    render_next_screen_task.start()

if __name__ == "__main__":
    arcade_machine()

Looking at it as source code makes the structure easy to understand.

【Rust】Password cracking with a thread pool in Rust【Parallel Processing】

use sha2::{Digest, Sha256};
use std::time;
use std::collections::HashMap;
use threadpool::ThreadPool;

fn main() {

    let hash = "2e9352c704043c75fa1c2a424fce7bef0569ec08af453e841101596d911d26e3".to_string();
    let length = 4;
    crack_password_parallel(hash, length);
}

fn crack_password_parallel(crypto_hash: String, length: u32) {
    let num_cores = num_cpus::get() as u32;
    let chunks = get_chunks(num_cores, length);
    let pool = ThreadPool::new(num_cores as usize);
    println!("{:?}", chunks);
    
    for (chunk_start, chunk_end) in chunks {
        let hash = crypto_hash.clone();
        pool.execute(move|| {
            println!("{}:{}", chunk_start, chunk_end);
            let combinations = get_chunk_combinations(length, chunk_start, chunk_end);
            for combination in combinations {
                if check_password(&hash, combination.clone()) {
                    println!("PASSWORD CRACKED:{}", combination);
                    break;
                }
            }
        });
    }
    pool.join();

}

fn get_chunk_combinations(length: u32, min_number: u32, max_number: u32) -> Vec<String> {
    let mut combinations: Vec<String> = Vec::new();
    for i in min_number..max_number {
        let str_num: String = i.to_string();
        let zeros: String = "0".repeat((length - str_num.chars().count() as u32).try_into().unwrap());
        combinations.push(format!("{}{}", zeros, str_num));
    }
    return combinations;
}

fn get_chunks(num_ranges: u32, length: u32) -> HashMap<u32, u32>{
    let max_number = 10_i32.pow(length) as u32;

    let mut chunk_starts = Vec::new();
    for i in 0..num_ranges {
        chunk_starts.push(max_number / num_ranges * i )
    }

    let mut chunk_ends = Vec::new();
    for i in &chunk_starts[1..] {
        chunk_ends.push(i - 1);
    }
    chunk_ends.push(max_number);

    let mut chunks:HashMap<u32, u32> = HashMap::new();
    for i in 0..chunk_starts.len() {
        chunks.insert(chunk_starts[i], chunk_ends[i]);
    }
    return chunks
}

fn get_combinations(length: u32) -> Vec<String> {
    let mut combinations: Vec<String> = Vec::new();
    let min_number = 0;
    let max_number = 10_i32.pow(length);

    for i in min_number..max_number {
        let str_num: String = i.to_string();
        let zeros: String = "0".repeat((length - str_num.chars().count() as u32).try_into().unwrap());
        combinations.push(format!("{}{}", zeros, str_num));
    }
    return combinations;
}

fn get_crypto_hash(password: String) -> String {
    let sha = Sha256::digest(password);
    hex::encode(sha).to_string()
}

fn check_password(expected_crypto_hash: &String, possible_password: String) -> bool {
    let actual_crypto_hash = get_crypto_hash(possible_password);
    return *expected_crypto_hash == actual_crypto_hash
}

fn crack_password(crypto_hash: String, length: u32) {
    println!("Processing number combinations sequentially");
    let start_time = time::Instant::now();
    let combinations: Vec<String> = get_combinations(length);
    for combination in combinations {
        if check_password(&crypto_hash.clone(), combination.clone()) {
            println!("PASSWORD CRACKED:{}", combination);
            break;
        }
    }
    println!("PROCESS TIME: {:?}", start_time.elapsed());
}

Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.32s
Running `target/debug/parallel`
{5000: 10000, 0: 4999}
5000:10000
0:4999
PASSWORD CRACKED:5231

This is pretty great ^^
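
One small follow-up idea, as a sketch only: break stops just the worker that found the password, while the other workers keep hashing their whole chunk. A shared AtomicBool can act as a cancellation flag so everyone stops early; expensive_check below is a stand-in for check_password, and the chunk bounds are hard-coded for the sketch:

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use threadpool::ThreadPool;

fn expensive_check(candidate: u32) -> bool {
    candidate == 5231 // placeholder for check_password()
}

fn main() {
    let found = Arc::new(AtomicBool::new(false));
    let pool = ThreadPool::new(2);
    for (start, end) in [(0u32, 5000u32), (5000, 10000)] {
        let found = Arc::clone(&found);
        pool.execute(move || {
            for candidate in start..end {
                if found.load(Ordering::Relaxed) {
                    return; // another worker already cracked it
                }
                if expensive_check(candidate) {
                    println!("PASSWORD CRACKED: {:04}", candidate);
                    found.store(true, Ordering::Relaxed);
                    return;
                }
            }
        });
    }
    pool.join();
}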