Merge pull request 'Added threading exercise' (#54) from 104_threading into main

Reviewed-on: https://codeberg.org/ziglings/exercises/pulls/54
Chris Boesch 2024-03-05 08:22:40 +00:00
commit 9e9cf40453
4 changed files with 161 additions and 0 deletions


@@ -216,6 +216,7 @@ Zig Core Language
* [X] Interfaces
* [X] Bit manipulation
* [X] Working with C
+* [X] Threading
* [ ] Interfaces part 2

Zig Standard Library


@@ -1103,6 +1103,20 @@ const exercises = [_]Exercise{
        \\This little poem has 15 words!
        ,
    },
+    .{
+        .main_file = "104_threading.zig",
+        .output =
+        \\Starting work...
+        \\thread 1: started.
+        \\thread 2: started.
+        \\thread 3: started.
+        \\Some weird stuff, after starting the threads.
+        \\thread 2: finished.
+        \\thread 1: finished.
+        \\thread 3: finished.
+        \\Zig is cool!
+        ,
+    },
    .{
        .main_file = "999_the_end.zig",
        .output =

exercises/104_threading.zig (new file, 129 lines added)

@@ -0,0 +1,129 @@
//
// Whenever there is a lot to calculate, the question arises as to how
// tasks can be carried out simultaneously. We have already learned about
// one possibility, namely asynchronous processes, in Exercises 84-91.
//
// However, with asynchronous processing the computing power of the processor
// is merely divided among the started tasks, and this approach reaches its
// limits whenever raw computing power is required.
//
// For example, in blockchains based on proof of work, miners have to find
// a nonce for a given string such that the first m bits of the hash of the
// string combined with the nonce are zero.
// Since the miner who solves the task first receives the reward, everyone
// tries to complete the calculation as quickly as possible.
//
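// As a purely illustrative sketch (not part of this exercise), such a
// brute-force search boils down to a loop like the following, where
// `leadingZeroBits` is a hypothetical helper that hashes the data together
// with the nonce and counts the leading zero bits of the digest:
//
//     var nonce: u64 = 0;
//     while (leadingZeroBits(data, nonce) < m) : (nonce += 1) {}
//     // `nonce` now satisfies the proof-of-work condition.
//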
// This is where multithreading comes into play: tasks are actually
// distributed across several cores of the CPU or GPU, which can genuinely
// multiply the available computing power.
//
// The following diagram roughly illustrates the difference between the
// various types of process execution.
// The 'Overall Time' column illustrates how the total time changes when,
// instead of the single core used for synchronous and asynchronous
// processing, a second core helps to complete the work in the
// multithreaded case.
//
// In the ideal case shown, execution takes only half as long as with the
// synchronous single thread, and even asynchronous processing is only
// slightly faster than the synchronous variant.
//
//
//   [ASCII timing diagram: three columns compare Synchronous Processing
//    (Thread 1), Asynchronous Processing (Thread 1) and Multithreading
//    (Thread 1 and Thread 2), each working through the same tasks. The
//    asynchronous column contains a blocking wait, and an 'Overall Time'
//    column shows roughly 10 seconds for synchronous, 8 seconds for
//    asynchronous and 5 seconds for multithreaded execution.]
//
// The diagram is modeled on the one in a blog post that explains the
// differences between asynchronous processing and multithreading in detail:
// https://blog.devgenius.io/multi-threading-vs-asynchronous-programming-what-is-the-difference-3ebfe1179a5
//
// Our exercise is essentially about clarifying the approach in Zig and
// therefore we try to keep it as simple as possible.
// Multithreading in itself is already difficult enough. ;-)
//
const std = @import("std");

pub fn main() !void {
    // This is where the preparatory work takes place
    // before the parallel processing begins.
    std.debug.print("Starting work...\n", .{});

    // These curly brackets are very important: they enclose the block
    // in which the threads are started.
    // Without them, the program would not wait for the threads to finish,
    // and they could keep running beyond the end of the program.
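    // (Recall that `defer` statements run when their enclosing block ends,
    // so the deferred `join()` calls below are executed at the closing
    // brace of this block.)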
    {
        // Now we start the first thread, with its number as a parameter.
        const handle = try std.Thread.spawn(.{}, thread_function, .{1});

        // Waits for the thread to complete,
        // then deallocates any resources created on `spawn()`.
        defer handle.join();

        // Second thread
        const handle2 = try std.Thread.spawn(.{}, thread_function, .{-4}); // that can't be right?
        defer handle2.join();

        // Third thread
        const handle3 = try std.Thread.spawn(.{}, thread_function, .{3});
        defer ??? // <-- something is missing

        // After the threads have been started,
        // they run in parallel and we can still do some work in between.
        std.time.sleep((1) * std.time.ns_per_s);
        std.debug.print("Some weird stuff, after starting the threads.\n", .{});
    }
    // After we have left the enclosed block, all threads have finished:
    // the deferred `join()` calls waited for them if they had not yet
    // done so.
    std.debug.print("Zig is cool!\n", .{});
}

// This function is executed by every thread that we start.
// In our example, we pass the thread's number as a parameter.
fn thread_function(num: usize) !void {
    std.debug.print("thread {d}: {s}\n", .{ num, "started." });
    std.time.sleep((5 - num % 3) * std.time.ns_per_s);
    std.debug.print("thread {d}: {s}\n", .{ num, "finished." });
}
// This is the easiest way to run threads in parallel.
// In general, however, more management effort is required,
// e.g. by setting up a pool and allowing the threads to communicate
// with each other using semaphores.
//
// But that's a topic for another exercise.
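
The closing comment above mentions thread pools and threads communicating via
semaphores. As a rough, illustrative sketch only (not part of this exercise or
of this commit), and assuming the `std.Thread.Semaphore`, `Thread.spawn()` and
`Thread.detach()` APIs of recent Zig versions, coordinating workers with a
semaphore could look roughly like this; the names `worker`, `done` and
`worker_count` are made up for the example:

const std = @import("std");

// A semaphore with zero initial permits; each worker posts once when it is done.
var done = std.Thread.Semaphore{};

fn worker(num: usize) void {
    std.debug.print("worker {d} running\n", .{num});
    // Signal the main thread that this worker has finished.
    done.post();
}

pub fn main() !void {
    const worker_count: usize = 3;

    var i: usize = 0;
    while (i < worker_count) : (i += 1) {
        const t = try std.Thread.spawn(.{}, worker, .{i + 1});
        // Detach instead of join(): completion is signalled through the semaphore.
        t.detach();
    }

    // Wait until every worker has posted exactly once.
    i = 0;
    while (i < worker_count) : (i += 1) done.wait();

    std.debug.print("all workers finished\n", .{});
}

The standard library also provides `std.Thread.Pool`, which manages a fixed
set of worker threads, but as the comment above says, that is a topic for
another exercise.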

@@ -0,0 +1,17 @@
--- exercises/104_threading.zig 2024-03-05 09:09:04.013974229 +0100
+++ answers/104_threading.zig 2024-03-05 09:12:03.987162883 +0100
@@ -97,12 +97,12 @@
        defer handle.join();

        // Second thread
-        const handle2 = try std.Thread.spawn(.{}, thread_function, .{-4}); // that can't be right?
+        const handle2 = try std.Thread.spawn(.{}, thread_function, .{2});
        defer handle2.join();

        // Third thread
        const handle3 = try std.Thread.spawn(.{}, thread_function, .{3});
-        defer ??? // <-- something is missing
+        defer handle3.join();

        // After the threads have been started,
        // they run in parallel and we can still do some work in between.