I have set up the Docker container to run the model evaluation.
Write a self-contained Rust program that prints a JSON output of how often each word appears in a text file located in /tmp/file.txt. So if the file contains "This is is is, an example" then the program should print to stdout: {"This": 1, "is": 2, "is,": 1, "an": 1, "example": 1}. Don't import any crates that would require a Cargo.toml file. I'll just compile it with `rustc yourfile.rs` and it should work. The program should be run by calling /path/to/rust_word_count with no arguments. Make it run in parallel on 4 cores at the same time, each job taking ~1/4 of the file. Print out 4 different JSON dicts, one per line. I'll do the merging. Do not import serde_json; write the JSON output from scratch.
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, usize>>>) {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut line_num = 0;
    for line in reader.lines() {
        if line_num >= start && line_num < end {
            let words = line.unwrap().split_whitespace();
            for word in words {
                let count = word_count.entry(word.to_string()).or_insert(0);
                *count += 1;
            }
        }
        line_num += 1;
    }
    let mut result = result.lock().unwrap();
    for (word, count) in word_count {
        let total_count = result.entry(word).or_insert(0);
        *total_count += count;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let result = Arc::new(Mutex::new(HashMap::new()));
    let mut handles = vec![];
    for i in 0..4 {
        let start = (file_len / 4) * i;
        let end = if i == 3 { file_len } else { (file_len / 4) * (i + 1) };
        let result_clone = Arc::clone(&result);
        let handle = thread::spawn(move || {
            count_words(filename, start, end, result_clone);
        });
        handles.push(handle);
    }
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}
// Example output:
// {"This": 1, "is": 2, "is,": 1}
// {"an": 1, "example": 1}
// {"This": 1, "is": 2, "is,": 1}
// {"an": 1, "example": 1}
I extracted the following code from that output:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, usize>>>) {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut line_num = 0;
    for line in reader.lines() {
        if line_num >= start && line_num < end {
            let words = line.unwrap().split_whitespace();
            for word in words {
                let count = word_count.entry(word.to_string()).or_insert(0);
                *count += 1;
            }
        }
        line_num += 1;
    }
    let mut result = result.lock().unwrap();
    for (word, count) in word_count {
        let total_count = result.entry(word).or_insert(0);
        *total_count += count;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let result = Arc::new(Mutex::new(HashMap::new()));
    let mut handles = vec![];
    for i in 0..4 {
        let start = (file_len / 4) * i;
        let end = if i == 3 { file_len } else { (file_len / 4) * (i + 1) };
        let result_clone = Arc::clone(&result);
        let handle = thread::spawn(move || {
            count_words(filename, start, end, result_clone);
        });
        handles.push(handle);
    }
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}
Below is a student's code. I asked them to parallelize some code. Do not judge whether it is correct; I have test cases for that. Logic errors are okay. So are syntax errors. Just tell me if they tried to parallelize it.

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, usize>>>) {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut line_num = 0;
    for line in reader.lines() {
        if line_num >= start && line_num < end {
            let words = line.unwrap().split_whitespace();
            for word in words {
                let count = word_count.entry(word.to_string()).or_insert(0);
                *count += 1;
            }
        }
        line_num += 1;
    }
    let mut result = result.lock().unwrap();
    for (word, count) in word_count {
        let total_count = result.entry(word).or_insert(0);
        *total_count += count;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let result = Arc::new(Mutex::new(HashMap::new()));
    let mut handles = vec![];
    for i in 0..4 {
        let start = (file_len / 4) * i;
        let end = if i == 3 { file_len } else { (file_len / 4) * (i + 1) };
        let result_clone = Arc::clone(&result);
        let handle = thread::spawn(move || {
            count_words(filename, start, end, result_clone);
        });
        handles.push(handle);
    }
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}

Answer either 'The student passes' or 'The student fails'.
The student passes.
Testing if the previous output contains the string student passes
: True
Running the following program:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, usize>>>) {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut line_num = 0;
    for line in reader.lines() {
        if line_num >= start && line_num < end {
            let words = line.unwrap().split_whitespace();
            for word in words {
                let count = word_count.entry(word.to_string()).or_insert(0);
                *count += 1;
            }
        }
        line_num += 1;
    }
    let mut result = result.lock().unwrap();
    for (word, count) in word_count {
        let total_count = result.entry(word).or_insert(0);
        *total_count += count;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let result = Arc::new(Mutex::new(HashMap::new()));
    let mut handles = vec![];
    for i in 0..4 {
        let start = (file_len / 4) * i;
        let end = if i == 3 { file_len } else { (file_len / 4) * (i + 1) };
        let result_clone = Arc::clone(&result);
        let handle = thread::spawn(move || {
            count_words(filename, start, end, result_clone);
        });
        handles.push(handle);
    }
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}
error[E0716]: temporary value dropped while borrowed
--> main.rs:17:25
|
17 | let words = line.unwrap().split_whitespace();
| ^^^^^^^^^^^^^ - temporary value is freed at the end of this statement
| |
| creates a temporary value which is freed while still in use
18 | for word in words {
| ----- borrow later used here
|
= note: consider using a `let` binding to create a longer lived value
error: aborting due to 1 previous error
For more information about this error, try `rustc --explain E0716`.
main.sh: line 2: ./a.out: No such file or directory
Resulting in output: []
Testing if the previous output equals the string [('Darkness,', 1), ('Heaven,', 1), ('Light,', 1), ('age', 2), ('all', 2), ('before', 2), ('belief,', 1), ('best', 1), ('despair,', 1), ('direct', 2), ('epoch', 2), ('everything', 1), ('foolishness,', 1), ('going', 2), ('had', 2), ('hope,', 1), ('incredulity,', 1), ('it', 10), ('nothing', 1), ('of', 10), ('other', 1), ('season', 2), ('spring', 1), ('the', 11), ('times,', 2), ('to', 1), ('us,', 2), ('was', 10), ('way', 1), ('we', 4), ('were', 2), ('winter', 1), ('wisdom,', 1), ('worst', 1)]
: False
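The E0716 failure is exactly what the compiler note describes: `line.unwrap()` returns a temporary `String`, `split_whitespace()` borrows from it, and the temporary is freed at the end of the statement while `words` still holds the borrow. (Using the file's byte length as a line count is a separate logic bug the judge was told to ignore.) A minimal sketch of the `let`-binding fix the compiler suggests, on stand-in data rather than the graded program:

fn main() {
    // Stand-in for reader.lines(): an iterator of io::Result<String>
    let lines = vec![Ok::<String, std::io::Error>("This is is is, an example".into())];
    for line in lines {
        // Fix for E0716: bind the owned String so it outlives the borrow below
        let line = line.unwrap();
        for word in line.split_whitespace() {
            println!("{}", word); // `word` is a &str borrowing from `line`
        }
    }
}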
I have set up the Docker container to run the model evaluation.
Write a self-contained Rust program that prints a JSON output of how often each word appears in a text file located in /tmp/file.txt. So if the file contains "This is is is, an example" then the program should print to stdout: {"This": 1, "is": 2, "is,": 1, "an": 1, "example": 1}. Don't import any crates that would require a Cargo.toml file. I'll just compile it with `rustc yourfile.rs` and it should work. The program should be run by calling /path/to/rust_word_count with no arguments. Make it run in parallel on 4 cores at the same time, each job taking ~1/4 of the file. Print out 4 different JSON dicts, one per line. I'll do the merging. Do not import serde_json; write the JSON output from scratch.
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words_in_chunk(chunk: &str) -> HashMap<String, u32> {
    let mut word_count = HashMap::new();
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let file = File::open("/tmp/file.txt").expect("Unable to open file");
    let reader = BufReader::new(file);
    let lines: Vec<String> = reader.lines().map(|line| line.unwrap()).collect();
    let chunk_size = lines.len() / 4;
    let chunks: Vec<_> = lines.chunks(chunk_size).map(|chunk| chunk.join(" ")).collect();
    let results: Vec<_> = chunks.into_iter().map(|chunk| {
        thread::spawn(move || {
            count_words_in_chunk(&chunk)
        })
    }).collect();
    for result in results {
        let word_count = result.join().unwrap();
        let json_output = word_count.into_iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}
rustc rust_word_count.rs
./rust_word_count
I extracted the following code from that output:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words_in_chunk(chunk: &str) -> HashMap<String, u32> {
    let mut word_count = HashMap::new();
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let file = File::open("/tmp/file.txt").expect("Unable to open file");
    let reader = BufReader::new(file);
    let lines: Vec<String> = reader.lines().map(|line| line.unwrap()).collect();
    let chunk_size = lines.len() / 4;
    let chunks: Vec<_> = lines.chunks(chunk_size).map(|chunk| chunk.join(" ")).collect();
    let results: Vec<_> = chunks.into_iter().map(|chunk| {
        thread::spawn(move || {
            count_words_in_chunk(&chunk)
        })
    }).collect();
    for result in results {
        let word_count = result.join().unwrap();
        let json_output = word_count.into_iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}
Below is a student's code. I asked them to parallelize some code. Do not judge whether it is correct; I have test cases for that. Logic errors are okay. So are syntax errors. Just tell me if they tried to parallelize it.

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words_in_chunk(chunk: &str) -> HashMap<String, u32> {
    let mut word_count = HashMap::new();
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let file = File::open("/tmp/file.txt").expect("Unable to open file");
    let reader = BufReader::new(file);
    let lines: Vec<String> = reader.lines().map(|line| line.unwrap()).collect();
    let chunk_size = lines.len() / 4;
    let chunks: Vec<_> = lines.chunks(chunk_size).map(|chunk| chunk.join(" ")).collect();
    let results: Vec<_> = chunks.into_iter().map(|chunk| {
        thread::spawn(move || {
            count_words_in_chunk(&chunk)
        })
    }).collect();
    for result in results {
        let word_count = result.join().unwrap();
        let json_output = word_count.into_iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}

Answer either 'The student passes' or 'The student fails'.
The student passes.
Testing if the previous output contains the string student passes
: True
Running the following program:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words_in_chunk(chunk: &str) -> HashMap<String, u32> {
    let mut word_count = HashMap::new();
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let file = File::open("/tmp/file.txt").expect("Unable to open file");
    let reader = BufReader::new(file);
    let lines: Vec<String> = reader.lines().map(|line| line.unwrap()).collect();
    let chunk_size = lines.len() / 4;
    let chunks: Vec<_> = lines.chunks(chunk_size).map(|chunk| chunk.join(" ")).collect();
    let results: Vec<_> = chunks.into_iter().map(|chunk| {
        thread::spawn(move || {
            count_words_in_chunk(&chunk)
        })
    }).collect();
    for result in results {
        let word_count = result.join().unwrap();
        let json_output = word_count.into_iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}
warning: unused imports: `Arc`, `Mutex`
--> main.rs:5:17
|
5 | use std::sync::{Arc, Mutex};
| ^^^ ^^^^^
|
= note: `#[warn(unused_imports)]` on by default
warning: 1 warning emitted
thread 'main' panicked at main.rs:24:32:
chunk size must be non-zero
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
Resulting in output: []
Testing if the previous output equals the string [('Darkness,', 1), ('Heaven,', 1), ('Light,', 1), ('age', 2), ('all', 2), ('before', 2), ('belief,', 1), ('best', 1), ('despair,', 1), ('direct', 2), ('epoch', 2), ('everything', 1), ('foolishness,', 1), ('going', 2), ('had', 2), ('hope,', 1), ('incredulity,', 1), ('it', 10), ('nothing', 1), ('of', 10), ('other', 1), ('season', 2), ('spring', 1), ('the', 11), ('times,', 2), ('to', 1), ('us,', 2), ('was', 10), ('way', 1), ('we', 4), ('were', 2), ('winter', 1), ('wisdom,', 1), ('worst', 1)]
: False
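The panic comes from `slice::chunks`, which requires a non-zero chunk size: the test file is a single line, so `lines.len() / 4` is 0. A hedged sketch of one possible guard (my addition, not part of the evaluated program), clamping the chunk size to at least 1 on stand-in data:

fn main() {
    // Stand-in input: one line, as in the failing run
    let lines = vec!["it was the best of times".to_string()];
    // chunks() panics on a size of 0, so never let the division reach it
    let chunk_size = std::cmp::max(1, lines.len() / 4);
    for chunk in lines.chunks(chunk_size) {
        println!("{:?}", chunk);
    }
}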
I have set up the Docker container to run the model evaluation.
Write a self-contained Rust program that prints a JSON output of how often each word appears in a text file located in /tmp/file.txt. So if the file contains "This is is is, an example" then the program should print to stdout: {"This": 1, "is": 2, "is,": 1, "an": 1, "example": 1}. Don't import any crates that would require a Cargo.toml file. I'll just compile it with `rustc yourfile.rs` and it should work. The program should be run by calling /path/to/rust_word_count with no arguments. Make it run in parallel on 4 cores at the same time, each job taking ~1/4 of the file. Print out 4 different JSON dicts, one per line. I'll do the merging. Do not import serde_json; write the JSON output from scratch.
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, u32>>>) {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    for (index, line) in reader.lines().enumerate() {
        if index >= start && index < end {
            let line = line.expect("Unable to read line");
            for word in line.split_whitespace() {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
        }
    }
    let mut result = result.lock().unwrap();
    for (key, value) in word_count {
        *result.entry(key).or_insert(0) += value;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let result = Arc::new(Mutex::new(HashMap::new()));
    let handles: Vec<_> = (0..4).map(|i| {
        let result = Arc::clone(&result);
        let filename = filename.to_string();
        let start = i * 4;
        let end = (i + 1) * 4;
        thread::spawn(move || {
            count_words(&filename, start, end, result);
        })
    }).collect();
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}
I extracted the following code from that output:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, u32>>>) {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    for (index, line) in reader.lines().enumerate() {
        if index >= start && index < end {
            let line = line.expect("Unable to read line");
            for word in line.split_whitespace() {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
        }
    }
    let mut result = result.lock().unwrap();
    for (key, value) in word_count {
        *result.entry(key).or_insert(0) += value;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let result = Arc::new(Mutex::new(HashMap::new()));
    let handles: Vec<_> = (0..4).map(|i| {
        let result = Arc::clone(&result);
        let filename = filename.to_string();
        let start = i * 4;
        let end = (i + 1) * 4;
        thread::spawn(move || {
            count_words(&filename, start, end, result);
        })
    }).collect();
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}
Below is a student's code. I asked them to parallelize some code. Do not judge whether it is correct; I have test cases for that. Logic errors are okay. So are syntax errors. Just tell me if they tried to parallelize it.

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, u32>>>) {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    for (index, line) in reader.lines().enumerate() {
        if index >= start && index < end {
            let line = line.expect("Unable to read line");
            for word in line.split_whitespace() {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
        }
    }
    let mut result = result.lock().unwrap();
    for (key, value) in word_count {
        *result.entry(key).or_insert(0) += value;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let result = Arc::new(Mutex::new(HashMap::new()));
    let handles: Vec<_> = (0..4).map(|i| {
        let result = Arc::clone(&result);
        let filename = filename.to_string();
        let start = i * 4;
        let end = (i + 1) * 4;
        thread::spawn(move || {
            count_words(&filename, start, end, result);
        })
    }).collect();
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}

Answer either 'The student passes' or 'The student fails'.
The student passes.
Testing if the previous output contains the string student passes
: True
Running the following program:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize, result: Arc<Mutex<HashMap<String, u32>>>) {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    for (index, line) in reader.lines().enumerate() {
        if index >= start && index < end {
            let line = line.expect("Unable to read line");
            for word in line.split_whitespace() {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
        }
    }
    let mut result = result.lock().unwrap();
    for (key, value) in word_count {
        *result.entry(key).or_insert(0) += value;
    }
}

fn main() {
    let filename = "/tmp/file.txt";
    let result = Arc::new(Mutex::new(HashMap::new()));
    let handles: Vec<_> = (0..4).map(|i| {
        let result = Arc::clone(&result);
        let filename = filename.to_string();
        let start = i * 4;
        let end = (i + 1) * 4;
        thread::spawn(move || {
            count_words(&filename, start, end, result);
        })
    }).collect();
    for handle in handles {
        handle.join().unwrap();
    }
    let result = result.lock().unwrap();
    for (word, count) in result.iter() {
        println!("{{\"{}\": {}}}", word, count);
    }
}
{"Darkness,": 1}
{"times,": 2}
{"incredulity,": 1}
{"all": 2}
{"winter": 1}
{"was": 10}
{"direct": 2}
{"before": 2}
{"Light,": 1}
{"worst": 1}
{"season": 2}
{"the": 11}
{"Heaven,": 1}
{"had": 2}
{"it": 10}
{"of": 10}
{"everything": 1}
{"best": 1}
{"way": 1}
{"other": 1}
{"despair,": 1}
{"hope,": 1}
{"going": 2}
{"us,": 2}
{"to": 1}
{"belief,": 1}
{"were": 2}
{"wisdom,": 1}
{"epoch": 2}
{"spring": 1}
{"we": 4}
{"foolishness,": 1}
{"age": 2}
{"nothing": 1}
Resulting in output: [('Darkness,', 1), ('Heaven,', 1), ('Light,', 1), ('age', 2), ('all', 2), ('before', 2), ('belief,', 1), ('best', 1), ('despair,', 1), ('direct', 2), ('epoch', 2), ('everything', 1), ('foolishness,', 1), ('going', 2), ('had', 2), ('hope,', 1), ('incredulity,', 1), ('it', 10), ('nothing', 1), ('of', 10), ('other', 1), ('season', 2), ('spring', 1), ('the', 11), ('times,', 2), ('to', 1), ('us,', 2), ('was', 10), ('way', 1), ('we', 4), ('were', 2), ('winter', 1), ('wisdom,', 1), ('worst', 1)]
Testing if the previous output equals the string [('Darkness,', 1), ('Heaven,', 1), ('Light,', 1), ('age', 2), ('all', 2), ('before', 2), ('belief,', 1), ('best', 1), ('despair,', 1), ('direct', 2), ('epoch', 2), ('everything', 1), ('foolishness,', 1), ('going', 2), ('had', 2), ('hope,', 1), ('incredulity,', 1), ('it', 10), ('nothing', 1), ('of', 10), ('other', 1), ('season', 2), ('spring', 1), ('the', 11), ('times,', 2), ('to', 1), ('us,', 2), ('was', 10), ('way', 1), ('we', 4), ('were', 2), ('winter', 1), ('wisdom,', 1), ('worst', 1)]
: True
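This run passes: only the first thread's line range [0, 4) overlaps the single input line, so it counts every word, the other three threads contribute nothing, and the one-pair dicts printed from the shared map merge into the expected totals. For reference, the additive merge the grader performs (assumed; the grader's own code is not shown) looks like this in Rust:

use std::collections::HashMap;

// Sum several per-thread word-count maps into one total map
fn merge(maps: Vec<HashMap<String, u32>>) -> HashMap<String, u32> {
    let mut total = HashMap::new();
    for map in maps {
        for (word, count) in map {
            *total.entry(word).or_insert(0) += count;
        }
    }
    total
}

fn main() {
    let a = HashMap::from([("is".to_string(), 2u32)]);
    let b = HashMap::from([("is".to_string(), 1u32)]);
    println!("{:?}", merge(vec![a, b])); // prints {"is": 3}
}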
I have set up the Docker container to run the model evaluation.
Write a self-contained Rust program that prints a JSON output of how often each word appears in a text file located in /tmp/file.txt. So if the file contains "This is is is, an example" then the program should print to stdout: {"This": 1, "is": 2, "is,": 1, "an": 1, "example": 1}. Don't import any crates that would require a Cargo.toml file. I'll just compile it with `rustc yourfile.rs` and it should work. The program should be run by calling /path/to/rust_word_count with no arguments. Make it run in parallel on 4 cores at the same time, each job taking ~1/4 of the file. Print out 4 different JSON dicts, one per line. I'll do the merging. Do not import serde_json; write the JSON output from scratch.
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize) -> HashMap<String, u32> {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let lines: Vec<String> = reader.lines().map(|l| l.unwrap()).collect();
    let chunk = &lines[start..end].join(" ");
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = (file_len + 3) / 4;
    let handles: Vec<_> = (0..4).map(|i| {
        let start = i * chunk_size;
        let end = std::cmp::min((i + 1) * chunk_size, file_len);
        let filename = filename.to_string();
        thread::spawn(move || count_words(&filename, start, end))
    }).collect();
    let results: Vec<HashMap<String, u32>> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    for result in results {
        let json_output: String = result.iter()
            .map(|(word, count)| format!(r#""{}": {}"#, word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}
I extracted the following code from that output:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize) -> HashMap<String, u32> {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let lines: Vec<String> = reader.lines().map(|l| l.unwrap()).collect();
    let chunk = &lines[start..end].join(" ");
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = (file_len + 3) / 4;
    let handles: Vec<_> = (0..4).map(|i| {
        let start = i * chunk_size;
        let end = std::cmp::min((i + 1) * chunk_size, file_len);
        let filename = filename.to_string();
        thread::spawn(move || count_words(&filename, start, end))
    }).collect();
    let results: Vec<HashMap<String, u32>> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    for result in results {
        let json_output: String = result.iter()
            .map(|(word, count)| format!(r#""{}": {}"#, word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}
Below is a student's code. I asked them to parallelize some code. Do not judge whether it is correct; I have test cases for that. Logic errors are okay. So are syntax errors. Just tell me if they tried to parallelize it.

use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize) -> HashMap<String, u32> {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let lines: Vec<String> = reader.lines().map(|l| l.unwrap()).collect();
    let chunk = &lines[start..end].join(" ");
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = (file_len + 3) / 4;
    let handles: Vec<_> = (0..4).map(|i| {
        let start = i * chunk_size;
        let end = std::cmp::min((i + 1) * chunk_size, file_len);
        let filename = filename.to_string();
        thread::spawn(move || count_words(&filename, start, end))
    }).collect();
    let results: Vec<HashMap<String, u32>> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    for result in results {
        let json_output: String = result.iter()
            .map(|(word, count)| format!(r#""{}": {}"#, word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}

Answer either 'The student passes' or 'The student fails'.
The student passes.
Testing if the previous output contains the string student passes
: True
Running the following program:
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start: usize, end: usize) -> HashMap<String, u32> {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let lines: Vec<String> = reader.lines().map(|l| l.unwrap()).collect();
    let chunk = &lines[start..end].join(" ");
    for word in chunk.split_whitespace() {
        *word_count.entry(word.to_string()).or_insert(0) += 1;
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).unwrap();
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = (file_len + 3) / 4;
    let handles: Vec<_> = (0..4).map(|i| {
        let start = i * chunk_size;
        let end = std::cmp::min((i + 1) * chunk_size, file_len);
        let filename = filename.to_string();
        thread::spawn(move || count_words(&filename, start, end))
    }).collect();
    let results: Vec<HashMap<String, u32>> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    for result in results {
        let json_output: String = result.iter()
            .map(|(word, count)| format!(r#""{}": {}"#, word, count))
            .collect::<Vec<String>>()
            .join(", ");
        println!("{{{}}}", json_output);
    }
}
warning: unused imports: `Arc`, `Mutex`
--> main.rs:5:17
|
5 | use std::sync::{Arc, Mutex};
| ^^^ ^^^^^
|
= note: `#[warn(unused_imports)]` on by default
warning: 1 warning emitted
thread '<unnamed>' panicked at main.rs:14:23:
range end index 104 out of range for slice of length 1
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
thread '<unnamed>' panicked at main.rs:14:23:
range end index 208 out of range for slice of length 1
thread '<unnamed>' panicked at main.rs:14:23:
range end index 312 out of range for slice of length 1
thread '<unnamed>' panicked at main.rs:14:23:
range end index 416 out of range for slice of length 1
thread 'main' panicked at main.rs:37:83:
called `Result::unwrap()` on an `Err` value: Any { .. }
Resulting in output: []
Testing if the previous output equals the string [('Darkness,', 1), ('Heaven,', 1), ('Light,', 1), ('age', 2), ('all', 2), ('before', 2), ('belief,', 1), ('best', 1), ('despair,', 1), ('direct', 2), ('epoch', 2), ('everything', 1), ('foolishness,', 1), ('going', 2), ('had', 2), ('hope,', 1), ('incredulity,', 1), ('it', 10), ('nothing', 1), ('of', 10), ('other', 1), ('season', 2), ('spring', 1), ('the', 11), ('times,', 2), ('to', 1), ('us,', 2), ('was', 10), ('way', 1), ('we', 4), ('were', 2), ('winter', 1), ('wisdom,', 1), ('worst', 1)]
: False
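Here the byte/line confusion finally panics instead of hiding: `start` and `end` are byte offsets derived from the file's length, but they index `lines`, a vector holding a single element. A minimal sketch of clamping the range so the slice cannot overrun (a hypothetical repair on stand-in data, not the graded code):

fn main() {
    let lines = vec!["it was the best of times".to_string()];
    let (start, end) = (0usize, 104usize); // byte-based bounds like the buggy run's
    // Clamp both bounds to the vector length so the slice index cannot panic
    let start = start.min(lines.len());
    let end = end.min(lines.len()).max(start);
    let chunk = lines[start..end].join(" ");
    println!("{}", chunk);
}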
I have set up the Docker container to run the model evaluation.
Write a self-contained Rust program that prints a JSON output of how often each word appears in a text file located in /tmp/file.txt. So if the file contains "This is is is, an example" then the program should print to stdout: {"This": 1, "is": 2, "is,": 1, "an": 1, "example": 1}. Don't import any crates that would require a Cargo.toml file. I'll just compile it with `rustc yourfile.rs` and it should work. The program should be run by calling /path/to/rust_word_count with no arguments. Make it run in parallel on 4 cores at the same time, each job taking ~1/4 of the file. Print out 4 different JSON dicts, one per line. I'll do the merging. Do not import serde_json; write the JSON output from scratch.
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start_byte: usize, end_byte: usize) -> HashMap<String, u32> {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut current_byte = 0;
    for line in reader.lines() {
        let line = line.expect("Unable to read line");
        for word in line.split_whitespace() {
            if current_byte >= start_byte && current_byte < end_byte {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
            current_byte += word.len() + 1; // +1 for the space
        }
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).expect("Unable to open file");
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = file_len / 4;
    let mut handles = vec![];
    for i in 0..4 {
        let start_byte = i * chunk_size;
        let end_byte = if i == 3 { file_len } else { (i + 1) * chunk_size };
        let filename = filename.to_string();
        let handle = thread::spawn(move || count_words(&filename, start_byte, end_byte));
        handles.push(handle);
    }
    for handle in handles {
        let result = handle.join().unwrap();
        let json_output: String = result
            .iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<_>>()
            .join(", ");
        println!("{ }", json_output);
    }
}
I extracted the following code from that output:
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start_byte: usize, end_byte: usize) -> HashMap<String, u32> {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut current_byte = 0;
    for line in reader.lines() {
        let line = line.expect("Unable to read line");
        for word in line.split_whitespace() {
            if current_byte >= start_byte && current_byte < end_byte {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
            current_byte += word.len() + 1; // +1 for the space
        }
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).expect("Unable to open file");
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = file_len / 4;
    let mut handles = vec![];
    for i in 0..4 {
        let start_byte = i * chunk_size;
        let end_byte = if i == 3 { file_len } else { (i + 1) * chunk_size };
        let filename = filename.to_string();
        let handle = thread::spawn(move || count_words(&filename, start_byte, end_byte));
        handles.push(handle);
    }
    for handle in handles {
        let result = handle.join().unwrap();
        let json_output: String = result
            .iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<_>>()
            .join(", ");
        println!("{ }", json_output);
    }
}
Below is a student's code. I asked them to parallelize some code. Do not judge whether it is correct; I have test cases for that. Logic errors are okay. So are syntax errors. Just tell me if they tried to parallelize it.

use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start_byte: usize, end_byte: usize) -> HashMap<String, u32> {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut current_byte = 0;
    for line in reader.lines() {
        let line = line.expect("Unable to read line");
        for word in line.split_whitespace() {
            if current_byte >= start_byte && current_byte < end_byte {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
            current_byte += word.len() + 1; // +1 for the space
        }
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).expect("Unable to open file");
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = file_len / 4;
    let mut handles = vec![];
    for i in 0..4 {
        let start_byte = i * chunk_size;
        let end_byte = if i == 3 { file_len } else { (i + 1) * chunk_size };
        let filename = filename.to_string();
        let handle = thread::spawn(move || count_words(&filename, start_byte, end_byte));
        handles.push(handle);
    }
    for handle in handles {
        let result = handle.join().unwrap();
        let json_output: String = result
            .iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<_>>()
            .join(", ");
        println!("{ }", json_output);
    }
}

Answer either 'The student passes' or 'The student fails'.
The student passes.
Testing if the previous output contains the string student passes
: True
Running the following program:
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::sync::{Arc, Mutex};
use std::thread;

fn count_words(filename: &str, start_byte: usize, end_byte: usize) -> HashMap<String, u32> {
    let file = File::open(filename).expect("Unable to open file");
    let reader = BufReader::new(file);
    let mut word_count = HashMap::new();
    let mut current_byte = 0;
    for line in reader.lines() {
        let line = line.expect("Unable to read line");
        for word in line.split_whitespace() {
            if current_byte >= start_byte && current_byte < end_byte {
                *word_count.entry(word.to_string()).or_insert(0) += 1;
            }
            current_byte += word.len() + 1; // +1 for the space
        }
    }
    word_count
}

fn main() {
    let filename = "/tmp/file.txt";
    let file = File::open(filename).expect("Unable to open file");
    let file_len = file.metadata().unwrap().len() as usize;
    let chunk_size = file_len / 4;
    let mut handles = vec![];
    for i in 0..4 {
        let start_byte = i * chunk_size;
        let end_byte = if i == 3 { file_len } else { (i + 1) * chunk_size };
        let filename = filename.to_string();
        let handle = thread::spawn(move || count_words(&filename, start_byte, end_byte));
        handles.push(handle);
    }
    for handle in handles {
        let result = handle.join().unwrap();
        let json_output: String = result
            .iter()
            .map(|(word, count)| format!("\"{}\": {}", word, count))
            .collect::<Vec<_>>()
            .join(", ");
        println!("{ }", json_output);
    }
}
"age": 2, "it": 4, "best": 1, "foolishness,": 1, "the": 4, "worst": 1, "was": 4, "of": 4, "times,": 2, "wisdom,": 1
"belief,": 1, "it": 4, "Light,": 1, "season": 1, "the": 4, "epoch": 2, "incredulity,": 1, "was": 4, "of": 3
"of": 3, "season": 1, "Darkness,": 1, "it": 2, "was": 2, "spring": 1, "winter": 1, "the": 2, "everything": 1, "had": 1, "hope,": 1, "we": 1, "before": 1, "despair,": 1
"were": 2, "Heaven,": 1, "had": 1, "us,": 2, "before": 1, "all": 2, "we": 3, "going": 2, "to": 1, "direct": 2, "nothing": 1, "the": 1, "other": 1, "way": 1
warning: unused imports: `Arc`, `Mutex`
--> main.rs:5:17
|
5 | use std::sync::{Arc, Mutex};
| ^^^ ^^^^^
|
= note: `#[warn(unused_imports)]` on by default
warning: 1 warning emitted
Resulting in output: []
Testing if the previous output equals the string [('Darkness,', 1), ('Heaven,', 1), ('Light,', 1), ('age', 2), ('all', 2), ('before', 2), ('belief,', 1), ('best', 1), ('despair,', 1), ('direct', 2), ('epoch', 2), ('everything', 1), ('foolishness,', 1), ('going', 2), ('had', 2), ('hope,', 1), ('incredulity,', 1), ('it', 10), ('nothing', 1), ('of', 10), ('other', 1), ('season', 2), ('spring', 1), ('the', 11), ('times,', 2), ('to', 1), ('us,', 2), ('was', 10), ('way', 1), ('we', 4), ('were', 2), ('winter', 1), ('wisdom,', 1), ('worst', 1)]
: False
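The grader gets [] even though the counts above sum correctly: the printed lines are missing the surrounding braces, so none of them parse as JSON objects. In `println!` a literal brace is written by doubling it, as the earlier attempts did with "{{{}}}"; a minimal sketch of the corrected print:

fn main() {
    let json_output = r#""is": 2, "an": 1"#;
    // {{ and }} are escaped literal braces; the inner {} formats the argument
    println!("{{{}}}", json_output); // prints {"is": 2, "an": 1}
}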