From 33325f4a0bc0931823867b11241dff3c2a0b60b7 Mon Sep 17 00:00:00 2001 From: Mark VanderVoord Date: Thu, 7 Jan 2016 14:20:59 -0500 Subject: [PATCH] More fixes to line endings --- auto/colour_reporter.rb | 76 +- auto/generate_config.yml | 72 +- auto/generate_module.rb | 404 +++++----- auto/generate_test_runner.rb | 748 +++++++++--------- auto/unity_test_summary.py | 270 +++---- auto/unity_test_summary.rb | 296 +++---- docs/Unity Summary.txt | 448 +++++------ docs/license.txt | 42 +- .../test_runners/TestProductionCode2_Runner.c | 106 +-- .../test_runners/TestProductionCode_Runner.c | 114 +-- test/rakefile | 120 +-- test/rakefile_helper.rb | 510 ++++++------ test/testdata/mocksample.c | 102 +-- test/testdata/sample.yml | 16 +- test/testdata/testsample.c | 102 +-- test/tests/test_generate_test_runner.rb | 204 ++--- 16 files changed, 1815 insertions(+), 1815 deletions(-) diff --git a/auto/colour_reporter.rb b/auto/colour_reporter.rb index 89e7951..5aa1d27 100644 --- a/auto/colour_reporter.rb +++ b/auto/colour_reporter.rb @@ -1,39 +1,39 @@ -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. Please refer to license.txt for details] -# ========================================== - -require "#{File.expand_path(File.dirname(__FILE__))}/colour_prompt" - -$colour_output = true - -def report(message) - if not $colour_output - $stdout.puts(message) - else - message = message.join('\n') if (message.class == Array) - message.each_line do |line| - line.chomp! - colour = case(line) - when /(?:total\s+)?tests:?\s+(\d+)\s+(?:total\s+)?failures:?\s+\d+\s+Ignored:?/i - ($1.to_i == 0) ? :green : :red - when /PASS/ - :green - when /^OK$/ - :green - when /(?:FAIL|ERROR)/ - :red - when /IGNORE/ - :yellow - when /^(?:Creating|Compiling|Linking)/ - :white - else - :silver - end - colour_puts(colour, line) - end - end - $stdout.flush - $stderr.flush +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. Please refer to license.txt for details] +# ========================================== + +require "#{File.expand_path(File.dirname(__FILE__))}/colour_prompt" + +$colour_output = true + +def report(message) + if not $colour_output + $stdout.puts(message) + else + message = message.join('\n') if (message.class == Array) + message.each_line do |line| + line.chomp! + colour = case(line) + when /(?:total\s+)?tests:?\s+(\d+)\s+(?:total\s+)?failures:?\s+\d+\s+Ignored:?/i + ($1.to_i == 0) ? 
:green : :red + when /PASS/ + :green + when /^OK$/ + :green + when /(?:FAIL|ERROR)/ + :red + when /IGNORE/ + :yellow + when /^(?:Creating|Compiling|Linking)/ + :white + else + :silver + end + colour_puts(colour, line) + end + end + $stdout.flush + $stderr.flush end \ No newline at end of file diff --git a/auto/generate_config.yml b/auto/generate_config.yml index ce66cea..4a5e474 100644 --- a/auto/generate_config.yml +++ b/auto/generate_config.yml @@ -1,36 +1,36 @@ -#this is a sample configuration file for generate_module -#you would use it by calling generate_module with the -ygenerate_config.yml option -#files like this are useful for customizing generate_module to your environment -:generate_module: - :defaults: - #these defaults are used in place of any missing options at the command line - :path_src: ../src/ - :path_inc: ../src/ - :path_tst: ../test/ - :update_svn: true - :includes: - #use [] for no additional includes, otherwise list the includes on separate lines - :src: - - Defs.h - - Board.h - :inc: [] - :tst: - - Defs.h - - Board.h - - Exception.h - :boilerplates: - #these are inserted at the top of generated files. - #just comment out or remove if not desired. - #use %1$s where you would like the file name to appear (path/extension not included) - :src: | - //------------------------------------------- - // %1$s.c - //------------------------------------------- - :inc: | - //------------------------------------------- - // %1$s.h - //------------------------------------------- - :tst: | - //------------------------------------------- - // Test%1$s.c : Units tests for %1$s.c - //------------------------------------------- +#this is a sample configuration file for generate_module +#you would use it by calling generate_module with the -ygenerate_config.yml option +#files like this are useful for customizing generate_module to your environment +:generate_module: + :defaults: + #these defaults are used in place of any missing options at the command line + :path_src: ../src/ + :path_inc: ../src/ + :path_tst: ../test/ + :update_svn: true + :includes: + #use [] for no additional includes, otherwise list the includes on separate lines + :src: + - Defs.h + - Board.h + :inc: [] + :tst: + - Defs.h + - Board.h + - Exception.h + :boilerplates: + #these are inserted at the top of generated files. + #just comment out or remove if not desired. + #use %1$s where you would like the file name to appear (path/extension not included) + :src: | + //------------------------------------------- + // %1$s.c + //------------------------------------------- + :inc: | + //------------------------------------------- + // %1$s.h + //------------------------------------------- + :tst: | + //------------------------------------------- + // Test%1$s.c : Units tests for %1$s.c + //------------------------------------------- diff --git a/auto/generate_module.rb b/auto/generate_module.rb index 4a020eb..3db1a98 100644 --- a/auto/generate_module.rb +++ b/auto/generate_module.rb @@ -1,202 +1,202 @@ -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. Please refer to license.txt for details] -# ========================================== - -# This script creates all the files with start code necessary for a new module. -# A simple module only requires a source file, header file, and test file. 
-# Triad modules require a source, header, and test file for each triad type (like model, conductor, and hardware). - -require 'rubygems' -require 'fileutils' - -HERE = File.expand_path(File.dirname(__FILE__)) + '/' - -#help text when requested -HELP_TEXT = [ "\nGENERATE MODULE\n-------- ------", - "\nUsage: ruby generate_module [options] module_name", - " -i\"include\" sets the path to output headers to 'include' (DEFAULT ../src)", - " -s\"../src\" sets the path to output source to '../src' (DEFAULT ../src)", - " -t\"C:/test\" sets the path to output source to 'C:/test' (DEFAULT ../test)", - " -p\"MCH\" sets the output pattern to MCH.", - " dh - driver hardware.", - " dih - driver interrupt hardware.", - " mch - model conductor hardware.", - " mvp - model view presenter.", - " src - just a single source module. (DEFAULT)", - " -d destroy module instead of creating it.", - " -u update subversion too (requires subversion command line)", - " -y\"my.yml\" selects a different yaml config file for module generation", - "" ].join("\n") - -#Built in patterns -PATTERNS = { 'src' => {'' => { :inc => [] } }, - 'dh' => {'Driver' => { :inc => ['%1$sHardware.h'] }, - 'Hardware' => { :inc => [] } - }, - 'dih' => {'Driver' => { :inc => ['%1$sHardware.h', '%1$sInterrupt.h'] }, - 'Interrupt'=> { :inc => ['%1$sHardware.h'] }, - 'Hardware' => { :inc => [] } - }, - 'mch' => {'Model' => { :inc => [] }, - 'Conductor'=> { :inc => ['%1$sModel.h', '%1$sHardware.h'] }, - 'Hardware' => { :inc => [] } - }, - 'mvp' => {'Model' => { :inc => [] }, - 'Presenter'=> { :inc => ['%1$sModel.h', '%1$sView.h'] }, - 'View' => { :inc => [] } - } - } - -#TEMPLATE_TST -TEMPLATE_TST = %q[#include "unity.h" -%2$s#include "%1$s.h" - -void setUp(void) -{ -} - -void tearDown(void) -{ -} - -void test_%1$s_NeedToImplement(void) -{ - TEST_IGNORE(); -} -] - -#TEMPLATE_SRC -TEMPLATE_SRC = %q[%2$s#include "%1$s.h" -] - -#TEMPLATE_INC -TEMPLATE_INC = %q[#ifndef _%3$s_H -#define _%3$s_H%2$s - -#endif // _%3$s_H -] - -# Parse the command line parameters. -ARGV.each do |arg| - case(arg) - when /^-d/ then @destroy = true - when /^-u/ then @update_svn = true - when /^-p(\w+)/ then @pattern = $1 - when /^-s(.+)/ then @path_src = $1 - when /^-i(.+)/ then @path_inc = $1 - when /^-t(.+)/ then @path_tst = $1 - when /^-y(.+)/ then @yaml_config = $1 - when /^(\w+)/ - raise "ERROR: You can't have more than one Module name specified!" unless @module_name.nil? - @module_name = arg - when /^-(h|-help)/ - puts HELP_TEXT - exit - else - raise "ERROR: Unknown option specified '#{arg}'" - end -end -raise "ERROR: You must have a Module name specified! (use option -h for help)" if @module_name.nil? - -#load yaml file if one was requested -if @yaml_config - require 'yaml' - cfg = YAML.load_file(HERE + @yaml_config)[:generate_module] - @path_src = cfg[:defaults][:path_src] if @path_src.nil? - @path_inc = cfg[:defaults][:path_inc] if @path_inc.nil? - @path_tst = cfg[:defaults][:path_tst] if @path_tst.nil? - @update_svn = cfg[:defaults][:update_svn] if @update_svn.nil? - @extra_inc = cfg[:includes] - @boilerplates = cfg[:boilerplates] -else - @boilerplates = {} -end - -# Create default file paths if none were provided -@path_src = HERE + "../src/" if @path_src.nil? -@path_inc = @path_src if @path_inc.nil? -@path_tst = HERE + "../test/" if @path_tst.nil? -@path_src += '/' unless (@path_src[-1] == 47) -@path_inc += '/' unless (@path_inc[-1] == 47) -@path_tst += '/' unless (@path_tst[-1] == 47) -@pattern = 'src' if @pattern.nil? 
-@includes = { :src => [], :inc => [], :tst => [] } -@includes.merge!(@extra_inc) unless @extra_inc.nil? - -#create triad definition -TRIAD = [ { :ext => '.c', :path => @path_src, :template => TEMPLATE_SRC, :inc => :src, :boilerplate => @boilerplates[:src] }, - { :ext => '.h', :path => @path_inc, :template => TEMPLATE_INC, :inc => :inc, :boilerplate => @boilerplates[:inc] }, - { :ext => '.c', :path => @path_tst+'Test', :template => TEMPLATE_TST, :inc => :tst, :boilerplate => @boilerplates[:tst] }, - ] - -#prepare the pattern for use -@patterns = PATTERNS[@pattern.downcase] -raise "ERROR: The design pattern specified isn't one that I recognize!" if @patterns.nil? - -# Assemble the path/names of the files we need to work with. -files = [] -TRIAD.each do |triad| - @patterns.each_pair do |pattern_file, pattern_traits| - files << { - :path => "#{triad[:path]}#{@module_name}#{pattern_file}#{triad[:ext]}", - :name => "#{@module_name}#{pattern_file}", - :template => triad[:template], - :boilerplate => triad[:boilerplate], - :includes => case(triad[:inc]) - when :src then @includes[:src] | pattern_traits[:inc].map{|f| f % [@module_name]} - when :inc then @includes[:inc] - when :tst then @includes[:tst] | pattern_traits[:inc].map{|f| "Mock#{f}"% [@module_name]} - end - } - end -end - -# destroy files if that was what was requested -if @destroy - files.each do |filespec| - file = filespec[:path] - if File.exist?(file) - if @update_svn - `svn delete \"#{file}\" --force` - puts "File #{file} deleted and removed from source control" - else - FileUtils.remove(file) - puts "File #{file} deleted" - end - else - puts "File #{file} does not exist so cannot be removed." - end - end - puts "Destroy Complete" - exit -end - -#Abort if any module already exists -files.each do |file| - raise "ERROR: File #{file[:name]} already exists. Exiting." if File.exist?(file[:path]) -end - -# Create Source Modules -files.each_with_index do |file, i| - File.open(file[:path], 'w') do |f| - f.write(file[:boilerplate] % [file[:name]]) unless file[:boilerplate].nil? - f.write(file[:template] % [ file[:name], - file[:includes].map{|f| "#include \"#{f}\"\n"}.join, - file[:name].upcase ] - ) - end - if (@update_svn) - `svn add \"#{file[:path]}\"` - if $?.exitstatus == 0 - puts "File #{file[:path]} created and added to source control" - else - puts "File #{file[:path]} created but FAILED adding to source control!" - end - else - puts "File #{file[:path]} created" - end -end - -puts 'Generate Complete' +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. Please refer to license.txt for details] +# ========================================== + +# This script creates all the files with start code necessary for a new module. +# A simple module only requires a source file, header file, and test file. +# Triad modules require a source, header, and test file for each triad type (like model, conductor, and hardware). 
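As a quick, hedged illustration of the command-line interface spelled out in HELP_TEXT below (the module name is invented for the example): running

    ruby generate_module.rb -pmch SensorReader

with the script's default paths would create SensorReaderModel.c/.h, SensorReaderConductor.c/.h and SensorReaderHardware.c/.h under ../src, plus TestSensorReaderModel.c, TestSensorReaderConductor.c and TestSensorReaderHardware.c under ../test, each seeded from the TEMPLATE_* strings defined further down.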
+ +require 'rubygems' +require 'fileutils' + +HERE = File.expand_path(File.dirname(__FILE__)) + '/' + +#help text when requested +HELP_TEXT = [ "\nGENERATE MODULE\n-------- ------", + "\nUsage: ruby generate_module [options] module_name", + " -i\"include\" sets the path to output headers to 'include' (DEFAULT ../src)", + " -s\"../src\" sets the path to output source to '../src' (DEFAULT ../src)", + " -t\"C:/test\" sets the path to output source to 'C:/test' (DEFAULT ../test)", + " -p\"MCH\" sets the output pattern to MCH.", + " dh - driver hardware.", + " dih - driver interrupt hardware.", + " mch - model conductor hardware.", + " mvp - model view presenter.", + " src - just a single source module. (DEFAULT)", + " -d destroy module instead of creating it.", + " -u update subversion too (requires subversion command line)", + " -y\"my.yml\" selects a different yaml config file for module generation", + "" ].join("\n") + +#Built in patterns +PATTERNS = { 'src' => {'' => { :inc => [] } }, + 'dh' => {'Driver' => { :inc => ['%1$sHardware.h'] }, + 'Hardware' => { :inc => [] } + }, + 'dih' => {'Driver' => { :inc => ['%1$sHardware.h', '%1$sInterrupt.h'] }, + 'Interrupt'=> { :inc => ['%1$sHardware.h'] }, + 'Hardware' => { :inc => [] } + }, + 'mch' => {'Model' => { :inc => [] }, + 'Conductor'=> { :inc => ['%1$sModel.h', '%1$sHardware.h'] }, + 'Hardware' => { :inc => [] } + }, + 'mvp' => {'Model' => { :inc => [] }, + 'Presenter'=> { :inc => ['%1$sModel.h', '%1$sView.h'] }, + 'View' => { :inc => [] } + } + } + +#TEMPLATE_TST +TEMPLATE_TST = %q[#include "unity.h" +%2$s#include "%1$s.h" + +void setUp(void) +{ +} + +void tearDown(void) +{ +} + +void test_%1$s_NeedToImplement(void) +{ + TEST_IGNORE(); +} +] + +#TEMPLATE_SRC +TEMPLATE_SRC = %q[%2$s#include "%1$s.h" +] + +#TEMPLATE_INC +TEMPLATE_INC = %q[#ifndef _%3$s_H +#define _%3$s_H%2$s + +#endif // _%3$s_H +] + +# Parse the command line parameters. +ARGV.each do |arg| + case(arg) + when /^-d/ then @destroy = true + when /^-u/ then @update_svn = true + when /^-p(\w+)/ then @pattern = $1 + when /^-s(.+)/ then @path_src = $1 + when /^-i(.+)/ then @path_inc = $1 + when /^-t(.+)/ then @path_tst = $1 + when /^-y(.+)/ then @yaml_config = $1 + when /^(\w+)/ + raise "ERROR: You can't have more than one Module name specified!" unless @module_name.nil? + @module_name = arg + when /^-(h|-help)/ + puts HELP_TEXT + exit + else + raise "ERROR: Unknown option specified '#{arg}'" + end +end +raise "ERROR: You must have a Module name specified! (use option -h for help)" if @module_name.nil? + +#load yaml file if one was requested +if @yaml_config + require 'yaml' + cfg = YAML.load_file(HERE + @yaml_config)[:generate_module] + @path_src = cfg[:defaults][:path_src] if @path_src.nil? + @path_inc = cfg[:defaults][:path_inc] if @path_inc.nil? + @path_tst = cfg[:defaults][:path_tst] if @path_tst.nil? + @update_svn = cfg[:defaults][:update_svn] if @update_svn.nil? + @extra_inc = cfg[:includes] + @boilerplates = cfg[:boilerplates] +else + @boilerplates = {} +end + +# Create default file paths if none were provided +@path_src = HERE + "../src/" if @path_src.nil? +@path_inc = @path_src if @path_inc.nil? +@path_tst = HERE + "../test/" if @path_tst.nil? +@path_src += '/' unless (@path_src[-1] == 47) +@path_inc += '/' unless (@path_inc[-1] == 47) +@path_tst += '/' unless (@path_tst[-1] == 47) +@pattern = 'src' if @pattern.nil? +@includes = { :src => [], :inc => [], :tst => [] } +@includes.merge!(@extra_inc) unless @extra_inc.nil? 
+ +#create triad definition +TRIAD = [ { :ext => '.c', :path => @path_src, :template => TEMPLATE_SRC, :inc => :src, :boilerplate => @boilerplates[:src] }, + { :ext => '.h', :path => @path_inc, :template => TEMPLATE_INC, :inc => :inc, :boilerplate => @boilerplates[:inc] }, + { :ext => '.c', :path => @path_tst+'Test', :template => TEMPLATE_TST, :inc => :tst, :boilerplate => @boilerplates[:tst] }, + ] + +#prepare the pattern for use +@patterns = PATTERNS[@pattern.downcase] +raise "ERROR: The design pattern specified isn't one that I recognize!" if @patterns.nil? + +# Assemble the path/names of the files we need to work with. +files = [] +TRIAD.each do |triad| + @patterns.each_pair do |pattern_file, pattern_traits| + files << { + :path => "#{triad[:path]}#{@module_name}#{pattern_file}#{triad[:ext]}", + :name => "#{@module_name}#{pattern_file}", + :template => triad[:template], + :boilerplate => triad[:boilerplate], + :includes => case(triad[:inc]) + when :src then @includes[:src] | pattern_traits[:inc].map{|f| f % [@module_name]} + when :inc then @includes[:inc] + when :tst then @includes[:tst] | pattern_traits[:inc].map{|f| "Mock#{f}"% [@module_name]} + end + } + end +end + +# destroy files if that was what was requested +if @destroy + files.each do |filespec| + file = filespec[:path] + if File.exist?(file) + if @update_svn + `svn delete \"#{file}\" --force` + puts "File #{file} deleted and removed from source control" + else + FileUtils.remove(file) + puts "File #{file} deleted" + end + else + puts "File #{file} does not exist so cannot be removed." + end + end + puts "Destroy Complete" + exit +end + +#Abort if any module already exists +files.each do |file| + raise "ERROR: File #{file[:name]} already exists. Exiting." if File.exist?(file[:path]) +end + +# Create Source Modules +files.each_with_index do |file, i| + File.open(file[:path], 'w') do |f| + f.write(file[:boilerplate] % [file[:name]]) unless file[:boilerplate].nil? + f.write(file[:template] % [ file[:name], + file[:includes].map{|f| "#include \"#{f}\"\n"}.join, + file[:name].upcase ] + ) + end + if (@update_svn) + `svn add \"#{file[:path]}\"` + if $?.exitstatus == 0 + puts "File #{file[:path]} created and added to source control" + else + puts "File #{file[:path]} created but FAILED adding to source control!" + end + else + puts "File #{file[:path]} created" + end +end + +puts 'Generate Complete' diff --git a/auto/generate_test_runner.rb b/auto/generate_test_runner.rb index 1901d4f..32c0ace 100644 --- a/auto/generate_test_runner.rb +++ b/auto/generate_test_runner.rb @@ -1,374 +1,374 @@ -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. 
Please refer to license.txt for details] -# ========================================== - -$QUICK_RUBY_VERSION = RUBY_VERSION.split('.').inject(0){|vv,v| vv * 100 + v.to_i } -File.expand_path(File.join(File.dirname(__FILE__),'colour_prompt')) - -class UnityTestRunnerGenerator - - def initialize(options = nil) - @options = UnityTestRunnerGenerator.default_options - case(options) - when NilClass then @options - when String then @options.merge!(UnityTestRunnerGenerator.grab_config(options)) - when Hash then @options.merge!(options) - else raise "If you specify arguments, it should be a filename or a hash of options" - end - require "#{File.expand_path(File.dirname(__FILE__))}/type_sanitizer" - end - - def self.default_options - { - :includes => [], - :plugins => [], - :framework => :unity, - :test_prefix => "test|spec|should", - :setup_name => "setUp", - :teardown_name => "tearDown", - } - end - - def self.grab_config(config_file) - options = self.default_options - unless (config_file.nil? or config_file.empty?) - require 'yaml' - yaml_guts = YAML.load_file(config_file) - options.merge!(yaml_guts[:unity] || yaml_guts[:cmock]) - raise "No :unity or :cmock section found in #{config_file}" unless options - end - return(options) - end - - def run(input_file, output_file, options=nil) - tests = [] - testfile_includes = [] - used_mocks = [] - - @options.merge!(options) unless options.nil? - module_name = File.basename(input_file) - - #pull required data from source file - source = File.read(input_file) - source = source.force_encoding("ISO-8859-1").encode("utf-8", :replace => nil) if ($QUICK_RUBY_VERSION > 10900) - tests = find_tests(source) - headers = find_includes(source) - testfile_includes = (headers[:local] + headers[:system]) - used_mocks = find_mocks(testfile_includes) - testfile_includes = (testfile_includes - used_mocks) - testfile_includes.delete_if{|inc| inc =~ /(unity|cmock)/} - - #build runner file - generate(input_file, output_file, tests, used_mocks, testfile_includes) - - #determine which files were used to return them - all_files_used = [input_file, output_file] - all_files_used += testfile_includes.map {|filename| filename + '.c'} unless testfile_includes.empty? - all_files_used += @options[:includes] unless @options[:includes].empty? - return all_files_used.uniq - end - - def generate(input_file, output_file, tests, used_mocks, testfile_includes) - File.open(output_file, 'w') do |output| - create_header(output, used_mocks, testfile_includes) - create_externs(output, tests, used_mocks) - create_mock_management(output, used_mocks) - create_suite_setup_and_teardown(output) - create_reset(output, used_mocks) - create_main(output, input_file, tests, used_mocks) - end - - if (@options[:header_file] && !@options[:header_file].empty?) 
- File.open(@options[:header_file], 'w') do |output| - create_h_file(output, @options[:header_file], tests, testfile_includes) - end - end - end - - def find_tests(source) - tests_and_line_numbers = [] - - source_scrubbed = source.gsub(/\/\/.*$/, '') # remove line comments - source_scrubbed = source_scrubbed.gsub(/\/\*.*?\*\//m, '') # remove block comments - lines = source_scrubbed.split(/(^\s*\#.*$) # Treat preprocessor directives as a logical line - | (;|\{|\}) /x) # Match ;, {, and } as end of lines - - lines.each_with_index do |line, index| - #find tests - if line =~ /^((?:\s*TEST_CASE\s*\(.*?\)\s*)*)\s*void\s+((?:#{@options[:test_prefix]}).*)\s*\(\s*(.*)\s*\)/ - arguments = $1 - name = $2 - call = $3 - params = $4 - args = nil - if (@options[:use_param_tests] and !arguments.empty?) - args = [] - arguments.scan(/\s*TEST_CASE\s*\((.*)\)\s*$/) {|a| args << a[0]} - end - tests_and_line_numbers << { :test => name, :args => args, :call => call, :params => params, :line_number => 0 } - end - end - tests_and_line_numbers.uniq! {|v| v[:test] } - - #determine line numbers and create tests to run - source_lines = source.split("\n") - source_index = 0; - tests_and_line_numbers.size.times do |i| - source_lines[source_index..-1].each_with_index do |line, index| - if (line =~ /#{tests_and_line_numbers[i][:test]}/) - source_index += index - tests_and_line_numbers[i][:line_number] = source_index + 1 - break - end - end - end - - return tests_and_line_numbers - end - - def find_includes(source) - - #remove comments (block and line, in three steps to ensure correct precedence) - source.gsub!(/\/\/(?:.+\/\*|\*(?:$|[^\/])).*$/, '') # remove line comments that comment out the start of blocks - source.gsub!(/\/\*.*?\*\//m, '') # remove block comments - source.gsub!(/\/\/.*$/, '') # remove line comments (all that remain) - - #parse out includes - includes = { - :local => source.scan(/^\s*#include\s+\"\s*(.+)\.[hH]\s*\"/).flatten, - :system => source.scan(/^\s*#include\s+<\s*(.+)\s*>/).flatten.map { |inc| "<#{inc}>" } - } - return includes - end - - def find_mocks(includes) - mock_headers = [] - includes.each do |include_file| - mock_headers << File.basename(include_file) if (include_file =~ /^mock/i) - end - return mock_headers - end - - def create_header(output, mocks, testfile_includes=[]) - output.puts('/* AUTOGENERATED FILE. DO NOT EDIT. */') - create_runtest(output, mocks) - output.puts("\n//=======Automagically Detected Files To Include=====") - output.puts("#include \"#{@options[:framework].to_s}.h\"") - output.puts('#include "cmock.h"') unless (mocks.empty?) - output.puts('#include ') - output.puts('#include ') - output.puts('#include "CException.h"') if @options[:plugins].include?(:cexception) - if (@options[:header_file] && !@options[:header_file].empty?) - output.puts("#include \"#{File.basename(@options[:header_file])}\"") - else - @options[:includes].flatten.uniq.compact.each do |inc| - output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") - end - testfile_includes.each do |inc| - output.puts("#include #{inc.include?('<') ? 
inc : "\"#{inc.gsub('.h','')}.h\""}") - end - end - mocks.each do |mock| - output.puts("#include \"#{mock.gsub('.h','')}.h\"") - end - if @options[:enforce_strict_ordering] - output.puts('') - output.puts('int GlobalExpectCount;') - output.puts('int GlobalVerifyOrder;') - output.puts('char* GlobalOrderError;') - end - end - - def create_externs(output, tests, mocks) - output.puts("\n//=======External Functions This Runner Calls=====") - output.puts("extern void #{@options[:setup_name]}(void);") - output.puts("extern void #{@options[:teardown_name]}(void);") - tests.each do |test| - output.puts("extern void #{test[:test]}(#{test[:call] || 'void'});") - end - output.puts('') - end - - def create_mock_management(output, mocks) - unless (mocks.empty?) - output.puts("\n//=======Mock Management=====") - output.puts("static void CMock_Init(void)") - output.puts("{") - if @options[:enforce_strict_ordering] - output.puts(" GlobalExpectCount = 0;") - output.puts(" GlobalVerifyOrder = 0;") - output.puts(" GlobalOrderError = NULL;") - end - mocks.each do |mock| - mock_clean = TypeSanitizer.sanitize_c_identifier(mock) - output.puts(" #{mock_clean}_Init();") - end - output.puts("}\n") - - output.puts("static void CMock_Verify(void)") - output.puts("{") - mocks.each do |mock| - mock_clean = TypeSanitizer.sanitize_c_identifier(mock) - output.puts(" #{mock_clean}_Verify();") - end - output.puts("}\n") - - output.puts("static void CMock_Destroy(void)") - output.puts("{") - mocks.each do |mock| - mock_clean = TypeSanitizer.sanitize_c_identifier(mock) - output.puts(" #{mock_clean}_Destroy();") - end - output.puts("}\n") - end - end - - def create_suite_setup_and_teardown(output) - unless (@options[:suite_setup].nil?) - output.puts("\n//=======Suite Setup=====") - output.puts("static int suite_setup(void)") - output.puts("{") - output.puts(@options[:suite_setup]) - output.puts("}") - end - unless (@options[:suite_teardown].nil?) - output.puts("\n//=======Suite Teardown=====") - output.puts("static int suite_teardown(int num_failures)") - output.puts("{") - output.puts(@options[:suite_teardown]) - output.puts("}") - end - end - - def create_runtest(output, used_mocks) - cexception = @options[:plugins].include? :cexception - va_args1 = @options[:use_param_tests] ? ', ...' : '' - va_args2 = @options[:use_param_tests] ? '__VA_ARGS__' : '' - output.puts("\n//=======Test Runner Used To Run Each Test Below=====") - output.puts("#define RUN_TEST_NO_ARGS") if @options[:use_param_tests] - output.puts("#define RUN_TEST(TestFunc, TestLineNum#{va_args1}) \\") - output.puts("{ \\") - output.puts(" Unity.CurrentTestName = #TestFunc#{va_args2.empty? ? '' : " \"(\" ##{va_args2} \")\""}; \\") - output.puts(" Unity.CurrentTestLineNumber = TestLineNum; \\") - output.puts(" Unity.NumberOfTests++; \\") - output.puts(" CMock_Init(); \\") unless (used_mocks.empty?) - output.puts(" UNITY_CLR_DETAILS(); \\") unless (used_mocks.empty?) - output.puts(" if (TEST_PROTECT()) \\") - output.puts(" { \\") - output.puts(" CEXCEPTION_T e; \\") if cexception - output.puts(" Try { \\") if cexception - output.puts(" #{@options[:setup_name]}(); \\") - output.puts(" TestFunc(#{va_args2}); \\") - output.puts(" } Catch(e) { TEST_ASSERT_EQUAL_HEX32_MESSAGE(CEXCEPTION_NONE, e, \"Unhandled Exception!\"); } \\") if cexception - output.puts(" } \\") - output.puts(" if (TEST_PROTECT() && !TEST_IS_IGNORED) \\") - output.puts(" { \\") - output.puts(" #{@options[:teardown_name]}(); \\") - output.puts(" CMock_Verify(); \\") unless (used_mocks.empty?) 
- output.puts(" } \\") - output.puts(" CMock_Destroy(); \\") unless (used_mocks.empty?) - output.puts(" UnityConcludeTest(); \\") - output.puts("}\n") - end - - def create_reset(output, used_mocks) - output.puts("\n//=======Test Reset Option=====") - output.puts("void resetTest(void);") - output.puts("void resetTest(void)") - output.puts("{") - output.puts(" CMock_Verify();") unless (used_mocks.empty?) - output.puts(" CMock_Destroy();") unless (used_mocks.empty?) - output.puts(" #{@options[:teardown_name]}();") - output.puts(" CMock_Init();") unless (used_mocks.empty?) - output.puts(" #{@options[:setup_name]}();") - output.puts("}") - end - - def create_main(output, filename, tests, used_mocks) - output.puts("\n\n//=======MAIN=====") - output.puts("int main(void)") - output.puts("{") - output.puts(" suite_setup();") unless @options[:suite_setup].nil? - output.puts(" UnityBegin(\"#{filename.gsub(/\\/,'\\\\')}\");") - if (@options[:use_param_tests]) - tests.each do |test| - if ((test[:args].nil?) or (test[:args].empty?)) - output.puts(" RUN_TEST(#{test[:test]}, #{test[:line_number]}, RUN_TEST_NO_ARGS);") - else - test[:args].each {|args| output.puts(" RUN_TEST(#{test[:test]}, #{test[:line_number]}, #{args});")} - end - end - else - tests.each { |test| output.puts(" RUN_TEST(#{test[:test]}, #{test[:line_number]});") } - end - output.puts() - output.puts(" CMock_Guts_MemFreeFinal();") unless used_mocks.empty? - output.puts(" return #{@options[:suite_teardown].nil? ? "" : "suite_teardown"}(UnityEnd());") - output.puts("}") - end - - def create_h_file(output, filename, tests, testfile_includes) - filename = filename.upcase.gsub(/(?:\/|\\|\.)*/,'_') - output.puts("/* AUTOGENERATED FILE. DO NOT EDIT. */") - output.puts("#ifndef _#{filename}") - output.puts("#define _#{filename}\n\n") - @options[:includes].flatten.uniq.compact.each do |inc| - output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") - end - testfile_includes.each do |inc| - output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") - end - output.puts "\n" - tests.each {|test| output.puts("void #{test[:test]}(#{test[:params]});") } - output.puts("#endif\n\n") - end -end - -if ($0 == __FILE__) - options = { :includes => [] } - yaml_file = nil - - #parse out all the options first (these will all be removed as we go) - ARGV.reject! 
do |arg| - case(arg) - when '-cexception' - options[:plugins] = [:cexception]; true - when /\.*\.ya?ml/ - options = UnityTestRunnerGenerator.grab_config(arg); true - when /\.*\.h/ - options[:includes] << arg; true - when /--(\w+)=\"?(.*)\"?/ - options[$1.to_sym] = $2; true - else false - end - end - - #make sure there is at least one parameter left (the input file) - if !ARGV[0] - puts ["\nusage: ruby #{__FILE__} (files) (options) input_test_file (output)", - "\n input_test_file - this is the C file you want to create a runner for", - " output - this is the name of the runner file to generate", - " defaults to (input_test_file)_Runner", - " files:", - " *.yml / *.yaml - loads configuration from here in :unity or :cmock", - " *.h - header files are added as #includes in runner", - " options:", - " -cexception - include cexception support", - " --setup_name=\"\" - redefine setUp func name to something else", - " --teardown_name=\"\" - redefine tearDown func name to something else", - " --test_prefix=\"\" - redefine test prefix from default test|spec|should", - " --suite_setup=\"\" - code to execute for setup of entire suite", - " --suite_teardown=\"\" - code to execute for teardown of entire suite", - " --use_param_tests=1 - enable parameterized tests (disabled by default)", - " --header_file=\"\" - path/name of test header file to generate too" - ].join("\n") - exit 1 - end - - #create the default test runner name if not specified - ARGV[1] = ARGV[0].gsub(".c","_Runner.c") if (!ARGV[1]) - - puts UnityTestRunnerGenerator.new(options).run(ARGV[0], ARGV[1]).inspect -end +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. Please refer to license.txt for details] +# ========================================== + +$QUICK_RUBY_VERSION = RUBY_VERSION.split('.').inject(0){|vv,v| vv * 100 + v.to_i } +File.expand_path(File.join(File.dirname(__FILE__),'colour_prompt')) + +class UnityTestRunnerGenerator + + def initialize(options = nil) + @options = UnityTestRunnerGenerator.default_options + case(options) + when NilClass then @options + when String then @options.merge!(UnityTestRunnerGenerator.grab_config(options)) + when Hash then @options.merge!(options) + else raise "If you specify arguments, it should be a filename or a hash of options" + end + require "#{File.expand_path(File.dirname(__FILE__))}/type_sanitizer" + end + + def self.default_options + { + :includes => [], + :plugins => [], + :framework => :unity, + :test_prefix => "test|spec|should", + :setup_name => "setUp", + :teardown_name => "tearDown", + } + end + + def self.grab_config(config_file) + options = self.default_options + unless (config_file.nil? or config_file.empty?) + require 'yaml' + yaml_guts = YAML.load_file(config_file) + options.merge!(yaml_guts[:unity] || yaml_guts[:cmock]) + raise "No :unity or :cmock section found in #{config_file}" unless options + end + return(options) + end + + def run(input_file, output_file, options=nil) + tests = [] + testfile_includes = [] + used_mocks = [] + + @options.merge!(options) unless options.nil? 
+ module_name = File.basename(input_file) + + #pull required data from source file + source = File.read(input_file) + source = source.force_encoding("ISO-8859-1").encode("utf-8", :replace => nil) if ($QUICK_RUBY_VERSION > 10900) + tests = find_tests(source) + headers = find_includes(source) + testfile_includes = (headers[:local] + headers[:system]) + used_mocks = find_mocks(testfile_includes) + testfile_includes = (testfile_includes - used_mocks) + testfile_includes.delete_if{|inc| inc =~ /(unity|cmock)/} + + #build runner file + generate(input_file, output_file, tests, used_mocks, testfile_includes) + + #determine which files were used to return them + all_files_used = [input_file, output_file] + all_files_used += testfile_includes.map {|filename| filename + '.c'} unless testfile_includes.empty? + all_files_used += @options[:includes] unless @options[:includes].empty? + return all_files_used.uniq + end + + def generate(input_file, output_file, tests, used_mocks, testfile_includes) + File.open(output_file, 'w') do |output| + create_header(output, used_mocks, testfile_includes) + create_externs(output, tests, used_mocks) + create_mock_management(output, used_mocks) + create_suite_setup_and_teardown(output) + create_reset(output, used_mocks) + create_main(output, input_file, tests, used_mocks) + end + + if (@options[:header_file] && !@options[:header_file].empty?) + File.open(@options[:header_file], 'w') do |output| + create_h_file(output, @options[:header_file], tests, testfile_includes) + end + end + end + + def find_tests(source) + tests_and_line_numbers = [] + + source_scrubbed = source.gsub(/\/\/.*$/, '') # remove line comments + source_scrubbed = source_scrubbed.gsub(/\/\*.*?\*\//m, '') # remove block comments + lines = source_scrubbed.split(/(^\s*\#.*$) # Treat preprocessor directives as a logical line + | (;|\{|\}) /x) # Match ;, {, and } as end of lines + + lines.each_with_index do |line, index| + #find tests + if line =~ /^((?:\s*TEST_CASE\s*\(.*?\)\s*)*)\s*void\s+((?:#{@options[:test_prefix]}).*)\s*\(\s*(.*)\s*\)/ + arguments = $1 + name = $2 + call = $3 + params = $4 + args = nil + if (@options[:use_param_tests] and !arguments.empty?) + args = [] + arguments.scan(/\s*TEST_CASE\s*\((.*)\)\s*$/) {|a| args << a[0]} + end + tests_and_line_numbers << { :test => name, :args => args, :call => call, :params => params, :line_number => 0 } + end + end + tests_and_line_numbers.uniq! 
{|v| v[:test] } + + #determine line numbers and create tests to run + source_lines = source.split("\n") + source_index = 0; + tests_and_line_numbers.size.times do |i| + source_lines[source_index..-1].each_with_index do |line, index| + if (line =~ /#{tests_and_line_numbers[i][:test]}/) + source_index += index + tests_and_line_numbers[i][:line_number] = source_index + 1 + break + end + end + end + + return tests_and_line_numbers + end + + def find_includes(source) + + #remove comments (block and line, in three steps to ensure correct precedence) + source.gsub!(/\/\/(?:.+\/\*|\*(?:$|[^\/])).*$/, '') # remove line comments that comment out the start of blocks + source.gsub!(/\/\*.*?\*\//m, '') # remove block comments + source.gsub!(/\/\/.*$/, '') # remove line comments (all that remain) + + #parse out includes + includes = { + :local => source.scan(/^\s*#include\s+\"\s*(.+)\.[hH]\s*\"/).flatten, + :system => source.scan(/^\s*#include\s+<\s*(.+)\s*>/).flatten.map { |inc| "<#{inc}>" } + } + return includes + end + + def find_mocks(includes) + mock_headers = [] + includes.each do |include_file| + mock_headers << File.basename(include_file) if (include_file =~ /^mock/i) + end + return mock_headers + end + + def create_header(output, mocks, testfile_includes=[]) + output.puts('/* AUTOGENERATED FILE. DO NOT EDIT. */') + create_runtest(output, mocks) + output.puts("\n//=======Automagically Detected Files To Include=====") + output.puts("#include \"#{@options[:framework].to_s}.h\"") + output.puts('#include "cmock.h"') unless (mocks.empty?) + output.puts('#include ') + output.puts('#include ') + output.puts('#include "CException.h"') if @options[:plugins].include?(:cexception) + if (@options[:header_file] && !@options[:header_file].empty?) + output.puts("#include \"#{File.basename(@options[:header_file])}\"") + else + @options[:includes].flatten.uniq.compact.each do |inc| + output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") + end + testfile_includes.each do |inc| + output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") + end + end + mocks.each do |mock| + output.puts("#include \"#{mock.gsub('.h','')}.h\"") + end + if @options[:enforce_strict_ordering] + output.puts('') + output.puts('int GlobalExpectCount;') + output.puts('int GlobalVerifyOrder;') + output.puts('char* GlobalOrderError;') + end + end + + def create_externs(output, tests, mocks) + output.puts("\n//=======External Functions This Runner Calls=====") + output.puts("extern void #{@options[:setup_name]}(void);") + output.puts("extern void #{@options[:teardown_name]}(void);") + tests.each do |test| + output.puts("extern void #{test[:test]}(#{test[:call] || 'void'});") + end + output.puts('') + end + + def create_mock_management(output, mocks) + unless (mocks.empty?) 
+ output.puts("\n//=======Mock Management=====") + output.puts("static void CMock_Init(void)") + output.puts("{") + if @options[:enforce_strict_ordering] + output.puts(" GlobalExpectCount = 0;") + output.puts(" GlobalVerifyOrder = 0;") + output.puts(" GlobalOrderError = NULL;") + end + mocks.each do |mock| + mock_clean = TypeSanitizer.sanitize_c_identifier(mock) + output.puts(" #{mock_clean}_Init();") + end + output.puts("}\n") + + output.puts("static void CMock_Verify(void)") + output.puts("{") + mocks.each do |mock| + mock_clean = TypeSanitizer.sanitize_c_identifier(mock) + output.puts(" #{mock_clean}_Verify();") + end + output.puts("}\n") + + output.puts("static void CMock_Destroy(void)") + output.puts("{") + mocks.each do |mock| + mock_clean = TypeSanitizer.sanitize_c_identifier(mock) + output.puts(" #{mock_clean}_Destroy();") + end + output.puts("}\n") + end + end + + def create_suite_setup_and_teardown(output) + unless (@options[:suite_setup].nil?) + output.puts("\n//=======Suite Setup=====") + output.puts("static int suite_setup(void)") + output.puts("{") + output.puts(@options[:suite_setup]) + output.puts("}") + end + unless (@options[:suite_teardown].nil?) + output.puts("\n//=======Suite Teardown=====") + output.puts("static int suite_teardown(int num_failures)") + output.puts("{") + output.puts(@options[:suite_teardown]) + output.puts("}") + end + end + + def create_runtest(output, used_mocks) + cexception = @options[:plugins].include? :cexception + va_args1 = @options[:use_param_tests] ? ', ...' : '' + va_args2 = @options[:use_param_tests] ? '__VA_ARGS__' : '' + output.puts("\n//=======Test Runner Used To Run Each Test Below=====") + output.puts("#define RUN_TEST_NO_ARGS") if @options[:use_param_tests] + output.puts("#define RUN_TEST(TestFunc, TestLineNum#{va_args1}) \\") + output.puts("{ \\") + output.puts(" Unity.CurrentTestName = #TestFunc#{va_args2.empty? ? '' : " \"(\" ##{va_args2} \")\""}; \\") + output.puts(" Unity.CurrentTestLineNumber = TestLineNum; \\") + output.puts(" Unity.NumberOfTests++; \\") + output.puts(" CMock_Init(); \\") unless (used_mocks.empty?) + output.puts(" UNITY_CLR_DETAILS(); \\") unless (used_mocks.empty?) + output.puts(" if (TEST_PROTECT()) \\") + output.puts(" { \\") + output.puts(" CEXCEPTION_T e; \\") if cexception + output.puts(" Try { \\") if cexception + output.puts(" #{@options[:setup_name]}(); \\") + output.puts(" TestFunc(#{va_args2}); \\") + output.puts(" } Catch(e) { TEST_ASSERT_EQUAL_HEX32_MESSAGE(CEXCEPTION_NONE, e, \"Unhandled Exception!\"); } \\") if cexception + output.puts(" } \\") + output.puts(" if (TEST_PROTECT() && !TEST_IS_IGNORED) \\") + output.puts(" { \\") + output.puts(" #{@options[:teardown_name]}(); \\") + output.puts(" CMock_Verify(); \\") unless (used_mocks.empty?) + output.puts(" } \\") + output.puts(" CMock_Destroy(); \\") unless (used_mocks.empty?) + output.puts(" UnityConcludeTest(); \\") + output.puts("}\n") + end + + def create_reset(output, used_mocks) + output.puts("\n//=======Test Reset Option=====") + output.puts("void resetTest(void);") + output.puts("void resetTest(void)") + output.puts("{") + output.puts(" CMock_Verify();") unless (used_mocks.empty?) + output.puts(" CMock_Destroy();") unless (used_mocks.empty?) + output.puts(" #{@options[:teardown_name]}();") + output.puts(" CMock_Init();") unless (used_mocks.empty?) 
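To make the generated output easier to picture, the RUN_TEST macro emitted by create_runtest above looks roughly like the following in a runner built with default options (no mocks, no CException plugin, no parameterized tests); this is a sketch reconstructed from the puts calls, not text copied from an actual generated file:

#define RUN_TEST(TestFunc, TestLineNum) \
{ \
  Unity.CurrentTestName = #TestFunc; \
  Unity.CurrentTestLineNumber = TestLineNum; \
  Unity.NumberOfTests++; \
  if (TEST_PROTECT()) \
  { \
    setUp(); \
    TestFunc(); \
  } \
  if (TEST_PROTECT() && !TEST_IS_IGNORED) \
  { \
    tearDown(); \
  } \
  UnityConcludeTest(); \
}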
+ output.puts(" #{@options[:setup_name]}();") + output.puts("}") + end + + def create_main(output, filename, tests, used_mocks) + output.puts("\n\n//=======MAIN=====") + output.puts("int main(void)") + output.puts("{") + output.puts(" suite_setup();") unless @options[:suite_setup].nil? + output.puts(" UnityBegin(\"#{filename.gsub(/\\/,'\\\\')}\");") + if (@options[:use_param_tests]) + tests.each do |test| + if ((test[:args].nil?) or (test[:args].empty?)) + output.puts(" RUN_TEST(#{test[:test]}, #{test[:line_number]}, RUN_TEST_NO_ARGS);") + else + test[:args].each {|args| output.puts(" RUN_TEST(#{test[:test]}, #{test[:line_number]}, #{args});")} + end + end + else + tests.each { |test| output.puts(" RUN_TEST(#{test[:test]}, #{test[:line_number]});") } + end + output.puts() + output.puts(" CMock_Guts_MemFreeFinal();") unless used_mocks.empty? + output.puts(" return #{@options[:suite_teardown].nil? ? "" : "suite_teardown"}(UnityEnd());") + output.puts("}") + end + + def create_h_file(output, filename, tests, testfile_includes) + filename = filename.upcase.gsub(/(?:\/|\\|\.)*/,'_') + output.puts("/* AUTOGENERATED FILE. DO NOT EDIT. */") + output.puts("#ifndef _#{filename}") + output.puts("#define _#{filename}\n\n") + @options[:includes].flatten.uniq.compact.each do |inc| + output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") + end + testfile_includes.each do |inc| + output.puts("#include #{inc.include?('<') ? inc : "\"#{inc.gsub('.h','')}.h\""}") + end + output.puts "\n" + tests.each {|test| output.puts("void #{test[:test]}(#{test[:params]});") } + output.puts("#endif\n\n") + end +end + +if ($0 == __FILE__) + options = { :includes => [] } + yaml_file = nil + + #parse out all the options first (these will all be removed as we go) + ARGV.reject! 
do |arg| + case(arg) + when '-cexception' + options[:plugins] = [:cexception]; true + when /\.*\.ya?ml/ + options = UnityTestRunnerGenerator.grab_config(arg); true + when /\.*\.h/ + options[:includes] << arg; true + when /--(\w+)=\"?(.*)\"?/ + options[$1.to_sym] = $2; true + else false + end + end + + #make sure there is at least one parameter left (the input file) + if !ARGV[0] + puts ["\nusage: ruby #{__FILE__} (files) (options) input_test_file (output)", + "\n input_test_file - this is the C file you want to create a runner for", + " output - this is the name of the runner file to generate", + " defaults to (input_test_file)_Runner", + " files:", + " *.yml / *.yaml - loads configuration from here in :unity or :cmock", + " *.h - header files are added as #includes in runner", + " options:", + " -cexception - include cexception support", + " --setup_name=\"\" - redefine setUp func name to something else", + " --teardown_name=\"\" - redefine tearDown func name to something else", + " --test_prefix=\"\" - redefine test prefix from default test|spec|should", + " --suite_setup=\"\" - code to execute for setup of entire suite", + " --suite_teardown=\"\" - code to execute for teardown of entire suite", + " --use_param_tests=1 - enable parameterized tests (disabled by default)", + " --header_file=\"\" - path/name of test header file to generate too" + ].join("\n") + exit 1 + end + + #create the default test runner name if not specified + ARGV[1] = ARGV[0].gsub(".c","_Runner.c") if (!ARGV[1]) + + puts UnityTestRunnerGenerator.new(options).run(ARGV[0], ARGV[1]).inspect +end diff --git a/auto/unity_test_summary.py b/auto/unity_test_summary.py index f226e2b..8b4574e 100644 --- a/auto/unity_test_summary.py +++ b/auto/unity_test_summary.py @@ -1,135 +1,135 @@ -#! python3 -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2015 Alexander Mueller / XelaRellum@web.de -# [Released under MIT License. 
Please refer to license.txt for details] -# Based on the ruby script by Mike Karlesky, Mark VanderVoord, Greg Williams -# ========================================== -import sys -import os -import re -from glob import glob - -class UnityTestSummary: - def __init__(self): - self.report = '' - self.total_tests = 0 - self.failures = 0 - self.ignored = 0 - - def run(self): - # Clean up result file names - results = [] - for target in self.targets: - results.append(target.replace('\\', '/')) - - # Dig through each result file, looking for details on pass/fail: - failure_output = [] - ignore_output = [] - - for result_file in results: - lines = list(map(lambda line: line.rstrip(), open(result_file, "r").read().split('\n'))) - if len(lines) == 0: - raise Exception("Empty test result file: %s" % result_file) - - details = self.get_details(result_file, lines) - failures = details['failures'] - ignores = details['ignores'] - if len(failures) > 0: failure_output.append('\n'.join(failures)) - if len(ignores) > 0: ignore_output.append('n'.join(ignores)) - tests,failures,ignored = self.parse_test_summary('\n'.join(lines)) - self.total_tests += tests - self.failures += failures - self.ignored += ignored - - if self.ignored > 0: - self.report += "\n" - self.report += "--------------------------\n" - self.report += "UNITY IGNORED TEST SUMMARY\n" - self.report += "--------------------------\n" - self.report += "\n".join(ignore_output) - - if self.failures > 0: - self.report += "\n" - self.report += "--------------------------\n" - self.report += "UNITY FAILED TEST SUMMARY\n" - self.report += "--------------------------\n" - self.report += '\n'.join(failure_output) - - self.report += "\n" - self.report += "--------------------------\n" - self.report += "OVERALL UNITY TEST SUMMARY\n" - self.report += "--------------------------\n" - self.report += "{total_tests} TOTAL TESTS {failures} TOTAL FAILURES {ignored} IGNORED\n".format(total_tests = self.total_tests, failures=self.failures, ignored=self.ignored) - self.report += "\n" - - return self.report - - def set_targets(self, target_array): - self.targets = target_array - - def set_root_path(self, path): - self.root = path - - def usage(self, err_msg=None): - print("\nERROR: ") - if err_msg: - print(err_msg) - print("\nUsage: unity_test_summary.py result_file_directory/ root_path/") - print(" result_file_directory - The location of your results files.") - print(" Defaults to current directory if not specified.") - print(" Should end in / if specified.") - print(" root_path - Helpful for producing more verbose output if using relative paths.") - sys.exit(1) - - def get_details(self, result_file, lines): - results = { 'failures': [], 'ignores': [], 'successes': [] } - for line in lines: - parts = line.split(':') - if len(parts) != 5: - continue - src_file,src_line,test_name,status,msg = parts - if len(self.root) > 0: - line_out = "%s%s" % (self.root, line) - else: - line_out = line - if status == 'IGNORE': - results['ignores'].append(line_out) - elif status == 'FAIL': - results['failures'].append(line_out) - elif status == 'PASS': - results['successes'].append(line_out) - return results - - def parse_test_summary(self, summary): - m = re.search(r"([0-9]+) Tests ([0-9]+) Failures ([0-9]+) Ignored", summary) - if not m: - raise Exception("Couldn't parse test results: %s" % summary) - - return int(m.group(1)), int(m.group(2)), int(m.group(3)) - - -if __name__ == '__main__': - uts = UnityTestSummary() - try: - #look in the specified or current directory for result 
files - if len(sys.argv) > 1: - targets_dir = sys.argv[1] - else: - targets_dir = './' - targets = list(map(lambda x: x.replace('\\', '/'), glob(targets_dir + '*.test*'))) - if len(targets) == 0: - raise Exception("No *.testpass or *.testfail files found in '%s'" % targets_dir) - uts.set_targets(targets) - - #set the root path - if len(sys.argv) > 2: - root_path = sys.argv[2] - else: - root_path = os.path.split(__file__)[0] - uts.set_root_path(root_path) - - #run the summarizer - print(uts.run()) - except Exception as e: - uts.usage(e) +#! python3 +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2015 Alexander Mueller / XelaRellum@web.de +# [Released under MIT License. Please refer to license.txt for details] +# Based on the ruby script by Mike Karlesky, Mark VanderVoord, Greg Williams +# ========================================== +import sys +import os +import re +from glob import glob + +class UnityTestSummary: + def __init__(self): + self.report = '' + self.total_tests = 0 + self.failures = 0 + self.ignored = 0 + + def run(self): + # Clean up result file names + results = [] + for target in self.targets: + results.append(target.replace('\\', '/')) + + # Dig through each result file, looking for details on pass/fail: + failure_output = [] + ignore_output = [] + + for result_file in results: + lines = list(map(lambda line: line.rstrip(), open(result_file, "r").read().split('\n'))) + if len(lines) == 0: + raise Exception("Empty test result file: %s" % result_file) + + details = self.get_details(result_file, lines) + failures = details['failures'] + ignores = details['ignores'] + if len(failures) > 0: failure_output.append('\n'.join(failures)) + if len(ignores) > 0: ignore_output.append('n'.join(ignores)) + tests,failures,ignored = self.parse_test_summary('\n'.join(lines)) + self.total_tests += tests + self.failures += failures + self.ignored += ignored + + if self.ignored > 0: + self.report += "\n" + self.report += "--------------------------\n" + self.report += "UNITY IGNORED TEST SUMMARY\n" + self.report += "--------------------------\n" + self.report += "\n".join(ignore_output) + + if self.failures > 0: + self.report += "\n" + self.report += "--------------------------\n" + self.report += "UNITY FAILED TEST SUMMARY\n" + self.report += "--------------------------\n" + self.report += '\n'.join(failure_output) + + self.report += "\n" + self.report += "--------------------------\n" + self.report += "OVERALL UNITY TEST SUMMARY\n" + self.report += "--------------------------\n" + self.report += "{total_tests} TOTAL TESTS {failures} TOTAL FAILURES {ignored} IGNORED\n".format(total_tests = self.total_tests, failures=self.failures, ignored=self.ignored) + self.report += "\n" + + return self.report + + def set_targets(self, target_array): + self.targets = target_array + + def set_root_path(self, path): + self.root = path + + def usage(self, err_msg=None): + print("\nERROR: ") + if err_msg: + print(err_msg) + print("\nUsage: unity_test_summary.py result_file_directory/ root_path/") + print(" result_file_directory - The location of your results files.") + print(" Defaults to current directory if not specified.") + print(" Should end in / if specified.") + print(" root_path - Helpful for producing more verbose output if using relative paths.") + sys.exit(1) + + def get_details(self, result_file, lines): + results = { 'failures': [], 'ignores': [], 'successes': [] } + for line in lines: + parts = line.split(':') + if len(parts) != 5: + continue 
+ src_file,src_line,test_name,status,msg = parts + if len(self.root) > 0: + line_out = "%s%s" % (self.root, line) + else: + line_out = line + if status == 'IGNORE': + results['ignores'].append(line_out) + elif status == 'FAIL': + results['failures'].append(line_out) + elif status == 'PASS': + results['successes'].append(line_out) + return results + + def parse_test_summary(self, summary): + m = re.search(r"([0-9]+) Tests ([0-9]+) Failures ([0-9]+) Ignored", summary) + if not m: + raise Exception("Couldn't parse test results: %s" % summary) + + return int(m.group(1)), int(m.group(2)), int(m.group(3)) + + +if __name__ == '__main__': + uts = UnityTestSummary() + try: + #look in the specified or current directory for result files + if len(sys.argv) > 1: + targets_dir = sys.argv[1] + else: + targets_dir = './' + targets = list(map(lambda x: x.replace('\\', '/'), glob(targets_dir + '*.test*'))) + if len(targets) == 0: + raise Exception("No *.testpass or *.testfail files found in '%s'" % targets_dir) + uts.set_targets(targets) + + #set the root path + if len(sys.argv) > 2: + root_path = sys.argv[2] + else: + root_path = os.path.split(__file__)[0] + uts.set_root_path(root_path) + + #run the summarizer + print(uts.run()) + except Exception as e: + uts.usage(e) diff --git a/auto/unity_test_summary.rb b/auto/unity_test_summary.rb index 39e3094..8f992e5 100644 --- a/auto/unity_test_summary.rb +++ b/auto/unity_test_summary.rb @@ -1,148 +1,148 @@ -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. Please refer to license.txt for details] -# ========================================== - -#!/usr/bin/ruby -# -# unity_test_summary.rb -# -require 'fileutils' -require 'set' - -class UnityTestSummary - include FileUtils::Verbose - - attr_reader :report, :total_tests, :failures, :ignored - - def initialize(opts = {}) - @report = '' - @total_tests = 0 - @failures = 0 - @ignored = 0 - - - end - - def run - # Clean up result file names - results = @targets.map {|target| target.gsub(/\\/,'/')} - - # Dig through each result file, looking for details on pass/fail: - failure_output = [] - ignore_output = [] - - results.each do |result_file| - lines = File.readlines(result_file).map { |line| line.chomp } - if lines.length == 0 - raise "Empty test result file: #{result_file}" - else - output = get_details(result_file, lines) - failure_output << output[:failures] unless output[:failures].empty? - ignore_output << output[:ignores] unless output[:ignores].empty? 
- tests,failures,ignored = parse_test_summary(lines) - @total_tests += tests - @failures += failures - @ignored += ignored - end - end - - if @ignored > 0 - @report += "\n" - @report += "--------------------------\n" - @report += "UNITY IGNORED TEST SUMMARY\n" - @report += "--------------------------\n" - @report += ignore_output.flatten.join("\n") - end - - if @failures > 0 - @report += "\n" - @report += "--------------------------\n" - @report += "UNITY FAILED TEST SUMMARY\n" - @report += "--------------------------\n" - @report += failure_output.flatten.join("\n") - end - - @report += "\n" - @report += "--------------------------\n" - @report += "OVERALL UNITY TEST SUMMARY\n" - @report += "--------------------------\n" - @report += "#{@total_tests} TOTAL TESTS #{@failures} TOTAL FAILURES #{@ignored} IGNORED\n" - @report += "\n" - end - - def set_targets(target_array) - @targets = target_array - end - - def set_root_path(path) - @root = path - end - - def usage(err_msg=nil) - puts "\nERROR: " - puts err_msg if err_msg - puts "\nUsage: unity_test_summary.rb result_file_directory/ root_path/" - puts " result_file_directory - The location of your results files." - puts " Defaults to current directory if not specified." - puts " Should end in / if specified." - puts " root_path - Helpful for producing more verbose output if using relative paths." - exit 1 - end - - protected - - def get_details(result_file, lines) - results = { :failures => [], :ignores => [], :successes => [] } - lines.each do |line| - src_file,src_line,test_name,status,msg = line.split(/:/) - line_out = ((@root && (@root != 0)) ? "#{@root}#{line}" : line ).gsub(/\//, "\\") - case(status) - when 'IGNORE' then results[:ignores] << line_out - when 'FAIL' then results[:failures] << line_out - when 'PASS' then results[:successes] << line_out - end - end - return results - end - - def parse_test_summary(summary) - if summary.find { |v| v =~ /(\d+) Tests (\d+) Failures (\d+) Ignored/ } - [$1.to_i,$2.to_i,$3.to_i] - else - raise "Couldn't parse test results: #{summary}" - end - end - - def here; File.expand_path(File.dirname(__FILE__)); end - -end - -if $0 == __FILE__ - - #parse out the command options - opts, args = ARGV.partition {|v| v =~ /^--\w+/} - opts.map! {|v| v[2..-1].to_sym } - - #create an instance to work with - uts = UnityTestSummary.new(opts) - - begin - #look in the specified or current directory for result files - args[0] ||= './' - targets = "#{ARGV[0].gsub(/\\/, '/')}**/*.test*" - results = Dir[targets] - raise "No *.testpass, *.testfail, or *.testresults files found in '#{targets}'" if results.empty? - uts.set_targets(results) - - #set the root path - args[1] ||= Dir.pwd + '/' - uts.set_root_path(ARGV[1]) - - #run the summarizer - puts uts.run - rescue Exception => e - uts.usage e.message - end -end +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. 
Please refer to license.txt for details] +# ========================================== + +#!/usr/bin/ruby +# +# unity_test_summary.rb +# +require 'fileutils' +require 'set' + +class UnityTestSummary + include FileUtils::Verbose + + attr_reader :report, :total_tests, :failures, :ignored + + def initialize(opts = {}) + @report = '' + @total_tests = 0 + @failures = 0 + @ignored = 0 + + + end + + def run + # Clean up result file names + results = @targets.map {|target| target.gsub(/\\/,'/')} + + # Dig through each result file, looking for details on pass/fail: + failure_output = [] + ignore_output = [] + + results.each do |result_file| + lines = File.readlines(result_file).map { |line| line.chomp } + if lines.length == 0 + raise "Empty test result file: #{result_file}" + else + output = get_details(result_file, lines) + failure_output << output[:failures] unless output[:failures].empty? + ignore_output << output[:ignores] unless output[:ignores].empty? + tests,failures,ignored = parse_test_summary(lines) + @total_tests += tests + @failures += failures + @ignored += ignored + end + end + + if @ignored > 0 + @report += "\n" + @report += "--------------------------\n" + @report += "UNITY IGNORED TEST SUMMARY\n" + @report += "--------------------------\n" + @report += ignore_output.flatten.join("\n") + end + + if @failures > 0 + @report += "\n" + @report += "--------------------------\n" + @report += "UNITY FAILED TEST SUMMARY\n" + @report += "--------------------------\n" + @report += failure_output.flatten.join("\n") + end + + @report += "\n" + @report += "--------------------------\n" + @report += "OVERALL UNITY TEST SUMMARY\n" + @report += "--------------------------\n" + @report += "#{@total_tests} TOTAL TESTS #{@failures} TOTAL FAILURES #{@ignored} IGNORED\n" + @report += "\n" + end + + def set_targets(target_array) + @targets = target_array + end + + def set_root_path(path) + @root = path + end + + def usage(err_msg=nil) + puts "\nERROR: " + puts err_msg if err_msg + puts "\nUsage: unity_test_summary.rb result_file_directory/ root_path/" + puts " result_file_directory - The location of your results files." + puts " Defaults to current directory if not specified." + puts " Should end in / if specified." + puts " root_path - Helpful for producing more verbose output if using relative paths." + exit 1 + end + + protected + + def get_details(result_file, lines) + results = { :failures => [], :ignores => [], :successes => [] } + lines.each do |line| + src_file,src_line,test_name,status,msg = line.split(/:/) + line_out = ((@root && (@root != 0)) ? "#{@root}#{line}" : line ).gsub(/\//, "\\") + case(status) + when 'IGNORE' then results[:ignores] << line_out + when 'FAIL' then results[:failures] << line_out + when 'PASS' then results[:successes] << line_out + end + end + return results + end + + def parse_test_summary(summary) + if summary.find { |v| v =~ /(\d+) Tests (\d+) Failures (\d+) Ignored/ } + [$1.to_i,$2.to_i,$3.to_i] + else + raise "Couldn't parse test results: #{summary}" + end + end + + def here; File.expand_path(File.dirname(__FILE__)); end + +end + +if $0 == __FILE__ + + #parse out the command options + opts, args = ARGV.partition {|v| v =~ /^--\w+/} + opts.map! 
{|v| v[2..-1].to_sym } + + #create an instance to work with + uts = UnityTestSummary.new(opts) + + begin + #look in the specified or current directory for result files + args[0] ||= './' + targets = "#{ARGV[0].gsub(/\\/, '/')}**/*.test*" + results = Dir[targets] + raise "No *.testpass, *.testfail, or *.testresults files found in '#{targets}'" if results.empty? + uts.set_targets(results) + + #set the root path + args[1] ||= Dir.pwd + '/' + uts.set_root_path(ARGV[1]) + + #run the summarizer + puts uts.run + rescue Exception => e + uts.usage e.message + end +end diff --git a/docs/Unity Summary.txt b/docs/Unity Summary.txt index fa83e51..a11ce3c 100644 --- a/docs/Unity Summary.txt +++ b/docs/Unity Summary.txt @@ -1,224 +1,224 @@ -============== -Unity Test API -============== - -[Copyright (c) 2007 - 2012 Unity Project by Mike Karlesky, Mark VanderVoord, and Greg Williams] - -------------- -Running Tests -------------- - -RUN_TEST(func, linenum) - -Each Test is run within the macro RUN_TEST. This macro performs necessary setup before the test is called and handles cleanup and result tabulation afterwards. - --------------- -Ignoring Tests --------------- - -There are times when a test is incomplete or not valid for some reason. At these times, TEST_IGNORE can be called. Control will immediately be returned to the caller of the test, and no failures will be returned. - -TEST_IGNORE() - -Ignore this test and return immediately - -TEST_IGNORE_MESSAGE (message) - -Ignore this test and return immediately. Output a message stating why the test was ignored. - --------------- -Aborting Tests --------------- - -There are times when a test will contain an infinite loop on error conditions, or there may be reason to escape from the test early without executing the rest of the test. A pair of macros support this functionality in Unity. The first (TEST_PROTECT) sets up the feature, and handles emergency abort cases. TEST_ABORT can then be used at any time within the tests to return to the last TEST_PROTECT call. - -TEST_PROTECT() - -Setup and Catch macro - -TEST_ABORT() - -Abort Test macro - -Example: - -main() -{ - if (TEST_PROTECT() == 0) - { - MyTest(); - } -} - -If MyTest calls TEST_ABORT, program control will immediately return to TEST_PROTECT with a non-zero return value. - - -======================= -Unity Assertion Summary -======================= - --------------------- -Basic Validity Tests --------------------- - -TEST_ASSERT_TRUE(condition) - -Evaluates whatever code is in condition and fails if it evaluates to false - -TEST_ASSERT_FALSE(condition) - -Evaluates whatever code is in condition and fails if it evaluates to true - -TEST_ASSERT(condition) - -Another way of calling TEST_ASSERT_TRUE - -TEST_ASSERT_UNLESS(condition) - -Another way of calling TEST_ASSERT_FALSE - -TEST_FAIL() -TEST_FAIL_MESSAGE(message) - -This test is automatically marked as a failure. The message is output stating why. - ------------------------------- -Numerical Assertions: Integers ------------------------------- - -TEST_ASSERT_EQUAL_INT(expected, actual) -TEST_ASSERT_EQUAL_INT8(expected, actual) -TEST_ASSERT_EQUAL_INT16(expected, actual) -TEST_ASSERT_EQUAL_INT32(expected, actual) -TEST_ASSERT_EQUAL_INT64(expected, actual) - -Compare two integers for equality and display errors as signed integers. A cast will be performed -to your natural integer size so often this can just be used. 
When you need to specify the exact size, -like when comparing arrays, you can use a specific version: - -TEST_ASSERT_EQUAL_UINT(expected, actual) -TEST_ASSERT_EQUAL_UINT8(expected, actual) -TEST_ASSERT_EQUAL_UINT16(expected, actual) -TEST_ASSERT_EQUAL_UINT32(expected, actual) -TEST_ASSERT_EQUAL_UINT64(expected, actual) - -Compare two integers for equality and display errors as unsigned integers. Like INT, there are -variants for different sizes also. - -TEST_ASSERT_EQUAL_HEX(expected, actual) -TEST_ASSERT_EQUAL_HEX8(expected, actual) -TEST_ASSERT_EQUAL_HEX16(expected, actual) -TEST_ASSERT_EQUAL_HEX32(expected, actual) -TEST_ASSERT_EQUAL_HEX64(expected, actual) - -Compares two integers for equality and display errors as hexadecimal. Like the other integer comparisons, -you can specify the size... here the size will also effect how many nibbles are shown (for example, HEX16 -will show 4 nibbles). - -_ARRAY - -You can append _ARRAY to any of these macros to make an array comparison of that type. Here you will -need to care a bit more about the actual size of the value being checked. You will also specify an -additional argument which is the number of elements to compare. For example: - -TEST_ASSERT_EQUAL_HEX8_ARRAY(expected, actual, elements) - -TEST_ASSERT_EQUAL(expected, actual) - -Another way of calling TEST_ASSERT_EQUAL_INT - -TEST_ASSERT_INT_WITHIN(delta, expected, actual) - -Asserts that the actual value is within plus or minus delta of the expected value. This also comes in -size specific variants. - - ------------------------------ -Numerical Assertions: Bitwise ------------------------------ - -TEST_ASSERT_BITS(mask, expected, actual) - -Use an integer mask to specify which bits should be compared between two other integers. High bits in the mask are compared, low bits ignored. - -TEST_ASSERT_BITS_HIGH(mask, actual) - -Use an integer mask to specify which bits should be inspected to determine if they are all set high. High bits in the mask are compared, low bits ignored. - -TEST_ASSERT_BITS_LOW(mask, actual) - -Use an integer mask to specify which bits should be inspected to determine if they are all set low. High bits in the mask are compared, low bits ignored. - -TEST_ASSERT_BIT_HIGH(bit, actual) - -Test a single bit and verify that it is high. The bit is specified 0-31 for a 32-bit integer. - -TEST_ASSERT_BIT_LOW(bit, actual) - -Test a single bit and verify that it is low. The bit is specified 0-31 for a 32-bit integer. - ----------------------------- -Numerical Assertions: Floats ----------------------------- - -TEST_ASSERT_FLOAT_WITHIN(delta, expected, actual) - -Asserts that the actual value is within plus or minus delta of the expected value. - -TEST_ASSERT_EQUAL_FLOAT(expected, actual) -TEST_ASSERT_EQUAL_DOUBLE(expected, actual) - -Asserts that two floating point values are "equal" within a small % delta of the expected value. - ------------------ -String Assertions ------------------ - -TEST_ASSERT_EQUAL_STRING(expected, actual) - -Compare two null-terminate strings. Fail if any character is different or if the lengths are different. - -TEST_ASSERT_EQUAL_STRING_LEN(expected, actual, len) - -Compare two strings. Fail if any character is different, stop comparing after len characters. - -TEST_ASSERT_EQUAL_STRING_MESSAGE(expected, actual, message) - -Compare two null-terminate strings. Fail if any character is different or if the lengths are different. Output a custom message on failure. 
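For illustration, a string comparison with a custom failure message might look like the sketch below (build_greeting is a made-up function under test, not part of Unity):

Example:

void test_GreetingIsFormatted(void)
{
    const char* greeting = build_greeting("World");    /* made-up function under test */

    TEST_ASSERT_EQUAL_STRING("Hello, World!", greeting);
    TEST_ASSERT_EQUAL_STRING_MESSAGE("Hello, World!", greeting,
                                     "greeting was not formatted as expected");
}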
- -TEST_ASSERT_EQUAL_STRING_LEN_MESSAGE(expected, actual, len, message) - -Compare two strings. Fail if any character is different, stop comparing after len characters. Output a custom message on failure. - ------------------- -Pointer Assertions ------------------- - -Most pointer operations can be performed by simply using the integer comparisons above. However, a couple of special cases are added for clarity. - -TEST_ASSERT_NULL(pointer) - -Fails if the pointer is not equal to NULL - -TEST_ASSERT_NOT_NULL(pointer) - -Fails if the pointer is equal to NULL - - ------------------ -Memory Assertions ------------------ - -TEST_ASSERT_EQUAL_MEMORY(expected, actual, len) - -Compare two blocks of memory. This is a good generic assertion for types that can't be coerced into acting like -standard types... but since it's a memory compare, you have to be careful that your data types are packed. - --------- -_MESSAGE --------- - -you can append _MESSAGE to any of the macros to make them take an additional argument. This argument -is a string that will be printed at the end of the failure strings. This is useful for specifying more -information about the problem. - +============== +Unity Test API +============== + +[Copyright (c) 2007 - 2012 Unity Project by Mike Karlesky, Mark VanderVoord, and Greg Williams] + +------------- +Running Tests +------------- + +RUN_TEST(func, linenum) + +Each Test is run within the macro RUN_TEST. This macro performs necessary setup before the test is called and handles cleanup and result tabulation afterwards. + +-------------- +Ignoring Tests +-------------- + +There are times when a test is incomplete or not valid for some reason. At these times, TEST_IGNORE can be called. Control will immediately be returned to the caller of the test, and no failures will be returned. + +TEST_IGNORE() + +Ignore this test and return immediately + +TEST_IGNORE_MESSAGE (message) + +Ignore this test and return immediately. Output a message stating why the test was ignored. + +-------------- +Aborting Tests +-------------- + +There are times when a test will contain an infinite loop on error conditions, or there may be reason to escape from the test early without executing the rest of the test. A pair of macros support this functionality in Unity. The first (TEST_PROTECT) sets up the feature, and handles emergency abort cases. TEST_ABORT can then be used at any time within the tests to return to the last TEST_PROTECT call. + +TEST_PROTECT() + +Setup and Catch macro + +TEST_ABORT() + +Abort Test macro + +Example: + +main() +{ + if (TEST_PROTECT() == 0) + { + MyTest(); + } +} + +If MyTest calls TEST_ABORT, program control will immediately return to TEST_PROTECT with a non-zero return value. + + +======================= +Unity Assertion Summary +======================= + +-------------------- +Basic Validity Tests +-------------------- + +TEST_ASSERT_TRUE(condition) + +Evaluates whatever code is in condition and fails if it evaluates to false + +TEST_ASSERT_FALSE(condition) + +Evaluates whatever code is in condition and fails if it evaluates to true + +TEST_ASSERT(condition) + +Another way of calling TEST_ASSERT_TRUE + +TEST_ASSERT_UNLESS(condition) + +Another way of calling TEST_ASSERT_FALSE + +TEST_FAIL() +TEST_FAIL_MESSAGE(message) + +This test is automatically marked as a failure. The message is output stating why. 
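For illustration, a minimal test using these basic validity assertions might look like the sketch below (queue_init, queue_is_empty, and queue_is_full are made-up names for a unit under test, not part of Unity):

Example:

void test_QueueStartsEmpty(void)
{
    if (!queue_init())                     /* made-up init function under test */
        TEST_FAIL_MESSAGE("queue_init failed");

    TEST_ASSERT_TRUE(queue_is_empty());    /* fails if the condition evaluates to false */
    TEST_ASSERT_FALSE(queue_is_full());    /* fails if the condition evaluates to true  */
}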
+ +------------------------------ +Numerical Assertions: Integers +------------------------------ + +TEST_ASSERT_EQUAL_INT(expected, actual) +TEST_ASSERT_EQUAL_INT8(expected, actual) +TEST_ASSERT_EQUAL_INT16(expected, actual) +TEST_ASSERT_EQUAL_INT32(expected, actual) +TEST_ASSERT_EQUAL_INT64(expected, actual) + +Compare two integers for equality and display errors as signed integers. A cast will be performed +to your natural integer size so often this can just be used. When you need to specify the exact size, +like when comparing arrays, you can use a specific version: + +TEST_ASSERT_EQUAL_UINT(expected, actual) +TEST_ASSERT_EQUAL_UINT8(expected, actual) +TEST_ASSERT_EQUAL_UINT16(expected, actual) +TEST_ASSERT_EQUAL_UINT32(expected, actual) +TEST_ASSERT_EQUAL_UINT64(expected, actual) + +Compare two integers for equality and display errors as unsigned integers. Like INT, there are +variants for different sizes also. + +TEST_ASSERT_EQUAL_HEX(expected, actual) +TEST_ASSERT_EQUAL_HEX8(expected, actual) +TEST_ASSERT_EQUAL_HEX16(expected, actual) +TEST_ASSERT_EQUAL_HEX32(expected, actual) +TEST_ASSERT_EQUAL_HEX64(expected, actual) + +Compares two integers for equality and display errors as hexadecimal. Like the other integer comparisons, +you can specify the size... here the size will also effect how many nibbles are shown (for example, HEX16 +will show 4 nibbles). + +_ARRAY + +You can append _ARRAY to any of these macros to make an array comparison of that type. Here you will +need to care a bit more about the actual size of the value being checked. You will also specify an +additional argument which is the number of elements to compare. For example: + +TEST_ASSERT_EQUAL_HEX8_ARRAY(expected, actual, elements) + +TEST_ASSERT_EQUAL(expected, actual) + +Another way of calling TEST_ASSERT_EQUAL_INT + +TEST_ASSERT_INT_WITHIN(delta, expected, actual) + +Asserts that the actual value is within plus or minus delta of the expected value. This also comes in +size specific variants. + + +----------------------------- +Numerical Assertions: Bitwise +----------------------------- + +TEST_ASSERT_BITS(mask, expected, actual) + +Use an integer mask to specify which bits should be compared between two other integers. High bits in the mask are compared, low bits ignored. + +TEST_ASSERT_BITS_HIGH(mask, actual) + +Use an integer mask to specify which bits should be inspected to determine if they are all set high. High bits in the mask are compared, low bits ignored. + +TEST_ASSERT_BITS_LOW(mask, actual) + +Use an integer mask to specify which bits should be inspected to determine if they are all set low. High bits in the mask are compared, low bits ignored. + +TEST_ASSERT_BIT_HIGH(bit, actual) + +Test a single bit and verify that it is high. The bit is specified 0-31 for a 32-bit integer. + +TEST_ASSERT_BIT_LOW(bit, actual) + +Test a single bit and verify that it is low. The bit is specified 0-31 for a 32-bit integer. + +---------------------------- +Numerical Assertions: Floats +---------------------------- + +TEST_ASSERT_FLOAT_WITHIN(delta, expected, actual) + +Asserts that the actual value is within plus or minus delta of the expected value. + +TEST_ASSERT_EQUAL_FLOAT(expected, actual) +TEST_ASSERT_EQUAL_DOUBLE(expected, actual) + +Asserts that two floating point values are "equal" within a small % delta of the expected value. + +----------------- +String Assertions +----------------- + +TEST_ASSERT_EQUAL_STRING(expected, actual) + +Compare two null-terminate strings. 
Fail if any character is different or if the lengths are different. + +TEST_ASSERT_EQUAL_STRING_LEN(expected, actual, len) + +Compare two strings. Fail if any character is different, stop comparing after len characters. + +TEST_ASSERT_EQUAL_STRING_MESSAGE(expected, actual, message) + +Compare two null-terminate strings. Fail if any character is different or if the lengths are different. Output a custom message on failure. + +TEST_ASSERT_EQUAL_STRING_LEN_MESSAGE(expected, actual, len, message) + +Compare two strings. Fail if any character is different, stop comparing after len characters. Output a custom message on failure. + +------------------ +Pointer Assertions +------------------ + +Most pointer operations can be performed by simply using the integer comparisons above. However, a couple of special cases are added for clarity. + +TEST_ASSERT_NULL(pointer) + +Fails if the pointer is not equal to NULL + +TEST_ASSERT_NOT_NULL(pointer) + +Fails if the pointer is equal to NULL + + +----------------- +Memory Assertions +----------------- + +TEST_ASSERT_EQUAL_MEMORY(expected, actual, len) + +Compare two blocks of memory. This is a good generic assertion for types that can't be coerced into acting like +standard types... but since it's a memory compare, you have to be careful that your data types are packed. + +-------- +_MESSAGE +-------- + +you can append _MESSAGE to any of the macros to make them take an additional argument. This argument +is a string that will be printed at the end of the failure strings. This is useful for specifying more +information about the problem. + diff --git a/docs/license.txt b/docs/license.txt index d0f635f..d66fba5 100644 --- a/docs/license.txt +++ b/docs/license.txt @@ -1,21 +1,21 @@ -The MIT License (MIT) - -Copyright (c) 2007-14 Mike Karlesky, Mark VanderVoord, Greg Williams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +The MIT License (MIT) + +Copyright (c) 2007-14 Mike Karlesky, Mark VanderVoord, Greg Williams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/examples/example_1/test/test_runners/TestProductionCode2_Runner.c b/examples/example_1/test/test_runners/TestProductionCode2_Runner.c index b7e70a2..8aa3203 100644 --- a/examples/example_1/test/test_runners/TestProductionCode2_Runner.c +++ b/examples/example_1/test/test_runners/TestProductionCode2_Runner.c @@ -1,53 +1,53 @@ -/* AUTOGENERATED FILE. DO NOT EDIT. */ - -//=======Test Runner Used To Run Each Test Below===== -#define RUN_TEST(TestFunc, TestLineNum) \ -{ \ - Unity.CurrentTestName = #TestFunc; \ - Unity.CurrentTestLineNumber = TestLineNum; \ - Unity.NumberOfTests++; \ - if (TEST_PROTECT()) \ - { \ - setUp(); \ - TestFunc(); \ - } \ - if (TEST_PROTECT() && !TEST_IS_IGNORED) \ - { \ - tearDown(); \ - } \ - UnityConcludeTest(); \ -} - -//=======Automagically Detected Files To Include===== -#include "unity.h" -#include -#include -#include "ProductionCode2.h" - -//=======External Functions This Runner Calls===== -extern void setUp(void); -extern void tearDown(void); -extern void test_IgnoredTest(void); -extern void test_AnotherIgnoredTest(void); -extern void test_ThisFunctionHasNotBeenTested_NeedsToBeImplemented(void); - - -//=======Test Reset Option===== -void resetTest(void); -void resetTest(void) -{ - tearDown(); - setUp(); -} - - -//=======MAIN===== -int main(void) -{ - UnityBegin("test/TestProductionCode2.c"); - RUN_TEST(test_IgnoredTest, 18); - RUN_TEST(test_AnotherIgnoredTest, 23); - RUN_TEST(test_ThisFunctionHasNotBeenTested_NeedsToBeImplemented, 28); - - return (UnityEnd()); -} +/* AUTOGENERATED FILE. DO NOT EDIT. */ + +//=======Test Runner Used To Run Each Test Below===== +#define RUN_TEST(TestFunc, TestLineNum) \ +{ \ + Unity.CurrentTestName = #TestFunc; \ + Unity.CurrentTestLineNumber = TestLineNum; \ + Unity.NumberOfTests++; \ + if (TEST_PROTECT()) \ + { \ + setUp(); \ + TestFunc(); \ + } \ + if (TEST_PROTECT() && !TEST_IS_IGNORED) \ + { \ + tearDown(); \ + } \ + UnityConcludeTest(); \ +} + +//=======Automagically Detected Files To Include===== +#include "unity.h" +#include +#include +#include "ProductionCode2.h" + +//=======External Functions This Runner Calls===== +extern void setUp(void); +extern void tearDown(void); +extern void test_IgnoredTest(void); +extern void test_AnotherIgnoredTest(void); +extern void test_ThisFunctionHasNotBeenTested_NeedsToBeImplemented(void); + + +//=======Test Reset Option===== +void resetTest(void); +void resetTest(void) +{ + tearDown(); + setUp(); +} + + +//=======MAIN===== +int main(void) +{ + UnityBegin("test/TestProductionCode2.c"); + RUN_TEST(test_IgnoredTest, 18); + RUN_TEST(test_AnotherIgnoredTest, 23); + RUN_TEST(test_ThisFunctionHasNotBeenTested_NeedsToBeImplemented, 28); + + return (UnityEnd()); +} diff --git a/examples/example_1/test/test_runners/TestProductionCode_Runner.c b/examples/example_1/test/test_runners/TestProductionCode_Runner.c index c18025e..840a503 100644 --- a/examples/example_1/test/test_runners/TestProductionCode_Runner.c +++ b/examples/example_1/test/test_runners/TestProductionCode_Runner.c @@ -1,57 +1,57 @@ -/* AUTOGENERATED FILE. 
DO NOT EDIT. */ - -//=======Test Runner Used To Run Each Test Below===== -#define RUN_TEST(TestFunc, TestLineNum) \ -{ \ - Unity.CurrentTestName = #TestFunc; \ - Unity.CurrentTestLineNumber = TestLineNum; \ - Unity.NumberOfTests++; \ - if (TEST_PROTECT()) \ - { \ - setUp(); \ - TestFunc(); \ - } \ - if (TEST_PROTECT() && !TEST_IS_IGNORED) \ - { \ - tearDown(); \ - } \ - UnityConcludeTest(); \ -} - -//=======Automagically Detected Files To Include===== -#include "unity.h" -#include -#include -#include "ProductionCode.h" - -//=======External Functions This Runner Calls===== -extern void setUp(void); -extern void tearDown(void); -extern void test_FindFunction_WhichIsBroken_ShouldReturnZeroIfItemIsNotInList_WhichWorksEvenInOurBrokenCode(void); -extern void test_FindFunction_WhichIsBroken_ShouldReturnTheIndexForItemsInList_WhichWillFailBecauseOurFunctionUnderTestIsBroken(void); -extern void test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValue(void); -extern void test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValueAgain(void); -extern void test_FunctionWhichReturnsLocalVariable_ShouldReturnCurrentCounter_ButFailsBecauseThisTestIsActuallyFlawed(void); - - -//=======Test Reset Option===== -void resetTest(void); -void resetTest(void) -{ - tearDown(); - setUp(); -} - - -//=======MAIN===== -int main(void) -{ - UnityBegin("test/TestProductionCode.c"); - RUN_TEST(test_FindFunction_WhichIsBroken_ShouldReturnZeroIfItemIsNotInList_WhichWorksEvenInOurBrokenCode, 20); - RUN_TEST(test_FindFunction_WhichIsBroken_ShouldReturnTheIndexForItemsInList_WhichWillFailBecauseOurFunctionUnderTestIsBroken, 30); - RUN_TEST(test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValue, 41); - RUN_TEST(test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValueAgain, 51); - RUN_TEST(test_FunctionWhichReturnsLocalVariable_ShouldReturnCurrentCounter_ButFailsBecauseThisTestIsActuallyFlawed, 57); - - return (UnityEnd()); -} +/* AUTOGENERATED FILE. DO NOT EDIT. 
*/ + +//=======Test Runner Used To Run Each Test Below===== +#define RUN_TEST(TestFunc, TestLineNum) \ +{ \ + Unity.CurrentTestName = #TestFunc; \ + Unity.CurrentTestLineNumber = TestLineNum; \ + Unity.NumberOfTests++; \ + if (TEST_PROTECT()) \ + { \ + setUp(); \ + TestFunc(); \ + } \ + if (TEST_PROTECT() && !TEST_IS_IGNORED) \ + { \ + tearDown(); \ + } \ + UnityConcludeTest(); \ +} + +//=======Automagically Detected Files To Include===== +#include "unity.h" +#include +#include +#include "ProductionCode.h" + +//=======External Functions This Runner Calls===== +extern void setUp(void); +extern void tearDown(void); +extern void test_FindFunction_WhichIsBroken_ShouldReturnZeroIfItemIsNotInList_WhichWorksEvenInOurBrokenCode(void); +extern void test_FindFunction_WhichIsBroken_ShouldReturnTheIndexForItemsInList_WhichWillFailBecauseOurFunctionUnderTestIsBroken(void); +extern void test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValue(void); +extern void test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValueAgain(void); +extern void test_FunctionWhichReturnsLocalVariable_ShouldReturnCurrentCounter_ButFailsBecauseThisTestIsActuallyFlawed(void); + + +//=======Test Reset Option===== +void resetTest(void); +void resetTest(void) +{ + tearDown(); + setUp(); +} + + +//=======MAIN===== +int main(void) +{ + UnityBegin("test/TestProductionCode.c"); + RUN_TEST(test_FindFunction_WhichIsBroken_ShouldReturnZeroIfItemIsNotInList_WhichWorksEvenInOurBrokenCode, 20); + RUN_TEST(test_FindFunction_WhichIsBroken_ShouldReturnTheIndexForItemsInList_WhichWillFailBecauseOurFunctionUnderTestIsBroken, 30); + RUN_TEST(test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValue, 41); + RUN_TEST(test_FunctionWhichReturnsLocalVariable_ShouldReturnTheCurrentCounterValueAgain, 51); + RUN_TEST(test_FunctionWhichReturnsLocalVariable_ShouldReturnCurrentCounter_ButFailsBecauseThisTestIsActuallyFlawed, 57); + + return (UnityEnd()); +} diff --git a/test/rakefile b/test/rakefile index 476d3ab..6b0c3ef 100644 --- a/test/rakefile +++ b/test/rakefile @@ -1,60 +1,60 @@ -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. 
Please refer to license.txt for details] -# ========================================== - -UNITY_ROOT = File.expand_path(File.dirname(__FILE__)) + '/' - -require 'rake' -require 'rake/clean' -require UNITY_ROOT + 'rakefile_helper' - -TEMP_DIRS = [ - File.join(UNITY_ROOT, 'build') -] - -TEMP_DIRS.each do |dir| - directory(dir) - CLOBBER.include(dir) -end - -task :prepare_for_tests => TEMP_DIRS - -include RakefileHelpers - -# Load proper GCC as defult configuration -DEFAULT_CONFIG_FILE = 'gcc_auto_stdint.yml' -configure_toolchain(DEFAULT_CONFIG_FILE) - -desc "Test unity with its own unit tests" -task :unit => [:prepare_for_tests] do - run_tests get_unit_test_files -end - -desc "Test unity's helper scripts" -task :scripts => [:prepare_for_tests] do - Dir['tests/test_*.rb'].each do |scriptfile| - require "./"+scriptfile - end -end - -desc "Generate test summary" -task :summary do - report_summary -end - -desc "Build and test Unity" -task :all => [:clean, :prepare_for_tests, :scripts, :unit, :summary] -task :default => [:clobber, :all] -task :ci => [:no_color, :default] -task :cruise => [:no_color, :default] - -desc "Load configuration" -task :config, :config_file do |t, args| - configure_toolchain(args[:config_file]) -end - -task :no_color do - $colour_output = false -end +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. Please refer to license.txt for details] +# ========================================== + +UNITY_ROOT = File.expand_path(File.dirname(__FILE__)) + '/' + +require 'rake' +require 'rake/clean' +require UNITY_ROOT + 'rakefile_helper' + +TEMP_DIRS = [ + File.join(UNITY_ROOT, 'build') +] + +TEMP_DIRS.each do |dir| + directory(dir) + CLOBBER.include(dir) +end + +task :prepare_for_tests => TEMP_DIRS + +include RakefileHelpers + +# Load proper GCC as defult configuration +DEFAULT_CONFIG_FILE = 'gcc_auto_stdint.yml' +configure_toolchain(DEFAULT_CONFIG_FILE) + +desc "Test unity with its own unit tests" +task :unit => [:prepare_for_tests] do + run_tests get_unit_test_files +end + +desc "Test unity's helper scripts" +task :scripts => [:prepare_for_tests] do + Dir['tests/test_*.rb'].each do |scriptfile| + require "./"+scriptfile + end +end + +desc "Generate test summary" +task :summary do + report_summary +end + +desc "Build and test Unity" +task :all => [:clean, :prepare_for_tests, :scripts, :unit, :summary] +task :default => [:clobber, :all] +task :ci => [:no_color, :default] +task :cruise => [:no_color, :default] + +desc "Load configuration" +task :config, :config_file do |t, args| + configure_toolchain(args[:config_file]) +end + +task :no_color do + $colour_output = false +end diff --git a/test/rakefile_helper.rb b/test/rakefile_helper.rb index a5ae51e..ba9e5e2 100644 --- a/test/rakefile_helper.rb +++ b/test/rakefile_helper.rb @@ -1,255 +1,255 @@ -# ========================================== -# Unity Project - A Test Framework for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. 
Please refer to license.txt for details] -# ========================================== - -require 'yaml' -require 'fileutils' -require UNITY_ROOT + '../auto/unity_test_summary' -require UNITY_ROOT + '../auto/generate_test_runner' -require UNITY_ROOT + '../auto/colour_reporter' - -module RakefileHelpers - - C_EXTENSION = '.c' - - def load_configuration(config_file) - unless ($configured) - $cfg_file = "targets/#{config_file}" unless (config_file =~ /[\\|\/]/) - $cfg = YAML.load(File.read($cfg_file)) - $colour_output = false unless $cfg['colour'] - $configured = true if (config_file != DEFAULT_CONFIG_FILE) - end - end - - def configure_clean - CLEAN.include($cfg['compiler']['build_path'] + '*.*') unless $cfg['compiler']['build_path'].nil? - end - - def configure_toolchain(config_file=DEFAULT_CONFIG_FILE) - config_file += '.yml' unless config_file =~ /\.yml$/ - config_file = config_file unless config_file =~ /[\\|\/]/ - load_configuration(config_file) - configure_clean - end - - def get_unit_test_files - path = $cfg['compiler']['unit_tests_path'] + 'test*' + C_EXTENSION - path.gsub!(/\\/, '/') - FileList.new(path) - end - - def get_local_include_dirs - include_dirs = $cfg['compiler']['includes']['items'].dup - include_dirs.delete_if {|dir| dir.is_a?(Array)} - return include_dirs - end - - def extract_headers(filename) - includes = [] - lines = File.readlines(filename) - lines.each do |line| - m = line.match(/^\s*#include\s+\"\s*(.+\.[hH])\s*\"/) - if not m.nil? - includes << m[1] - end - end - return includes - end - - def find_source_file(header, paths) - paths.each do |dir| - src_file = dir + header.ext(C_EXTENSION) - if (File.exists?(src_file)) - return src_file - end - end - return nil - end - - def tackit(strings) - if strings.is_a?(Array) - result = "\"#{strings.join}\"" - else - result = strings - end - return result - end - - def squash(prefix, items) - result = '' - items.each { |item| result += " #{prefix}#{tackit(item)}" } - return result - end - - def should(behave, &block) - if block - puts "Should " + behave - yield block - else - puts "UNIMPLEMENTED CASE: Should #{behave}" - end - end - - def build_compiler_fields - command = tackit($cfg['compiler']['path']) - if $cfg['compiler']['defines']['items'].nil? - defines = '' - else - defines = squash($cfg['compiler']['defines']['prefix'], $cfg['compiler']['defines']['items']) - end - options = squash('', $cfg['compiler']['options']) - includes = squash($cfg['compiler']['includes']['prefix'], $cfg['compiler']['includes']['items']) - includes = includes.gsub(/\\ /, ' ').gsub(/\\\"/, '"').gsub(/\\$/, '') # Remove trailing slashes (for IAR) - return {:command => command, :defines => defines, :options => options, :includes => includes} - end - - def compile(file, defines=[]) - compiler = build_compiler_fields - cmd_str = "#{compiler[:command]}#{compiler[:defines]}#{compiler[:options]}#{compiler[:includes]} #{file} " + - "#{$cfg['compiler']['object_files']['prefix']}#{$cfg['compiler']['object_files']['destination']}" - obj_file = "#{File.basename(file, C_EXTENSION)}#{$cfg['compiler']['object_files']['extension']}" - execute(cmd_str + obj_file) - return obj_file - end - - def build_linker_fields - command = tackit($cfg['linker']['path']) - if $cfg['linker']['options'].nil? - options = '' - else - options = squash('', $cfg['linker']['options']) - end - if ($cfg['linker']['includes'].nil? || $cfg['linker']['includes']['items'].nil?) 
- includes = '' - else - includes = squash($cfg['linker']['includes']['prefix'], $cfg['linker']['includes']['items']) - end - includes = includes.gsub(/\\ /, ' ').gsub(/\\\"/, '"').gsub(/\\$/, '') # Remove trailing slashes (for IAR) - return {:command => command, :options => options, :includes => includes} - end - - def link_it(exe_name, obj_list) - linker = build_linker_fields - cmd_str = "#{linker[:command]}#{linker[:options]}#{linker[:includes]} " + - (obj_list.map{|obj|"#{$cfg['linker']['object_files']['path']}#{obj} "}).join + - $cfg['linker']['bin_files']['prefix'] + ' ' + - $cfg['linker']['bin_files']['destination'] + - exe_name + $cfg['linker']['bin_files']['extension'] - execute(cmd_str) - end - - def build_simulator_fields - return nil if $cfg['simulator'].nil? - if $cfg['simulator']['path'].nil? - command = '' - else - command = (tackit($cfg['simulator']['path']) + ' ') - end - if $cfg['simulator']['pre_support'].nil? - pre_support = '' - else - pre_support = squash('', $cfg['simulator']['pre_support']) - end - if $cfg['simulator']['post_support'].nil? - post_support = '' - else - post_support = squash('', $cfg['simulator']['post_support']) - end - return {:command => command, :pre_support => pre_support, :post_support => post_support} - end - - def execute(command_string, verbose=true) - report command_string - output = `#{command_string}`.chomp - report(output) if (verbose && !output.nil? && (output.length > 0)) - if $?.exitstatus != 0 - raise "Command failed. (Returned #{$?.exitstatus})" - end - return output - end - - def report_summary - summary = UnityTestSummary.new - summary.set_root_path(UNITY_ROOT) - results_glob = "#{$cfg['compiler']['build_path']}*.test*" - results_glob.gsub!(/\\/, '/') - results = Dir[results_glob] - summary.set_targets(results) - report summary.run - end - - def run_tests(test_files) - report 'Running Unity system tests...' - - # Tack on TEST define for compiling unit tests - load_configuration($cfg_file) - test_defines = ['TEST'] - $cfg['compiler']['defines']['items'] = [] if $cfg['compiler']['defines']['items'].nil? - $cfg['compiler']['defines']['items'] << 'TEST' - - include_dirs = get_local_include_dirs - - # Build and execute each unit test - test_files.each do |test| - obj_list = [] - - if !$cfg['compiler']['aux_sources'].nil? - $cfg['compiler']['aux_sources'].each do |aux| - obj_list << compile(aux, test_defines) - end - end - - # Detect dependencies and build required modules - extract_headers(test).each do |header| - # Compile corresponding source file if it exists - src_file = find_source_file(header, include_dirs) - if !src_file.nil? - obj_list << compile(src_file, test_defines) - end - end - - # Build the test runner (generate if configured to do so) - test_base = File.basename(test, C_EXTENSION) - - runner_name = test_base + '_Runner.c' - runner_path = '' - - if $cfg['compiler']['runner_path'].nil? - runner_path = $cfg['compiler']['build_path'] + runner_name - else - runner_path = $cfg['compiler']['runner_path'] + runner_name - end - - options = $cfg[:unity] - options[:use_param_tests] = (test =~ /parameterized/) ? 
true : false - UnityTestRunnerGenerator.new(options).run(test, runner_path) - obj_list << compile(runner_path, test_defines) - - # Build the test module - obj_list << compile(test, test_defines) - - # Link the test executable - link_it(test_base, obj_list) - - # Execute unit test and generate results file - simulator = build_simulator_fields - executable = $cfg['linker']['bin_files']['destination'] + test_base + $cfg['linker']['bin_files']['extension'] - if simulator.nil? - cmd_str = executable - else - cmd_str = "#{simulator[:command]} #{simulator[:pre_support]} #{executable} #{simulator[:post_support]}" - end - output = execute(cmd_str) - test_results = $cfg['compiler']['build_path'] + test_base - if output.match(/OK$/m).nil? - test_results += '.testfail' - else - test_results += '.testpass' - end - File.open(test_results, 'w') { |f| f.print output } - - end - end -end +# ========================================== +# Unity Project - A Test Framework for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. Please refer to license.txt for details] +# ========================================== + +require 'yaml' +require 'fileutils' +require UNITY_ROOT + '../auto/unity_test_summary' +require UNITY_ROOT + '../auto/generate_test_runner' +require UNITY_ROOT + '../auto/colour_reporter' + +module RakefileHelpers + + C_EXTENSION = '.c' + + def load_configuration(config_file) + unless ($configured) + $cfg_file = "targets/#{config_file}" unless (config_file =~ /[\\|\/]/) + $cfg = YAML.load(File.read($cfg_file)) + $colour_output = false unless $cfg['colour'] + $configured = true if (config_file != DEFAULT_CONFIG_FILE) + end + end + + def configure_clean + CLEAN.include($cfg['compiler']['build_path'] + '*.*') unless $cfg['compiler']['build_path'].nil? + end + + def configure_toolchain(config_file=DEFAULT_CONFIG_FILE) + config_file += '.yml' unless config_file =~ /\.yml$/ + config_file = config_file unless config_file =~ /[\\|\/]/ + load_configuration(config_file) + configure_clean + end + + def get_unit_test_files + path = $cfg['compiler']['unit_tests_path'] + 'test*' + C_EXTENSION + path.gsub!(/\\/, '/') + FileList.new(path) + end + + def get_local_include_dirs + include_dirs = $cfg['compiler']['includes']['items'].dup + include_dirs.delete_if {|dir| dir.is_a?(Array)} + return include_dirs + end + + def extract_headers(filename) + includes = [] + lines = File.readlines(filename) + lines.each do |line| + m = line.match(/^\s*#include\s+\"\s*(.+\.[hH])\s*\"/) + if not m.nil? + includes << m[1] + end + end + return includes + end + + def find_source_file(header, paths) + paths.each do |dir| + src_file = dir + header.ext(C_EXTENSION) + if (File.exists?(src_file)) + return src_file + end + end + return nil + end + + def tackit(strings) + if strings.is_a?(Array) + result = "\"#{strings.join}\"" + else + result = strings + end + return result + end + + def squash(prefix, items) + result = '' + items.each { |item| result += " #{prefix}#{tackit(item)}" } + return result + end + + def should(behave, &block) + if block + puts "Should " + behave + yield block + else + puts "UNIMPLEMENTED CASE: Should #{behave}" + end + end + + def build_compiler_fields + command = tackit($cfg['compiler']['path']) + if $cfg['compiler']['defines']['items'].nil? 
+ defines = '' + else + defines = squash($cfg['compiler']['defines']['prefix'], $cfg['compiler']['defines']['items']) + end + options = squash('', $cfg['compiler']['options']) + includes = squash($cfg['compiler']['includes']['prefix'], $cfg['compiler']['includes']['items']) + includes = includes.gsub(/\\ /, ' ').gsub(/\\\"/, '"').gsub(/\\$/, '') # Remove trailing slashes (for IAR) + return {:command => command, :defines => defines, :options => options, :includes => includes} + end + + def compile(file, defines=[]) + compiler = build_compiler_fields + cmd_str = "#{compiler[:command]}#{compiler[:defines]}#{compiler[:options]}#{compiler[:includes]} #{file} " + + "#{$cfg['compiler']['object_files']['prefix']}#{$cfg['compiler']['object_files']['destination']}" + obj_file = "#{File.basename(file, C_EXTENSION)}#{$cfg['compiler']['object_files']['extension']}" + execute(cmd_str + obj_file) + return obj_file + end + + def build_linker_fields + command = tackit($cfg['linker']['path']) + if $cfg['linker']['options'].nil? + options = '' + else + options = squash('', $cfg['linker']['options']) + end + if ($cfg['linker']['includes'].nil? || $cfg['linker']['includes']['items'].nil?) + includes = '' + else + includes = squash($cfg['linker']['includes']['prefix'], $cfg['linker']['includes']['items']) + end + includes = includes.gsub(/\\ /, ' ').gsub(/\\\"/, '"').gsub(/\\$/, '') # Remove trailing slashes (for IAR) + return {:command => command, :options => options, :includes => includes} + end + + def link_it(exe_name, obj_list) + linker = build_linker_fields + cmd_str = "#{linker[:command]}#{linker[:options]}#{linker[:includes]} " + + (obj_list.map{|obj|"#{$cfg['linker']['object_files']['path']}#{obj} "}).join + + $cfg['linker']['bin_files']['prefix'] + ' ' + + $cfg['linker']['bin_files']['destination'] + + exe_name + $cfg['linker']['bin_files']['extension'] + execute(cmd_str) + end + + def build_simulator_fields + return nil if $cfg['simulator'].nil? + if $cfg['simulator']['path'].nil? + command = '' + else + command = (tackit($cfg['simulator']['path']) + ' ') + end + if $cfg['simulator']['pre_support'].nil? + pre_support = '' + else + pre_support = squash('', $cfg['simulator']['pre_support']) + end + if $cfg['simulator']['post_support'].nil? + post_support = '' + else + post_support = squash('', $cfg['simulator']['post_support']) + end + return {:command => command, :pre_support => pre_support, :post_support => post_support} + end + + def execute(command_string, verbose=true) + report command_string + output = `#{command_string}`.chomp + report(output) if (verbose && !output.nil? && (output.length > 0)) + if $?.exitstatus != 0 + raise "Command failed. (Returned #{$?.exitstatus})" + end + return output + end + + def report_summary + summary = UnityTestSummary.new + summary.set_root_path(UNITY_ROOT) + results_glob = "#{$cfg['compiler']['build_path']}*.test*" + results_glob.gsub!(/\\/, '/') + results = Dir[results_glob] + summary.set_targets(results) + report summary.run + end + + def run_tests(test_files) + report 'Running Unity system tests...' + + # Tack on TEST define for compiling unit tests + load_configuration($cfg_file) + test_defines = ['TEST'] + $cfg['compiler']['defines']['items'] = [] if $cfg['compiler']['defines']['items'].nil? + $cfg['compiler']['defines']['items'] << 'TEST' + + include_dirs = get_local_include_dirs + + # Build and execute each unit test + test_files.each do |test| + obj_list = [] + + if !$cfg['compiler']['aux_sources'].nil? 
+ $cfg['compiler']['aux_sources'].each do |aux| + obj_list << compile(aux, test_defines) + end + end + + # Detect dependencies and build required modules + extract_headers(test).each do |header| + # Compile corresponding source file if it exists + src_file = find_source_file(header, include_dirs) + if !src_file.nil? + obj_list << compile(src_file, test_defines) + end + end + + # Build the test runner (generate if configured to do so) + test_base = File.basename(test, C_EXTENSION) + + runner_name = test_base + '_Runner.c' + runner_path = '' + + if $cfg['compiler']['runner_path'].nil? + runner_path = $cfg['compiler']['build_path'] + runner_name + else + runner_path = $cfg['compiler']['runner_path'] + runner_name + end + + options = $cfg[:unity] + options[:use_param_tests] = (test =~ /parameterized/) ? true : false + UnityTestRunnerGenerator.new(options).run(test, runner_path) + obj_list << compile(runner_path, test_defines) + + # Build the test module + obj_list << compile(test, test_defines) + + # Link the test executable + link_it(test_base, obj_list) + + # Execute unit test and generate results file + simulator = build_simulator_fields + executable = $cfg['linker']['bin_files']['destination'] + test_base + $cfg['linker']['bin_files']['extension'] + if simulator.nil? + cmd_str = executable + else + cmd_str = "#{simulator[:command]} #{simulator[:pre_support]} #{executable} #{simulator[:post_support]}" + end + output = execute(cmd_str) + test_results = $cfg['compiler']['build_path'] + test_base + if output.match(/OK$/m).nil? + test_results += '.testfail' + else + test_results += '.testpass' + end + File.open(test_results, 'w') { |f| f.print output } + + end + end +end diff --git a/test/testdata/mocksample.c b/test/testdata/mocksample.c index 847d87f..b709438 100644 --- a/test/testdata/mocksample.c +++ b/test/testdata/mocksample.c @@ -1,51 +1,51 @@ -// This is just a sample test file to be used to test the generator script -#ifndef TEST_SAMPLE_H -#define TEST_SAMPLE_H - -#include -#include "unity.h" -#include "funky.h" -#include "Mockstanky.h" - -void setUp(void) -{ - CustomSetupStuff(); -} - -void tearDown(void) -{ - CustomTeardownStuff -} - -//Yup, nice comment -void test_TheFirstThingToTest(void) -{ - TEST_ASSERT(1); - - TEST_ASSERT_TRUE(1); -} - -/* -void test_ShouldBeIgnored(void) -{ - DoesStuff(); -} -*/ - -//void test_ShouldAlsoNotBeTested(void) -//{ -// Call_An_Expect(); -// -// CallAFunction(); -// test_CallAFunctionThatLooksLikeATest(); -//} - -void test_TheSecondThingToTest(void) -{ - Call_An_Expect(); - - CallAFunction(); - test_CallAFunctionThatLooksLikeATest(); -} - -#endif //TEST_SAMPLE_H +// This is just a sample test file to be used to test the generator script +#ifndef TEST_SAMPLE_H +#define TEST_SAMPLE_H + +#include +#include "unity.h" +#include "funky.h" +#include "Mockstanky.h" + +void setUp(void) +{ + CustomSetupStuff(); +} + +void tearDown(void) +{ + CustomTeardownStuff +} + +//Yup, nice comment +void test_TheFirstThingToTest(void) +{ + TEST_ASSERT(1); + + TEST_ASSERT_TRUE(1); +} + +/* +void test_ShouldBeIgnored(void) +{ + DoesStuff(); +} +*/ + +//void test_ShouldAlsoNotBeTested(void) +//{ +// Call_An_Expect(); +// +// CallAFunction(); +// test_CallAFunctionThatLooksLikeATest(); +//} + +void test_TheSecondThingToTest(void) +{ + Call_An_Expect(); + + CallAFunction(); + test_CallAFunctionThatLooksLikeATest(); +} + +#endif //TEST_SAMPLE_H diff --git a/test/testdata/sample.yml b/test/testdata/sample.yml index 52ec96b..9e5eece 100644 --- a/test/testdata/sample.yml +++ 
b/test/testdata/sample.yml @@ -1,9 +1,9 @@ -:unity: - :includes: - - two.h - - three.h - - - :plugins: - - :cexception - :suite_setup: | +:unity: + :includes: + - two.h + - three.h + - + :plugins: + - :cexception + :suite_setup: | a_yaml_setup(); \ No newline at end of file diff --git a/test/testdata/testsample.c b/test/testdata/testsample.c index e5a2b18..4f30ec7 100644 --- a/test/testdata/testsample.c +++ b/test/testdata/testsample.c @@ -1,51 +1,51 @@ -// This is just a sample test file to be used to test the generator script -#ifndef TEST_SAMPLE_H -#define TEST_SAMPLE_H - -#include -#include "unity.h" -#include "funky.h" -#include "stanky.h" - -void setUp(void) -{ - CustomSetupStuff(); -} - -void tearDown(void) -{ - CustomTeardownStuff -} - -//Yup, nice comment -void test_TheFirstThingToTest(void) -{ - TEST_ASSERT(1); - - TEST_ASSERT_TRUE(1); -} - -/* -void test_ShouldBeIgnored(void) -{ - DoesStuff(); -} -*/ - -//void test_ShouldAlsoNotBeTested(void) -//{ -// Call_An_Expect(); -// -// CallAFunction(); -// test_CallAFunctionThatLooksLikeATest(); -//} - -void test_TheSecondThingToTest(void) -{ - Call_An_Expect(); - - CallAFunction(); - test_CallAFunctionThatLooksLikeATest(); -} - -#endif //TEST_SAMPLE_H +// This is just a sample test file to be used to test the generator script +#ifndef TEST_SAMPLE_H +#define TEST_SAMPLE_H + +#include +#include "unity.h" +#include "funky.h" +#include "stanky.h" + +void setUp(void) +{ + CustomSetupStuff(); +} + +void tearDown(void) +{ + CustomTeardownStuff +} + +//Yup, nice comment +void test_TheFirstThingToTest(void) +{ + TEST_ASSERT(1); + + TEST_ASSERT_TRUE(1); +} + +/* +void test_ShouldBeIgnored(void) +{ + DoesStuff(); +} +*/ + +//void test_ShouldAlsoNotBeTested(void) +//{ +// Call_An_Expect(); +// +// CallAFunction(); +// test_CallAFunctionThatLooksLikeATest(); +//} + +void test_TheSecondThingToTest(void) +{ + Call_An_Expect(); + + CallAFunction(); + test_CallAFunctionThatLooksLikeATest(); +} + +#endif //TEST_SAMPLE_H diff --git a/test/tests/test_generate_test_runner.rb b/test/tests/test_generate_test_runner.rb index 7ac458d..b923e72 100644 --- a/test/tests/test_generate_test_runner.rb +++ b/test/tests/test_generate_test_runner.rb @@ -1,102 +1,102 @@ -# ========================================== -# CMock Project - Automatic Mock Generation for C -# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams -# [Released under MIT License. 
Please refer to license.txt for details] -# ========================================== - -require '../auto/generate_test_runner.rb' - -TEST_FILE = 'testdata/testsample.c' -TEST_MOCK = 'testdata/mocksample.c' -OUT_FILE = 'build/testsample_' -EXP_FILE = 'expectdata/testsample_' - -$generate_test_runner_failures = 0 - -def verify_output_equal(subtest) - expected = File.read(EXP_FILE + subtest + '.c').gsub(/\r\n/,"\n") - actual = File.read(OUT_FILE + subtest + '.c').gsub(/\r\n/,"\n") - if (expected != actual) - report(" #{subtest}:FAIL") - $generate_test_runner_failures += 1 - else - report(" #{subtest}:PASS") - end -end - -should "GenerateARunnerByCreatingRunnerWithOptions" do - sets = { 'def' => nil, - 'new1' => { :plugins => [:cexception], :includes => ['one.h', 'two.h'], :enforce_strict_ordering => true }, - 'new2' => { :plugins => [:ignore], :suite_setup => "a_custom_setup();", :suite_teardown => "a_custom_teardown();" } - } - - sets.each_pair do |subtest, options| - UnityTestRunnerGenerator.new(options).run(TEST_FILE, OUT_FILE + subtest + '.c') - verify_output_equal(subtest) - UnityTestRunnerGenerator.new(options).run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c') - verify_output_equal('mock_' + subtest) - end -end - -should "GenerateARunnerAlongWithAHeaderIfSpecified" do - sets = { 'head1' => { :header_file => "#{OUT_FILE}head1.h" } } - sets.each_pair do |subtest, options| - UnityTestRunnerGenerator.new(options).run(TEST_FILE, OUT_FILE + subtest + '.c') - verify_output_equal(subtest) - end - - sets = { 'head1' => { :header_file => "#{OUT_FILE}mock_head1.h" } } - sets.each_pair do |subtest, options| - UnityTestRunnerGenerator.new(options).run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c') - verify_output_equal('mock_' + subtest) - end -end - -should "GenerateARunnerByRunningRunnerWithOptions" do - sets = { 'run1' => { :plugins => [:cexception], :includes => ['one.h', 'two.h'], :enforce_strict_ordering => true }, - 'run2' => { :plugins => [:ignore], :suite_setup => "a_custom_setup();", :suite_teardown => "a_custom_teardown();" } - } - - sets.each_pair do |subtest, options| - UnityTestRunnerGenerator.new.run(TEST_FILE, OUT_FILE + subtest + '.c', options) - verify_output_equal(subtest) - UnityTestRunnerGenerator.new.run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c', options) - verify_output_equal('mock_' + subtest) - end -end - -should "GenerateARunnerByPullingYamlOptions" do - subtest = 'yaml' - cmdstr = "ruby ../auto/generate_test_runner.rb testdata/sample.yml \"#{TEST_FILE}\" \"#{OUT_FILE + subtest + '.c'}\"" - `#{cmdstr}` - verify_output_equal(subtest) - - cmdstr = "ruby ../auto/generate_test_runner.rb testdata/sample.yml \"#{TEST_MOCK}\" \"#{OUT_FILE + 'mock_' + subtest + '.c'}\"" - `#{cmdstr}` - verify_output_equal('mock_' + subtest) -end - -should "GenerateARunnerByPullingCommandlineOptions" do - subtest = 'cmd' - cmdstr = "ruby ../auto/generate_test_runner.rb -cexception \"#{TEST_FILE}\" \"#{OUT_FILE + subtest + '.c'}\"" - `#{cmdstr}` - verify_output_equal(subtest) - - cmdstr = "ruby ../auto/generate_test_runner.rb -cexception \"#{TEST_MOCK}\" \"#{OUT_FILE + 'mock_' + subtest + '.c'}\"" - `#{cmdstr}` - verify_output_equal('mock_' + subtest) -end - -should "GenerateARunnerThatUsesParameterizedTests" do - sets = { 'param' => { :plugins => [:ignore], :use_param_tests => true } - } - - sets.each_pair do |subtest, options| - UnityTestRunnerGenerator.new(options).run(TEST_FILE, OUT_FILE + subtest + '.c') - verify_output_equal(subtest) - 
UnityTestRunnerGenerator.new(options).run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c') - verify_output_equal('mock_' + subtest) - end -end - -raise "There were #{$generate_test_runner_failures.to_s} failures while testing generate_test_runner.rb" if ($generate_test_runner_failures > 0) +# ========================================== +# CMock Project - Automatic Mock Generation for C +# Copyright (c) 2007 Mike Karlesky, Mark VanderVoord, Greg Williams +# [Released under MIT License. Please refer to license.txt for details] +# ========================================== + +require '../auto/generate_test_runner.rb' + +TEST_FILE = 'testdata/testsample.c' +TEST_MOCK = 'testdata/mocksample.c' +OUT_FILE = 'build/testsample_' +EXP_FILE = 'expectdata/testsample_' + +$generate_test_runner_failures = 0 + +def verify_output_equal(subtest) + expected = File.read(EXP_FILE + subtest + '.c').gsub(/\r\n/,"\n") + actual = File.read(OUT_FILE + subtest + '.c').gsub(/\r\n/,"\n") + if (expected != actual) + report(" #{subtest}:FAIL") + $generate_test_runner_failures += 1 + else + report(" #{subtest}:PASS") + end +end + +should "GenerateARunnerByCreatingRunnerWithOptions" do + sets = { 'def' => nil, + 'new1' => { :plugins => [:cexception], :includes => ['one.h', 'two.h'], :enforce_strict_ordering => true }, + 'new2' => { :plugins => [:ignore], :suite_setup => "a_custom_setup();", :suite_teardown => "a_custom_teardown();" } + } + + sets.each_pair do |subtest, options| + UnityTestRunnerGenerator.new(options).run(TEST_FILE, OUT_FILE + subtest + '.c') + verify_output_equal(subtest) + UnityTestRunnerGenerator.new(options).run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c') + verify_output_equal('mock_' + subtest) + end +end + +should "GenerateARunnerAlongWithAHeaderIfSpecified" do + sets = { 'head1' => { :header_file => "#{OUT_FILE}head1.h" } } + sets.each_pair do |subtest, options| + UnityTestRunnerGenerator.new(options).run(TEST_FILE, OUT_FILE + subtest + '.c') + verify_output_equal(subtest) + end + + sets = { 'head1' => { :header_file => "#{OUT_FILE}mock_head1.h" } } + sets.each_pair do |subtest, options| + UnityTestRunnerGenerator.new(options).run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c') + verify_output_equal('mock_' + subtest) + end +end + +should "GenerateARunnerByRunningRunnerWithOptions" do + sets = { 'run1' => { :plugins => [:cexception], :includes => ['one.h', 'two.h'], :enforce_strict_ordering => true }, + 'run2' => { :plugins => [:ignore], :suite_setup => "a_custom_setup();", :suite_teardown => "a_custom_teardown();" } + } + + sets.each_pair do |subtest, options| + UnityTestRunnerGenerator.new.run(TEST_FILE, OUT_FILE + subtest + '.c', options) + verify_output_equal(subtest) + UnityTestRunnerGenerator.new.run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c', options) + verify_output_equal('mock_' + subtest) + end +end + +should "GenerateARunnerByPullingYamlOptions" do + subtest = 'yaml' + cmdstr = "ruby ../auto/generate_test_runner.rb testdata/sample.yml \"#{TEST_FILE}\" \"#{OUT_FILE + subtest + '.c'}\"" + `#{cmdstr}` + verify_output_equal(subtest) + + cmdstr = "ruby ../auto/generate_test_runner.rb testdata/sample.yml \"#{TEST_MOCK}\" \"#{OUT_FILE + 'mock_' + subtest + '.c'}\"" + `#{cmdstr}` + verify_output_equal('mock_' + subtest) +end + +should "GenerateARunnerByPullingCommandlineOptions" do + subtest = 'cmd' + cmdstr = "ruby ../auto/generate_test_runner.rb -cexception \"#{TEST_FILE}\" \"#{OUT_FILE + subtest + '.c'}\"" + `#{cmdstr}` + verify_output_equal(subtest) + + cmdstr = "ruby 
../auto/generate_test_runner.rb -cexception \"#{TEST_MOCK}\" \"#{OUT_FILE + 'mock_' + subtest + '.c'}\"" + `#{cmdstr}` + verify_output_equal('mock_' + subtest) +end + +should "GenerateARunnerThatUsesParameterizedTests" do + sets = { 'param' => { :plugins => [:ignore], :use_param_tests => true } + } + + sets.each_pair do |subtest, options| + UnityTestRunnerGenerator.new(options).run(TEST_FILE, OUT_FILE + subtest + '.c') + verify_output_equal(subtest) + UnityTestRunnerGenerator.new(options).run(TEST_MOCK, OUT_FILE + 'mock_' + subtest + '.c') + verify_output_equal('mock_' + subtest) + end +end + +raise "There were #{$generate_test_runner_failures.to_s} failures while testing generate_test_runner.rb" if ($generate_test_runner_failures > 0)
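For reference on the parameterized case exercised above: when :use_param_tests is enabled, the runner generator is typically driven by TEST_CASE annotations placed above a test function, so a test source might look roughly like the sketch below (the test name, values, and the TEST_CASE guard are illustrative assumptions, not taken from this patch):

#include "unity.h"

#ifndef TEST_CASE
#define TEST_CASE(...)    /* annotations are read by the runner generator and compile away */
#endif

TEST_CASE(0)
TEST_CASE(44)
TEST_CASE(99)
void test_ValueIsAccepted(int value)
{
    TEST_ASSERT_TRUE(value >= 0);
}

With :use_param_tests => true, each TEST_CASE line should map to one invocation of the test in the generated runner.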