diff --git a/CMakeLists.txt b/CMakeLists.txt index 5af0704..b66a611 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,7 +20,8 @@ add_executable(Graph src/main.cpp src/renderer.cpp src/worldpass.cpp - src/postpass.cpp) + src/postpass.cpp + src/dofpass.cpp) target_link_libraries(Graph PUBLIC SDL2::SDL2 SDL2::SDL2main Vulkan::Vulkan assimp::assimp) target_include_directories(Graph PUBLIC include) @@ -28,8 +29,12 @@ add_shaders(Graph shaders/triangle.vert shaders/triangle.frag shaders/post.vert - shaders/post.frag) + shaders/post.frag + shaders/gfield.vert + shaders/gfield.frag) add_data(Graph data/suzanne.obj - data/test.cim) + data/test.cim + data/bokeh.png + data/scene.obj) diff --git a/data/bokeh.png b/data/bokeh.png new file mode 100644 index 0000000..f590a7e Binary files /dev/null and b/data/bokeh.png differ diff --git a/data/scene.obj b/data/scene.obj new file mode 100644 index 0000000..a104553 --- /dev/null +++ b/data/scene.obj @@ -0,0 +1,3120 @@ +# Blender v2.79 (sub 0) OBJ File: '' +# www.blender.org +mtllib scene.mtl +o Torus +v 1.216452 0.007353 -2.723024 +v 1.354939 0.175562 -2.776624 +v 1.396437 0.395483 -2.792685 +v 1.329827 0.608190 -2.766905 +v 1.172958 0.756688 -2.706190 +v 0.967862 0.801186 -2.626810 +v 0.769494 0.729761 -2.550035 +v 0.631007 0.561552 -2.496435 +v 0.589509 0.341630 -2.480374 +v 0.656119 0.128923 -2.506154 +v 0.812988 -0.019574 -2.566869 +v 1.018084 -0.064072 -2.646249 +v 1.104783 0.022803 -2.983147 +v 1.246262 0.190598 -3.029776 +v 1.295935 0.409389 -3.026795 +v 1.240492 0.620551 -2.975003 +v 1.094790 0.767503 -2.888276 +v 0.897868 0.810870 -2.789854 +v 0.702493 0.739032 -2.706108 +v 0.561014 0.571237 -2.659479 +v 0.511341 0.352446 -2.662460 +v 0.566784 0.141284 -2.714252 +v 0.712486 -0.005668 -2.800979 +v 0.909408 -0.049035 -2.899401 +v 0.975906 0.068891 -3.231419 +v 1.120838 0.235451 -3.271396 +v 1.179946 0.450868 -3.250240 +v 1.137391 0.657421 -3.173620 +v 1.004576 0.799765 -3.062067 +v 0.817089 0.839758 -2.945470 +v 
0.625167 0.766684 -2.855072 +v 0.480235 0.600124 -2.815094 +v 0.421127 0.384707 -2.836250 +v 0.463682 0.178154 -2.912870 +v 0.596497 0.035811 -3.024424 +v 0.783984 -0.004182 -3.141021 +v 0.832026 0.144828 -3.463593 +v 0.980814 0.309353 -3.497349 +v 1.050454 0.519211 -3.459197 +v 1.022287 0.718171 -3.359359 +v 0.903860 0.852920 -3.224588 +v 0.726906 0.887354 -3.090995 +v 0.538839 0.812246 -2.994376 +v 0.390051 0.647721 -2.960619 +v 0.320411 0.437863 -2.998771 +v 0.348578 0.238903 -3.098609 +v 0.467005 0.104154 -3.233380 +v 0.643959 0.069719 -3.366973 +v 0.675605 0.249314 -3.675695 +v 0.828583 0.411039 -3.703768 +v 0.909674 0.613248 -3.650089 +v 0.897149 0.801759 -3.529042 +v 0.794365 0.926060 -3.373060 +v 0.628861 0.952846 -3.223940 +v 0.444986 0.874938 -3.121637 +v 0.292007 0.713212 -3.093565 +v 0.210916 0.511003 -3.147243 +v 0.223441 0.322492 -3.268291 +v 0.326226 0.198191 -3.424273 +v 0.491729 0.171406 -3.573393 +v 0.509319 0.380561 -3.864097 +v 0.666753 0.538770 -3.887122 +v 0.760017 0.731371 -3.819651 +v 0.764121 0.906757 -3.679763 +v 0.677964 1.017933 -3.504941 +v 0.524634 1.035111 -3.342029 +v 0.345214 0.953686 -3.234679 +v 0.187780 0.795477 -3.211654 +v 0.094516 0.602876 -3.279125 +v 0.090412 0.427490 -3.419013 +v 0.176568 0.316313 -3.593834 +v 0.329899 0.299136 -3.756747 +v 0.336012 0.536324 -4.025576 +v 0.498090 0.690359 -4.044274 +v 0.604041 0.871558 -3.964982 +v 0.625476 1.031368 -3.808946 +v 0.556650 1.126968 -3.617977 +v 0.416006 1.132742 -3.443243 +v 0.241230 1.047144 -3.331566 +v 0.079152 0.893109 -3.312868 +v -0.026799 0.711910 -3.392160 +v -0.048233 0.552101 -3.548195 +v 0.020592 0.456500 -3.739165 +v 0.161236 0.450726 -3.913898 +v 0.158652 0.713938 -4.157367 +v 0.325483 0.863214 -4.172534 +v 0.444417 1.031410 -4.083594 +v 0.483588 1.173458 -3.914379 +v 0.432498 1.251297 -3.710231 +v 0.304838 1.244070 -3.525849 +v 0.134814 1.153712 -3.410641 +v -0.032016 1.004436 -3.395474 +v -0.150951 0.836240 -3.484414 +v -0.190121 0.694191 -3.653629 +v -0.139032 
0.616352 -3.857777 +v -0.011372 0.623580 -4.042159 +v -0.019728 0.910363 -4.257217 +v 0.151882 1.054376 -4.269709 +v 0.283875 1.208193 -4.173459 +v 0.340883 1.330599 -3.994259 +v 0.307632 1.388795 -3.780126 +v 0.193030 1.367188 -3.588435 +v 0.027786 1.271567 -3.470551 +v -0.143824 1.127555 -3.458060 +v -0.275817 0.973738 -3.554309 +v -0.332825 0.851332 -3.733509 +v -0.299574 0.793136 -3.947643 +v -0.184972 0.814743 -4.139333 +v -0.196075 1.122239 -4.323417 +v -0.019740 1.260575 -4.334134 +v 0.125162 1.398881 -4.233039 +v 0.199805 1.500100 -4.047219 +v 0.184189 1.537108 -3.826465 +v 0.082497 1.499991 -3.629929 +v -0.078022 1.398693 -3.510270 +v -0.254358 1.260357 -3.499553 +v -0.399260 1.122051 -3.600649 +v -0.473903 1.020832 -3.786469 +v -0.458286 0.983824 -4.007222 +v -0.356594 1.020941 -4.203759 +v -0.367373 1.345941 -4.354834 +v -0.186448 1.478283 -4.364709 +v -0.029006 1.600213 -4.261314 +v 0.062767 1.679061 -4.072352 +v 0.064280 1.693700 -3.848457 +v -0.024872 1.640206 -3.649621 +v -0.180801 1.532914 -3.529120 +v -0.361726 1.400573 -3.519245 +v -0.519169 1.278642 -3.622641 +v -0.610942 1.199794 -3.811602 +v -0.612455 1.185156 -4.035498 +v -0.523303 1.238649 -4.234334 +v -0.530691 1.577641 -4.350930 +v -0.345390 1.703774 -4.360910 +v -0.175992 1.808743 -4.257801 +v -0.067887 1.864421 -4.069230 +v -0.050042 1.855890 -3.845725 +v -0.127239 1.785434 -3.647174 +v -0.278792 1.671934 -3.526778 +v -0.464093 1.545801 -3.516798 +v -0.633491 1.440832 -3.619908 +v -0.741596 1.385154 -3.808479 +v -0.759440 1.393686 -4.031984 +v -0.682244 1.464141 -4.230535 +v -0.683232 1.813374 -4.311773 +v -0.493844 1.933190 -4.322803 +v -0.313279 2.020902 -4.222559 +v -0.189920 2.053007 -4.037904 +v -0.156821 2.020902 -3.818315 +v -0.222851 1.933190 -3.622630 +v -0.370317 1.813374 -3.503284 +v -0.559705 1.693557 -3.492255 +v -0.740270 1.605845 -3.592498 +v -0.863629 1.573740 -3.777153 +v -0.896728 1.605845 -3.996742 +v -0.830698 1.693557 -4.192427 +v -0.822389 2.049107 -4.238032 +v 
-0.629272 2.162607 -4.251037 +v -0.438520 2.233063 -4.156192 +v -0.301246 2.241594 -3.978911 +v -0.254231 2.185916 -3.766696 +v -0.310074 2.080947 -3.576410 +v -0.453811 1.954814 -3.459039 +v -0.646928 1.841313 -3.446034 +v -0.837680 1.770858 -3.540879 +v -0.974954 1.762327 -3.718160 +v -1.021969 1.818005 -3.930375 +v -0.966126 1.922974 -4.120662 +v -0.945780 2.280806 -4.130969 +v -0.749356 2.388098 -4.146843 +v -0.549572 2.441592 -4.059835 +v -0.399958 2.426953 -3.893261 +v -0.340604 2.348105 -3.691752 +v -0.387414 2.226175 -3.509303 +v -0.527845 2.093833 -3.394802 +v -0.724268 1.986541 -3.378928 +v -0.924053 1.933048 -3.465935 +v -1.073667 1.947686 -3.632510 +v -1.133020 2.026534 -3.834019 +v -1.086210 2.148465 -4.016468 +v -1.051293 2.504508 -3.992416 +v -0.852042 2.605806 -4.012002 +v -0.644534 2.642924 -3.935138 +v -0.484369 2.605915 -3.782418 +v -0.414464 2.504697 -3.594764 +v -0.453550 2.366390 -3.422459 +v -0.591153 2.228055 -3.311670 +v -0.790404 2.126757 -3.292083 +v -0.997912 2.089639 -3.368948 +v -1.158077 2.126648 -3.521667 +v -1.227982 2.227866 -3.709321 +v -1.188897 2.366173 -3.881627 +v -1.137123 2.716384 -3.824744 +v -0.935573 2.812005 -3.848823 +v -0.721781 2.833612 -3.784233 +v -0.553033 2.775416 -3.648280 +v -0.474545 2.653010 -3.477394 +v -0.507347 2.499193 -3.317363 +v -0.642651 2.355180 -3.211066 +v -0.844202 2.259559 -3.186987 +v -1.057994 2.237952 -3.251577 +v -1.226741 2.296149 -3.387530 +v -1.305229 2.418554 -3.558416 +v -1.272427 2.572371 -3.718448 +v -1.201802 2.912810 -3.630821 +v -0.998519 3.003167 -3.660096 +v -0.779992 3.010395 -3.609702 +v -0.604776 2.932556 -3.493142 +v -0.519820 2.790508 -3.341648 +v -0.547888 2.622312 -3.195812 +v -0.681459 2.473035 -3.094712 +v -0.884742 2.382678 -3.065437 +v -1.103269 2.375450 -3.115831 +v -1.278485 2.453289 -3.232391 +v -1.363441 2.595337 -3.383885 +v -1.335373 2.763534 -3.529720 +v -1.244223 3.090424 -3.413965 +v -1.039803 3.176022 -3.449051 +v -0.818171 3.170248 -3.414532 +v -0.638713 
3.074647 -3.319657 +v -0.549515 2.914838 -3.189849 +v -0.574477 2.733639 -3.059888 +v -0.706911 2.579604 -2.964599 +v -0.911331 2.494006 -2.929513 +v -1.132963 2.499780 -2.964032 +v -1.312421 2.595380 -3.058907 +v -1.401619 2.755190 -3.188715 +v -1.376657 2.936389 -3.318676 +v -1.263660 3.246187 -3.177888 +v -1.058719 3.327611 -3.219300 +v -0.835664 3.310434 -3.202063 +v -0.654262 3.199258 -3.130796 +v -0.563121 3.023871 -3.024595 +v -0.586660 2.831270 -2.911917 +v -0.718573 2.673062 -2.822953 +v -0.923514 2.591637 -2.781541 +v -1.146569 2.608814 -2.798779 +v -1.327971 2.719991 -2.870045 +v -1.419113 2.895377 -2.976246 +v -1.395573 3.087978 -3.088924 +v -1.259781 3.377434 -2.926629 +v -1.054943 3.455342 -2.974773 +v -0.832173 3.428556 -2.975929 +v -0.651159 3.304255 -2.929788 +v -0.560405 3.115745 -2.848713 +v -0.584228 2.913535 -2.754429 +v -0.716246 2.751810 -2.672197 +v -0.921083 2.673902 -2.624053 +v -1.143854 2.700687 -2.622897 +v -1.324867 2.824988 -2.669038 +v -1.415621 3.013499 -2.750113 +v -1.391798 3.215708 -2.844398 +v -1.232651 3.481920 -2.664485 +v -1.028541 3.557028 -2.719653 +v -0.807756 3.522594 -2.740000 +v -0.629455 3.387844 -2.720073 +v -0.541414 3.188885 -2.665213 +v -0.567224 2.979027 -2.590118 +v -0.699968 2.814501 -2.514911 +v -0.904078 2.739393 -2.459743 +v -1.124863 2.773827 -2.439396 +v -1.303164 2.908577 -2.459323 +v -1.391204 3.107536 -2.514183 +v -1.365395 3.317394 -2.589278 +v -1.182735 3.557857 -2.395943 +v -0.979963 3.630930 -2.458307 +v -0.762832 3.590937 -2.498312 +v -0.589523 3.448593 -2.505240 +v -0.506473 3.242040 -2.477234 +v -0.535937 3.026623 -2.421798 +v -0.670018 2.860063 -2.353786 +v -0.872791 2.786990 -2.291422 +v -1.089922 2.826983 -2.251417 +v -1.263231 2.969326 -2.244489 +v -1.346281 3.175879 -2.272496 +v -1.316817 3.391296 -2.327931 +v -1.110888 3.603944 -2.125599 +v -0.910041 3.675783 -2.195206 +v -0.698169 3.632416 -2.255002 +v -0.532045 3.485464 -2.288965 +v -0.456180 3.274302 -2.287992 +v -0.490903 3.055511 
-2.252347 +v -0.626910 2.887716 -2.191579 +v -0.827758 2.815877 -2.121972 +v -1.039629 2.859244 -2.062176 +v -1.205753 3.006196 -2.028214 +v -1.281618 3.217358 -2.029186 +v -1.246895 3.436149 -2.064831 +v -1.018339 3.619395 -1.858076 +v -0.819971 3.690820 -1.934852 +v -0.614875 3.646322 -2.014232 +v -0.458005 3.497824 -2.074946 +v -0.391396 3.285117 -2.100727 +v -0.432894 3.065196 -2.084666 +v -0.571380 2.896986 -2.031066 +v -0.769748 2.825562 -1.954290 +v -0.974844 2.870060 -1.874910 +v -1.131714 3.018557 -1.814196 +v -1.198323 3.231264 -1.788415 +v -1.156825 3.451186 -1.804477 +v -0.906669 3.603944 -1.597953 +v -0.711294 3.675783 -1.681699 +v -0.514373 3.632416 -1.780121 +v -0.368670 3.485464 -1.866848 +v -0.313227 3.274302 -1.918641 +v -0.362900 3.055511 -1.921622 +v -0.504379 2.887716 -1.874992 +v -0.699754 2.815877 -1.791246 +v -0.896676 2.859244 -1.692824 +v -1.042378 3.006196 -1.606097 +v -1.097821 3.217358 -1.554305 +v -1.048148 3.436149 -1.551324 +v -0.777793 3.557857 -1.349682 +v -0.585871 3.630930 -1.440080 +v -0.398384 3.590937 -1.556677 +v -0.265569 3.448593 -1.668231 +v -0.223014 3.242040 -1.744851 +v -0.282121 3.026623 -1.766006 +v -0.427053 2.860063 -1.726029 +v -0.618975 2.786990 -1.635631 +v -0.806462 2.826983 -1.519034 +v -0.939277 2.969326 -1.407480 +v -0.981832 3.175879 -1.330860 +v -0.922725 3.391296 -1.309705 +v -0.633913 3.481920 -1.117509 +v -0.445846 3.557028 -1.214128 +v -0.268892 3.522594 -1.347721 +v -0.150465 3.387845 -1.482492 +v -0.122298 3.188885 -1.582329 +v -0.191938 2.979027 -1.620481 +v -0.340725 2.814502 -1.586725 +v -0.528792 2.739393 -1.490106 +v -0.705746 2.773828 -1.356513 +v -0.824173 2.908577 -1.221742 +v -0.852340 3.107537 -1.121904 +v -0.782700 3.317395 -1.083752 +v -0.477491 3.377434 -0.905406 +v -0.293616 3.455342 -1.007708 +v -0.128112 3.428557 -1.156828 +v -0.025328 3.304256 -1.312810 +v -0.012803 3.115745 -1.433857 +v -0.093894 2.913536 -1.487536 +v -0.246872 2.751810 -1.459463 +v -0.430748 2.673902 -1.357161 +v 
-0.596251 2.700687 -1.208041 +v -0.699036 2.824989 -1.052059 +v -0.711561 3.013499 -0.931012 +v -0.630470 3.215709 -0.877333 +v -0.311205 3.246187 -0.717003 +v -0.131785 3.327611 -0.824354 +v 0.021546 3.310434 -0.987266 +v 0.107702 3.199258 -1.162088 +v 0.103598 3.023871 -1.301975 +v 0.010334 2.831270 -1.369447 +v -0.147100 2.673062 -1.346422 +v -0.326520 2.591637 -1.239071 +v -0.479851 2.608814 -1.076159 +v -0.566007 2.719991 -0.901337 +v -0.561903 2.895377 -0.761449 +v -0.468639 3.087978 -0.693978 +v -0.137899 3.090424 -0.555525 +v 0.036877 3.176022 -0.667202 +v 0.177521 3.170248 -0.841936 +v 0.246346 3.074647 -1.032905 +v 0.224912 2.914838 -1.188941 +v 0.118961 2.733639 -1.268233 +v -0.043117 2.579604 -1.249535 +v -0.217893 2.494006 -1.137858 +v -0.358537 2.499780 -0.963124 +v -0.427362 2.595380 -0.772155 +v -0.405928 2.755190 -0.616119 +v -0.299977 2.936389 -0.536827 +v 0.039461 2.912811 -0.423733 +v 0.209485 3.003168 -0.538942 +v 0.337145 3.010396 -0.723323 +v 0.388234 2.932557 -0.927472 +v 0.349064 2.790508 -1.096687 +v 0.230129 2.622312 -1.185627 +v 0.063299 2.473036 -1.170460 +v -0.106725 2.382679 -1.055251 +v -0.234385 2.375451 -0.870870 +v -0.285474 2.453290 -0.666721 +v -0.246304 2.595338 -0.497507 +v -0.127369 2.763535 -0.408567 +v 0.217842 2.716384 -0.323883 +v 0.383086 2.812005 -0.441767 +v 0.497688 2.833612 -0.633458 +v 0.530939 2.775416 -0.847591 +v 0.473931 2.653010 -1.026791 +v 0.341938 2.499193 -1.123041 +v 0.170328 2.355180 -1.110550 +v 0.005084 2.259559 -0.992665 +v -0.109518 2.237952 -0.800975 +v -0.142769 2.296149 -0.586841 +v -0.085761 2.418554 -0.407641 +v 0.046232 2.572371 -0.311392 +v 0.394189 2.504508 -0.257683 +v 0.554708 2.605806 -0.377341 +v 0.656400 2.642924 -0.573878 +v 0.672017 2.605915 -0.794632 +v 0.597374 2.504697 -0.980452 +v 0.452471 2.366390 -1.081547 +v 0.276136 2.228055 -1.070830 +v 0.115617 2.126757 -0.951172 +v 0.013925 2.089639 -0.754635 +v -0.001692 2.126648 -0.533881 +v 0.072952 2.227866 -0.348062 +v 0.217854 2.366173 
-0.246966 +v 0.565487 2.280807 -0.226266 +v 0.721416 2.388099 -0.346766 +v 0.810568 2.441592 -0.545603 +v 0.809055 2.426954 -0.769498 +v 0.717282 2.348106 -0.958460 +v 0.559840 2.226175 -1.061855 +v 0.378915 2.093834 -1.051980 +v 0.222986 1.986542 -0.931480 +v 0.133833 1.933048 -0.732643 +v 0.135347 1.947687 -0.508748 +v 0.227119 2.026535 -0.319787 +v 0.384562 2.148465 -0.216391 +v 0.728804 2.049108 -0.230170 +v 0.880357 2.162608 -0.350565 +v 0.957553 2.233063 -0.549116 +v 0.939708 2.241595 -0.772621 +v 0.831604 2.185916 -0.961192 +v 0.662206 2.080948 -1.064302 +v 0.476905 1.954814 -1.054322 +v 0.325352 1.841314 -0.933927 +v 0.248155 1.770859 -0.735376 +v 0.266000 1.762328 -0.511871 +v 0.374104 1.818006 -0.323300 +v 0.543502 1.922975 -0.220190 +v 0.881346 1.813374 -0.269327 +v 1.028812 1.933190 -0.388674 +v 1.094842 2.020902 -0.584358 +v 1.061743 2.053007 -0.803947 +v 0.938384 2.020902 -0.988603 +v 0.757819 1.933190 -1.088846 +v 0.568431 1.813374 -1.077816 +v 0.420965 1.693557 -0.958470 +v 0.354935 1.605845 -0.762786 +v 0.388034 1.573740 -0.543197 +v 0.511393 1.605845 -0.358541 +v 0.691958 1.693557 -0.258298 +v 1.020503 1.577641 -0.343068 +v 1.164240 1.703774 -0.460438 +v 1.220082 1.808743 -0.650725 +v 1.173068 1.864421 -0.862940 +v 1.035793 1.855890 -1.040221 +v 0.845042 1.785434 -1.135066 +v 0.651924 1.671934 -1.122061 +v 0.508187 1.545801 -1.004691 +v 0.452345 1.440832 -0.814405 +v 0.499359 1.385154 -0.602189 +v 0.636634 1.393686 -0.424908 +v 0.827386 1.464141 -0.330063 +v 1.143893 1.345942 -0.450131 +v 1.284324 1.478284 -0.564633 +v 1.331134 1.600214 -0.747081 +v 1.271780 1.679062 -0.948590 +v 1.122167 1.693700 -1.115165 +v 0.922382 1.640207 -1.202173 +v 0.725959 1.532915 -1.186299 +v 0.585528 1.400573 -1.071797 +v 0.538718 1.278643 -0.889349 +v 0.598072 1.199795 -0.687840 +v 0.747685 1.185156 -0.521265 +v 0.947470 1.238650 -0.434257 +v 1.249407 1.122239 -0.588685 +v 1.387010 1.260575 -0.699474 +v 1.426096 1.398881 -0.871780 +v 1.356191 1.500100 -1.059433 +v 
1.196026 1.537108 -1.212153 +v 0.988517 1.499991 -1.289017 +v 0.789267 1.398693 -1.269431 +v 0.651663 1.260357 -1.158642 +v 0.612577 1.122051 -0.986336 +v 0.682483 1.020832 -0.798683 +v 0.842647 0.983824 -0.645963 +v 1.050156 1.020941 -0.569098 +v 1.335237 0.910363 -0.756357 +v 1.470541 1.054376 -0.862653 +v 1.503343 1.208193 -1.022685 +v 1.424855 1.330599 -1.193571 +v 1.256107 1.388795 -1.329523 +v 1.042315 1.367188 -1.394113 +v 0.840765 1.271567 -1.370034 +v 0.705461 1.127555 -1.263738 +v 0.672659 0.973738 -1.103707 +v 0.751147 0.851332 -0.932820 +v 0.919895 0.793136 -0.796868 +v 1.133687 0.814743 -0.732278 +v 1.399916 0.713938 -0.950279 +v 1.533486 0.863214 -1.051380 +v 1.561554 1.031410 -1.197215 +v 1.476598 1.173459 -1.348709 +v 1.301382 1.251298 -1.465269 +v 1.082856 1.244070 -1.515663 +v 0.879572 1.153713 -1.486388 +v 0.746001 1.004436 -1.385288 +v 0.717934 0.836240 -1.239452 +v 0.802890 0.694192 -1.087958 +v 0.978106 0.616353 -0.971398 +v 1.196632 0.623581 -0.921004 +v 1.442337 0.536325 -1.167134 +v 1.574771 0.690360 -1.262424 +v 1.599733 0.871558 -1.392384 +v 1.510535 1.031368 -1.522193 +v 1.331077 1.126968 -1.617067 +v 1.109445 1.132743 -1.651586 +v 0.905025 1.047144 -1.616500 +v 0.772591 0.893109 -1.521211 +v 0.747628 0.711911 -1.391251 +v 0.836826 0.552101 -1.261442 +v 1.016284 0.456501 -1.166568 +v 1.237916 0.450726 -1.132048 +v 1.461774 0.380561 -1.403213 +v 1.593687 0.538770 -1.492177 +v 1.617226 0.731371 -1.604855 +v 1.526085 0.906757 -1.711056 +v 1.344683 1.017933 -1.782322 +v 1.121628 1.035111 -1.799559 +v 0.916687 0.953686 -1.758148 +v 0.784774 0.795477 -1.669184 +v 0.761234 0.602876 -1.556506 +v 0.852376 0.427490 -1.450305 +v 1.033778 0.316313 -1.379038 +v 1.256833 0.299136 -1.361801 +v 1.457894 0.249314 -1.654472 +v 1.589911 0.411039 -1.736703 +v 1.613735 0.613248 -1.830988 +v 1.522981 0.801759 -1.912063 +v 1.341967 0.926060 -1.958204 +v 1.119196 0.952846 -1.957047 +v 0.914359 0.874938 -1.908903 +v 0.782342 0.713212 -1.826672 +v 0.758519 
0.511003 -1.732387 +v 0.849272 0.322492 -1.651312 +v 1.030286 0.198191 -1.605172 +v 1.253057 0.171406 -1.606328 +v 1.430765 0.144828 -1.916615 +v 1.563509 0.309353 -1.991822 +v 1.589318 0.519211 -2.066917 +v 1.501277 0.718171 -2.121777 +v 1.322977 0.852921 -2.141704 +v 1.102192 0.887355 -2.121357 +v 0.898082 0.812246 -2.066189 +v 0.765338 0.647721 -1.990982 +v 0.739528 0.437863 -1.915887 +v 0.827569 0.238904 -1.861027 +v 1.005870 0.104154 -1.841100 +v 1.226655 0.069720 -1.861447 +v 1.380849 0.068891 -2.185158 +v 1.514931 0.235451 -2.253169 +v 1.544394 0.450868 -2.308605 +v 1.461345 0.657421 -2.336612 +v 1.288036 0.799765 -2.329684 +v 1.070905 0.839758 -2.289678 +v 0.868132 0.766684 -2.227315 +v 0.734051 0.600124 -2.159303 +v 0.704587 0.384707 -2.103867 +v 0.787636 0.178154 -2.075861 +v 0.960945 0.035811 -2.082788 +v 1.178076 -0.004182 -2.122794 +v 1.309002 0.022803 -2.455502 +v 1.445009 0.190598 -2.516270 +v 1.479732 0.409389 -2.551915 +v 1.403867 0.620551 -2.552887 +v 1.237743 0.767503 -2.518925 +v 1.025871 0.810870 -2.459129 +v 0.825024 0.739032 -2.389521 +v 0.689017 0.571237 -2.328753 +v 0.654294 0.352446 -2.293108 +v 0.730159 0.141284 -2.292136 +v 0.896283 -0.005668 -2.326098 +v 1.108154 -0.049035 -2.385895 +vn 0.6749 -0.6605 -0.3290 +vn 0.8964 -0.1981 -0.3966 +vn 0.8778 0.3183 -0.3579 +vn 0.6232 0.7496 -0.2230 +vn 0.2015 0.9791 -0.0283 +vn -0.2735 0.9461 0.1736 +vn -0.6749 0.6605 0.3290 +vn -0.8964 0.1981 0.3966 +vn -0.8778 -0.3183 0.3579 +vn -0.6232 -0.7496 0.2230 +vn -0.2015 -0.9791 0.0283 +vn 0.2735 -0.9461 -0.1736 +vn 0.6212 -0.6467 -0.4425 +vn 0.8570 -0.1880 -0.4798 +vn 0.8634 0.3220 -0.3884 +vn 0.6377 0.7459 -0.1925 +vn 0.2408 0.9690 0.0549 +vn -0.2197 0.9323 0.2871 +vn -0.6212 0.6467 0.4425 +vn -0.8570 0.1880 0.4798 +vn -0.8634 -0.3220 0.3884 +vn -0.6377 -0.7459 0.1925 +vn -0.2408 -0.9690 -0.0549 +vn 0.2197 -0.9323 -0.2871 +vn 0.5603 -0.6195 -0.5498 +vn 0.8124 -0.1681 -0.5584 +vn 0.8471 0.3293 -0.4172 +vn 0.6540 0.7386 -0.1637 +vn 0.2855 0.9490 0.1335 
+vn -0.1588 0.9051 0.3944 +vn -0.5603 0.6195 0.5498 +vn -0.8124 0.1681 0.5584 +vn -0.8471 -0.3293 0.4172 +vn -0.6540 -0.7386 0.1637 +vn -0.2855 -0.9490 -0.1335 +vn 0.1588 -0.9051 -0.3944 +vn 0.4932 -0.5792 -0.6491 +vn 0.7632 -0.1385 -0.6311 +vn 0.8290 0.3401 -0.4439 +vn 0.6720 0.7277 -0.1371 +vn 0.3346 0.9195 0.2062 +vn -0.0917 0.8648 0.4937 +vn -0.4932 0.5792 0.6491 +vn -0.7632 0.1385 0.6311 +vn -0.8290 -0.3401 0.4439 +vn -0.6720 -0.7277 0.1371 +vn -0.3346 -0.9195 -0.2062 +vn 0.0917 -0.8648 -0.4937 +vn 0.4211 -0.5265 -0.7385 +vn 0.7104 -0.1000 -0.6967 +vn 0.8097 0.3542 -0.4679 +vn 0.6914 0.7136 -0.1131 +vn 0.3874 0.8809 0.2717 +vn -0.0197 0.8121 0.5831 +vn -0.4211 0.5265 0.7385 +vn -0.7104 0.1000 0.6967 +vn -0.8097 -0.3542 0.4679 +vn -0.6914 -0.7136 0.1131 +vn -0.3874 -0.8809 -0.2717 +vn 0.0197 -0.8121 -0.5831 +vn 0.3453 -0.4624 -0.8167 +vn 0.6548 -0.0530 -0.7539 +vn 0.7893 0.3714 -0.4889 +vn 0.7117 0.6964 -0.0921 +vn 0.4430 0.8340 0.3290 +vn 0.0562 0.7480 0.6613 +vn -0.3453 0.4624 0.8167 +vn -0.6548 0.0530 0.7539 +vn -0.7893 -0.3714 0.4889 +vn -0.7117 -0.6964 0.0921 +vn -0.4430 -0.8340 -0.3290 +vn -0.0562 -0.7480 -0.6613 +vn 0.2670 -0.3880 -0.8822 +vn 0.5974 0.0016 -0.8019 +vn 0.7683 0.3914 -0.5064 +vn 0.7328 0.6764 -0.0745 +vn 0.5004 0.7794 0.3770 +vn 0.1345 0.6736 0.7268 +vn -0.2670 0.3880 0.8822 +vn -0.5974 -0.0016 0.8019 +vn -0.7683 -0.3914 0.5064 +vn -0.7328 -0.6764 0.0745 +vn -0.5004 -0.7794 -0.3770 +vn -0.1345 -0.6736 -0.7268 +vn 0.1875 -0.3044 -0.9339 +vn 0.5392 0.0628 -0.8398 +vn 0.7470 0.4139 -0.5203 +vn 0.7541 0.6540 -0.0606 +vn 0.5586 0.7182 0.4149 +vn 0.2140 0.5900 0.7785 +vn -0.1875 0.3044 0.9339 +vn -0.5392 -0.0628 0.8398 +vn -0.7470 -0.4139 0.5203 +vn -0.7541 -0.6540 0.0606 +vn -0.5586 -0.7182 -0.4149 +vn -0.2140 -0.5900 -0.7785 +vn 0.1083 -0.2133 -0.9710 +vn 0.4812 0.1296 -0.8670 +vn 0.7257 0.4383 -0.5303 +vn 0.7753 0.6295 -0.0507 +vn 0.6167 0.6514 0.4421 +vn 0.2932 0.4988 0.8156 +vn -0.1083 0.2133 0.9710 +vn -0.4812 -0.1296 0.8670 +vn -0.7257 
-0.4383 0.5303 +vn -0.7753 -0.6295 0.0507 +vn -0.6167 -0.6514 -0.4421 +vn -0.2932 -0.4988 -0.8156 +vn 0.0306 -0.1160 -0.9928 +vn 0.4243 0.2009 -0.8830 +vn 0.7049 0.4645 -0.5361 +vn 0.7962 0.6034 -0.0448 +vn 0.6735 0.5801 0.4581 +vn 0.3708 0.4016 0.8374 +vn -0.0306 0.1160 0.9928 +vn -0.4243 -0.2009 0.8830 +vn -0.7049 -0.4645 0.5361 +vn -0.7962 -0.6034 0.0448 +vn -0.6735 -0.5801 -0.4581 +vn -0.3708 -0.4016 -0.8374 +vn -0.0441 -0.0143 -0.9989 +vn 0.3695 0.2754 -0.8875 +vn 0.6848 0.4918 -0.5378 +vn 0.8163 0.5761 -0.0432 +vn 0.7283 0.5056 0.4626 +vn 0.4456 0.2998 0.8435 +vn 0.0441 0.0143 0.9989 +vn -0.3695 -0.2754 0.8875 +vn -0.6848 -0.4918 0.5378 +vn -0.8163 -0.5761 0.0432 +vn -0.7283 -0.5056 -0.4626 +vn -0.4456 -0.2998 -0.8435 +vn -0.1146 0.0901 -0.9893 +vn 0.3178 0.3519 -0.8804 +vn 0.6659 0.5198 -0.5352 +vn 0.8352 0.5481 -0.0458 +vn 0.7800 0.4291 0.4555 +vn 0.5161 0.1954 0.8339 +vn 0.1146 -0.0901 0.9893 +vn -0.3178 -0.3519 0.8804 +vn -0.6659 -0.5198 0.5352 +vn -0.8352 -0.5481 0.0458 +vn -0.7800 -0.4291 -0.4555 +vn -0.5161 -0.1954 -0.8339 +vn -0.1798 0.1954 -0.9641 +vn 0.2701 0.4291 -0.8619 +vn 0.6484 0.5481 -0.5284 +vn 0.8527 0.5198 -0.0525 +vn 0.8277 0.3519 0.4370 +vn 0.5813 0.0901 0.8087 +vn 0.1798 -0.1954 0.9641 +vn -0.2701 -0.4291 0.8619 +vn -0.6484 -0.5481 0.5284 +vn -0.8527 -0.5198 0.0525 +vn -0.8277 -0.3519 -0.4370 +vn -0.5813 -0.0901 -0.8087 +vn -0.2384 0.2998 -0.9237 +vn 0.2271 0.5056 -0.8324 +vn 0.6326 0.5761 -0.5176 +vn 0.8684 0.4918 -0.0634 +vn 0.8707 0.2754 0.4075 +vn 0.6399 -0.0143 0.7683 +vn 0.2384 -0.2998 0.9237 +vn -0.2271 -0.5056 0.8324 +vn -0.6326 -0.5761 0.5176 +vn -0.8684 -0.4918 0.0634 +vn -0.8707 -0.2754 -0.4075 +vn -0.6399 0.0143 -0.7683 +vn -0.2896 0.4016 -0.8689 +vn 0.1897 0.5801 -0.7922 +vn 0.6189 0.6034 -0.5029 +vn 0.8821 0.4645 -0.0781 +vn 0.9082 0.2009 0.3673 +vn 0.6910 -0.1160 0.7135 +vn 0.2896 -0.4016 0.8689 +vn -0.1897 -0.5801 0.7922 +vn -0.6189 -0.6034 0.5029 +vn -0.8821 -0.4645 0.0781 +vn -0.9082 -0.2009 -0.3673 +vn -0.6910 0.1160 
-0.7135 +vn -0.3323 0.4988 -0.8005 +vn 0.1584 0.6514 -0.7420 +vn 0.6074 0.6295 -0.4845 +vn 0.8936 0.4383 -0.0965 +vn 0.9395 0.1296 0.3171 +vn 0.7338 -0.2133 0.6451 +vn 0.3323 -0.4988 0.8005 +vn -0.1584 -0.6514 0.7420 +vn -0.6074 -0.6295 0.4845 +vn -0.8936 -0.4383 0.0965 +vn -0.9395 -0.1296 -0.3171 +vn -0.7338 0.2133 -0.6451 +vn -0.3659 0.5900 -0.7197 +vn 0.1337 0.7182 -0.6829 +vn 0.5984 0.6540 -0.4628 +vn 0.9026 0.4139 -0.1181 +vn 0.9641 0.0628 0.2580 +vn 0.7674 -0.3044 0.5643 +vn 0.3659 -0.5900 0.7197 +vn -0.1337 -0.7182 0.6829 +vn -0.5984 -0.6540 0.4628 +vn -0.9026 -0.4139 0.1181 +vn -0.9641 -0.0628 -0.2580 +vn -0.7674 0.3044 -0.5643 +vn -0.3898 0.6736 -0.6279 +vn 0.1162 0.7794 -0.6156 +vn 0.5920 0.6764 -0.4382 +vn 0.9091 0.3914 -0.1428 +vn 0.9816 0.0016 0.1907 +vn 0.7913 -0.3880 0.4726 +vn 0.3898 -0.6736 0.6279 +vn -0.1162 -0.7794 0.6156 +vn -0.5920 -0.6764 0.4382 +vn -0.9091 -0.3914 0.1428 +vn -0.9816 -0.0016 -0.1907 +vn -0.7913 0.3880 -0.4726 +vn -0.4036 0.7480 -0.5268 +vn 0.1061 0.8340 -0.5415 +vn 0.5883 0.6964 -0.4110 +vn 0.9128 0.3714 -0.1699 +vn 0.9918 -0.0530 0.1166 +vn 0.8051 -0.4624 0.3714 +vn 0.4036 -0.7480 0.5268 +vn -0.1061 -0.8340 0.5415 +vn -0.5883 -0.6964 0.4110 +vn -0.9128 -0.3714 0.1699 +vn -0.9918 0.0530 -0.1166 +vn -0.8051 0.4624 -0.3714 +vn -0.4071 0.8121 -0.4179 +vn 0.1035 0.8809 -0.4618 +vn 0.5874 0.7136 -0.3818 +vn 0.9137 0.3542 -0.1991 +vn 0.9943 -0.1000 0.0369 +vn 0.8086 -0.5265 0.2626 +vn 0.4071 -0.8121 0.4179 +vn -0.1035 -0.8809 0.4618 +vn -0.5874 -0.7136 0.3818 +vn -0.9137 -0.3542 0.1991 +vn -0.9943 0.1000 -0.0369 +vn -0.8086 0.5265 -0.2626 +vn -0.4002 0.8648 -0.3033 +vn 0.1086 0.9195 -0.3777 +vn 0.5892 0.7277 -0.3510 +vn 0.9118 0.3401 -0.2299 +vn 0.9892 -0.1385 -0.0472 +vn 0.8017 -0.5792 0.1479 +vn 0.4002 -0.8648 0.3033 +vn -0.1086 -0.9195 0.3777 +vn -0.5892 -0.7277 0.3510 +vn -0.9118 -0.3401 0.2299 +vn -0.9892 0.1385 0.0472 +vn -0.8017 0.5792 -0.1479 +vn -0.3830 0.9051 -0.1847 +vn 0.1212 0.9490 -0.2909 +vn 0.5938 0.7386 -0.3192 +vn 
0.9072 0.3293 -0.2617 +vn 0.9766 -0.1681 -0.1340 +vn 0.7845 -0.6195 0.0294 +vn 0.3830 -0.9051 0.1847 +vn -0.1212 -0.9490 0.2909 +vn -0.5938 -0.7386 0.3192 +vn -0.9072 -0.3293 0.2617 +vn -0.9766 0.1681 0.1340 +vn -0.7845 0.6195 -0.0294 +vn -0.3558 0.9323 -0.0644 +vn 0.1411 0.9690 -0.2027 +vn 0.6011 0.7459 -0.2869 +vn 0.8999 0.3220 -0.2941 +vn 0.9567 -0.1880 -0.2222 +vn 0.7573 -0.6468 -0.0910 +vn 0.3558 -0.9323 0.0644 +vn -0.1411 -0.9690 0.2027 +vn -0.6011 -0.7459 0.2869 +vn -0.8999 -0.3220 0.2941 +vn -0.9567 0.1880 0.2222 +vn -0.7573 0.6467 0.0910 +vn -0.3191 0.9461 0.0557 +vn 0.1680 0.9791 -0.1147 +vn 0.6110 0.7496 -0.2547 +vn 0.8901 0.3183 -0.3263 +vn 0.9298 -0.1981 -0.3102 +vn 0.7205 -0.6605 -0.2111 +vn 0.3191 -0.9461 -0.0557 +vn -0.1680 -0.9791 0.1147 +vn -0.6110 -0.7496 0.2547 +vn -0.8901 -0.3183 0.3263 +vn -0.9298 0.1981 0.3102 +vn -0.7205 0.6605 0.2111 +vn -0.7327 -0.6764 0.0745 +vn -0.3898 0.6736 -0.6280 +vn 0.7573 -0.6467 -0.0910 +usemtl None +s off +f 1//1 13//1 14//1 2//1 +f 2//2 14//2 15//2 3//2 +f 3//3 15//3 16//3 4//3 +f 4//4 16//4 17//4 5//4 +f 5//5 17//5 18//5 6//5 +f 6//6 18//6 19//6 7//6 +f 7//7 19//7 20//7 8//7 +f 8//8 20//8 21//8 9//8 +f 9//9 21//9 22//9 10//9 +f 10//10 22//10 23//10 11//10 +f 11//11 23//11 24//11 12//11 +f 12//12 24//12 13//12 1//12 +f 13//13 25//13 26//13 14//13 +f 14//14 26//14 27//14 15//14 +f 15//15 27//15 28//15 16//15 +f 16//16 28//16 29//16 17//16 +f 17//17 29//17 30//17 18//17 +f 18//18 30//18 31//18 19//18 +f 19//19 31//19 32//19 20//19 +f 20//20 32//20 33//20 21//20 +f 21//21 33//21 34//21 22//21 +f 22//22 34//22 35//22 23//22 +f 23//23 35//23 36//23 24//23 +f 24//24 36//24 25//24 13//24 +f 25//25 37//25 38//25 26//25 +f 26//26 38//26 39//26 27//26 +f 27//27 39//27 40//27 28//27 +f 28//28 40//28 41//28 29//28 +f 29//29 41//29 42//29 30//29 +f 30//30 42//30 43//30 31//30 +f 31//31 43//31 44//31 32//31 +f 32//32 44//32 45//32 33//32 +f 33//33 45//33 46//33 34//33 +f 34//34 46//34 47//34 35//34 +f 35//35 47//35 48//35 
36//35 +f 36//36 48//36 37//36 25//36 +f 37//37 49//37 50//37 38//37 +f 38//38 50//38 51//38 39//38 +f 39//39 51//39 52//39 40//39 +f 40//40 52//40 53//40 41//40 +f 41//41 53//41 54//41 42//41 +f 42//42 54//42 55//42 43//42 +f 43//43 55//43 56//43 44//43 +f 44//44 56//44 57//44 45//44 +f 45//45 57//45 58//45 46//45 +f 46//46 58//46 59//46 47//46 +f 47//47 59//47 60//47 48//47 +f 48//48 60//48 49//48 37//48 +f 49//49 61//49 62//49 50//49 +f 50//50 62//50 63//50 51//50 +f 51//51 63//51 64//51 52//51 +f 52//52 64//52 65//52 53//52 +f 53//53 65//53 66//53 54//53 +f 54//54 66//54 67//54 55//54 +f 55//55 67//55 68//55 56//55 +f 56//56 68//56 69//56 57//56 +f 57//57 69//57 70//57 58//57 +f 58//58 70//58 71//58 59//58 +f 59//59 71//59 72//59 60//59 +f 60//60 72//60 61//60 49//60 +f 61//61 73//61 74//61 62//61 +f 62//62 74//62 75//62 63//62 +f 63//63 75//63 76//63 64//63 +f 64//64 76//64 77//64 65//64 +f 65//65 77//65 78//65 66//65 +f 66//66 78//66 79//66 67//66 +f 67//67 79//67 80//67 68//67 +f 68//68 80//68 81//68 69//68 +f 69//69 81//69 82//69 70//69 +f 70//70 82//70 83//70 71//70 +f 71//71 83//71 84//71 72//71 +f 72//72 84//72 73//72 61//72 +f 73//73 85//73 86//73 74//73 +f 74//74 86//74 87//74 75//74 +f 75//75 87//75 88//75 76//75 +f 76//76 88//76 89//76 77//76 +f 77//77 89//77 90//77 78//77 +f 78//78 90//78 91//78 79//78 +f 79//79 91//79 92//79 80//79 +f 80//80 92//80 93//80 81//80 +f 81//81 93//81 94//81 82//81 +f 82//82 94//82 95//82 83//82 +f 83//83 95//83 96//83 84//83 +f 84//84 96//84 85//84 73//84 +f 85//85 97//85 98//85 86//85 +f 86//86 98//86 99//86 87//86 +f 87//87 99//87 100//87 88//87 +f 88//88 100//88 101//88 89//88 +f 89//89 101//89 102//89 90//89 +f 90//90 102//90 103//90 91//90 +f 91//91 103//91 104//91 92//91 +f 92//92 104//92 105//92 93//92 +f 93//93 105//93 106//93 94//93 +f 94//94 106//94 107//94 95//94 +f 95//95 107//95 108//95 96//95 +f 96//96 108//96 97//96 85//96 +f 97//97 109//97 110//97 98//97 +f 98//98 110//98 111//98 99//98 +f 99//99 111//99 
112//99 100//99 +f 100//100 112//100 113//100 101//100 +f 101//101 113//101 114//101 102//101 +f 102//102 114//102 115//102 103//102 +f 103//103 115//103 116//103 104//103 +f 104//104 116//104 117//104 105//104 +f 105//105 117//105 118//105 106//105 +f 106//106 118//106 119//106 107//106 +f 107//107 119//107 120//107 108//107 +f 108//108 120//108 109//108 97//108 +f 109//109 121//109 122//109 110//109 +f 110//110 122//110 123//110 111//110 +f 111//111 123//111 124//111 112//111 +f 112//112 124//112 125//112 113//112 +f 113//113 125//113 126//113 114//113 +f 114//114 126//114 127//114 115//114 +f 115//115 127//115 128//115 116//115 +f 116//116 128//116 129//116 117//116 +f 117//117 129//117 130//117 118//117 +f 118//118 130//118 131//118 119//118 +f 119//119 131//119 132//119 120//119 +f 120//120 132//120 121//120 109//120 +f 121//121 133//121 134//121 122//121 +f 122//122 134//122 135//122 123//122 +f 123//123 135//123 136//123 124//123 +f 124//124 136//124 137//124 125//124 +f 125//125 137//125 138//125 126//125 +f 126//126 138//126 139//126 127//126 +f 127//127 139//127 140//127 128//127 +f 128//128 140//128 141//128 129//128 +f 129//129 141//129 142//129 130//129 +f 130//130 142//130 143//130 131//130 +f 131//131 143//131 144//131 132//131 +f 132//132 144//132 133//132 121//132 +f 133//133 145//133 146//133 134//133 +f 134//134 146//134 147//134 135//134 +f 135//135 147//135 148//135 136//135 +f 136//136 148//136 149//136 137//136 +f 137//137 149//137 150//137 138//137 +f 138//138 150//138 151//138 139//138 +f 139//139 151//139 152//139 140//139 +f 140//140 152//140 153//140 141//140 +f 141//141 153//141 154//141 142//141 +f 142//142 154//142 155//142 143//142 +f 143//143 155//143 156//143 144//143 +f 144//144 156//144 145//144 133//144 +f 145//145 157//145 158//145 146//145 +f 146//146 158//146 159//146 147//146 +f 147//147 159//147 160//147 148//147 +f 148//148 160//148 161//148 149//148 +f 149//149 161//149 162//149 150//149 +f 150//150 162//150 163//150 
151//150 +f 151//151 163//151 164//151 152//151 +f 152//152 164//152 165//152 153//152 +f 153//153 165//153 166//153 154//153 +f 154//154 166//154 167//154 155//154 +f 155//155 167//155 168//155 156//155 +f 156//156 168//156 157//156 145//156 +f 157//157 169//157 170//157 158//157 +f 158//158 170//158 171//158 159//158 +f 159//159 171//159 172//159 160//159 +f 160//160 172//160 173//160 161//160 +f 161//161 173//161 174//161 162//161 +f 162//162 174//162 175//162 163//162 +f 163//163 175//163 176//163 164//163 +f 164//164 176//164 177//164 165//164 +f 165//165 177//165 178//165 166//165 +f 166//166 178//166 179//166 167//166 +f 167//167 179//167 180//167 168//167 +f 168//168 180//168 169//168 157//168 +f 169//169 181//169 182//169 170//169 +f 170//170 182//170 183//170 171//170 +f 171//171 183//171 184//171 172//171 +f 172//172 184//172 185//172 173//172 +f 173//173 185//173 186//173 174//173 +f 174//174 186//174 187//174 175//174 +f 175//175 187//175 188//175 176//175 +f 176//176 188//176 189//176 177//176 +f 177//177 189//177 190//177 178//177 +f 178//178 190//178 191//178 179//178 +f 179//179 191//179 192//179 180//179 +f 180//180 192//180 181//180 169//180 +f 181//181 193//181 194//181 182//181 +f 182//182 194//182 195//182 183//182 +f 183//183 195//183 196//183 184//183 +f 184//184 196//184 197//184 185//184 +f 185//185 197//185 198//185 186//185 +f 186//186 198//186 199//186 187//186 +f 187//187 199//187 200//187 188//187 +f 188//188 200//188 201//188 189//188 +f 189//189 201//189 202//189 190//189 +f 190//190 202//190 203//190 191//190 +f 191//191 203//191 204//191 192//191 +f 192//192 204//192 193//192 181//192 +f 193//193 205//193 206//193 194//193 +f 194//194 206//194 207//194 195//194 +f 195//195 207//195 208//195 196//195 +f 196//196 208//196 209//196 197//196 +f 197//197 209//197 210//197 198//197 +f 198//198 210//198 211//198 199//198 +f 199//199 211//199 212//199 200//199 +f 200//200 212//200 213//200 201//200 +f 201//201 213//201 214//201 202//201 
+f 202//202 214//202 215//202 203//202 +f 203//203 215//203 216//203 204//203 +f 204//204 216//204 205//204 193//204 +f 205//205 217//205 218//205 206//205 +f 206//206 218//206 219//206 207//206 +f 207//207 219//207 220//207 208//207 +f 208//208 220//208 221//208 209//208 +f 209//209 221//209 222//209 210//209 +f 210//210 222//210 223//210 211//210 +f 211//211 223//211 224//211 212//211 +f 212//212 224//212 225//212 213//212 +f 213//213 225//213 226//213 214//213 +f 214//214 226//214 227//214 215//214 +f 215//215 227//215 228//215 216//215 +f 216//216 228//216 217//216 205//216 +f 217//217 229//217 230//217 218//217 +f 218//218 230//218 231//218 219//218 +f 219//219 231//219 232//219 220//219 +f 220//220 232//220 233//220 221//220 +f 221//221 233//221 234//221 222//221 +f 222//222 234//222 235//222 223//222 +f 223//223 235//223 236//223 224//223 +f 224//224 236//224 237//224 225//224 +f 225//225 237//225 238//225 226//225 +f 226//226 238//226 239//226 227//226 +f 227//227 239//227 240//227 228//227 +f 228//228 240//228 229//228 217//228 +f 229//229 241//229 242//229 230//229 +f 230//230 242//230 243//230 231//230 +f 231//231 243//231 244//231 232//231 +f 232//232 244//232 245//232 233//232 +f 233//233 245//233 246//233 234//233 +f 234//234 246//234 247//234 235//234 +f 235//235 247//235 248//235 236//235 +f 236//236 248//236 249//236 237//236 +f 237//237 249//237 250//237 238//237 +f 238//238 250//238 251//238 239//238 +f 239//239 251//239 252//239 240//239 +f 240//240 252//240 241//240 229//240 +f 241//241 253//241 254//241 242//241 +f 242//242 254//242 255//242 243//242 +f 243//243 255//243 256//243 244//243 +f 244//244 256//244 257//244 245//244 +f 245//245 257//245 258//245 246//245 +f 246//246 258//246 259//246 247//246 +f 247//247 259//247 260//247 248//247 +f 248//248 260//248 261//248 249//248 +f 249//249 261//249 262//249 250//249 +f 250//250 262//250 263//250 251//250 +f 251//251 263//251 264//251 252//251 +f 252//252 264//252 253//252 241//252 +f 
253//253 265//253 266//253 254//253 +f 254//254 266//254 267//254 255//254 +f 255//255 267//255 268//255 256//255 +f 256//256 268//256 269//256 257//256 +f 257//257 269//257 270//257 258//257 +f 258//258 270//258 271//258 259//258 +f 259//259 271//259 272//259 260//259 +f 260//260 272//260 273//260 261//260 +f 261//261 273//261 274//261 262//261 +f 262//262 274//262 275//262 263//262 +f 263//263 275//263 276//263 264//263 +f 264//264 276//264 265//264 253//264 +f 265//265 277//265 278//265 266//265 +f 266//266 278//266 279//266 267//266 +f 267//267 279//267 280//267 268//267 +f 268//268 280//268 281//268 269//268 +f 269//269 281//269 282//269 270//269 +f 270//270 282//270 283//270 271//270 +f 271//271 283//271 284//271 272//271 +f 272//272 284//272 285//272 273//272 +f 273//273 285//273 286//273 274//273 +f 274//274 286//274 287//274 275//274 +f 275//275 287//275 288//275 276//275 +f 276//276 288//276 277//276 265//276 +f 277//277 289//277 290//277 278//277 +f 278//278 290//278 291//278 279//278 +f 279//279 291//279 292//279 280//279 +f 280//280 292//280 293//280 281//280 +f 281//281 293//281 294//281 282//281 +f 282//282 294//282 295//282 283//282 +f 283//283 295//283 296//283 284//283 +f 284//284 296//284 297//284 285//284 +f 285//285 297//285 298//285 286//285 +f 286//286 298//286 299//286 287//286 +f 287//287 299//287 300//287 288//287 +f 288//288 300//288 289//288 277//288 +f 289//6 301//6 302//6 290//6 +f 290//5 302//5 303//5 291//5 +f 291//4 303//4 304//4 292//4 +f 292//3 304//3 305//3 293//3 +f 293//2 305//2 306//2 294//2 +f 294//1 306//1 307//1 295//1 +f 295//12 307//12 308//12 296//12 +f 296//11 308//11 309//11 297//11 +f 297//10 309//10 310//10 298//10 +f 298//9 310//9 311//9 299//9 +f 299//8 311//8 312//8 300//8 +f 300//7 312//7 301//7 289//7 +f 301//18 313//18 314//18 302//18 +f 302//17 314//17 315//17 303//17 +f 303//16 315//16 316//16 304//16 +f 304//15 316//15 317//15 305//15 +f 305//14 317//14 318//14 306//14 +f 306//13 318//13 319//13 307//13 +f 
307//24 319//24 320//24 308//24 +f 308//23 320//23 321//23 309//23 +f 309//22 321//22 322//22 310//22 +f 310//21 322//21 323//21 311//21 +f 311//20 323//20 324//20 312//20 +f 312//19 324//19 313//19 301//19 +f 313//30 325//30 326//30 314//30 +f 314//29 326//29 327//29 315//29 +f 315//28 327//28 328//28 316//28 +f 316//27 328//27 329//27 317//27 +f 317//26 329//26 330//26 318//26 +f 318//25 330//25 331//25 319//25 +f 319//36 331//36 332//36 320//36 +f 320//35 332//35 333//35 321//35 +f 321//34 333//34 334//34 322//34 +f 322//33 334//33 335//33 323//33 +f 323//32 335//32 336//32 324//32 +f 324//31 336//31 325//31 313//31 +f 325//42 337//42 338//42 326//42 +f 326//41 338//41 339//41 327//41 +f 327//40 339//40 340//40 328//40 +f 328//39 340//39 341//39 329//39 +f 329//38 341//38 342//38 330//38 +f 330//37 342//37 343//37 331//37 +f 331//48 343//48 344//48 332//48 +f 332//47 344//47 345//47 333//47 +f 333//46 345//46 346//46 334//46 +f 334//45 346//45 347//45 335//45 +f 335//44 347//44 348//44 336//44 +f 336//43 348//43 337//43 325//43 +f 337//54 349//54 350//54 338//54 +f 338//53 350//53 351//53 339//53 +f 339//52 351//52 352//52 340//52 +f 340//51 352//51 353//51 341//51 +f 341//50 353//50 354//50 342//50 +f 342//49 354//49 355//49 343//49 +f 343//60 355//60 356//60 344//60 +f 344//59 356//59 357//59 345//59 +f 345//58 357//58 358//58 346//58 +f 346//57 358//57 359//57 347//57 +f 347//56 359//56 360//56 348//56 +f 348//55 360//55 349//55 337//55 +f 349//66 361//66 362//66 350//66 +f 350//65 362//65 363//65 351//65 +f 351//64 363//64 364//64 352//64 +f 352//63 364//63 365//63 353//63 +f 353//62 365//62 366//62 354//62 +f 354//61 366//61 367//61 355//61 +f 355//72 367//72 368//72 356//72 +f 356//71 368//71 369//71 357//71 +f 357//70 369//70 370//70 358//70 +f 358//69 370//69 371//69 359//69 +f 359//68 371//68 372//68 360//68 +f 360//67 372//67 361//67 349//67 +f 361//78 373//78 374//78 362//78 +f 362//77 374//77 375//77 363//77 +f 363//76 375//76 376//76 364//76 +f 
364//75 376//75 377//75 365//75 +f 365//74 377//74 378//74 366//74 +f 366//73 378//73 379//73 367//73 +f 367//84 379//84 380//84 368//84 +f 368//83 380//83 381//83 369//83 +f 369//289 381//289 382//289 370//289 +f 370//81 382//81 383//81 371//81 +f 371//80 383//80 384//80 372//80 +f 372//79 384//79 373//79 361//79 +f 373//90 385//90 386//90 374//90 +f 374//89 386//89 387//89 375//89 +f 375//88 387//88 388//88 376//88 +f 376//87 388//87 389//87 377//87 +f 377//86 389//86 390//86 378//86 +f 378//85 390//85 391//85 379//85 +f 379//96 391//96 392//96 380//96 +f 380//95 392//95 393//95 381//95 +f 381//94 393//94 394//94 382//94 +f 382//93 394//93 395//93 383//93 +f 383//92 395//92 396//92 384//92 +f 384//91 396//91 385//91 373//91 +f 385//102 397//102 398//102 386//102 +f 386//101 398//101 399//101 387//101 +f 387//100 399//100 400//100 388//100 +f 388//99 400//99 401//99 389//99 +f 389//98 401//98 402//98 390//98 +f 390//97 402//97 403//97 391//97 +f 391//108 403//108 404//108 392//108 +f 392//107 404//107 405//107 393//107 +f 393//106 405//106 406//106 394//106 +f 394//105 406//105 407//105 395//105 +f 395//104 407//104 408//104 396//104 +f 396//103 408//103 397//103 385//103 +f 397//114 409//114 410//114 398//114 +f 398//113 410//113 411//113 399//113 +f 399//112 411//112 412//112 400//112 +f 400//111 412//111 413//111 401//111 +f 401//110 413//110 414//110 402//110 +f 402//109 414//109 415//109 403//109 +f 403//120 415//120 416//120 404//120 +f 404//119 416//119 417//119 405//119 +f 405//118 417//118 418//118 406//118 +f 406//117 418//117 419//117 407//117 +f 407//116 419//116 420//116 408//116 +f 408//115 420//115 409//115 397//115 +f 409//126 421//126 422//126 410//126 +f 410//125 422//125 423//125 411//125 +f 411//124 423//124 424//124 412//124 +f 412//123 424//123 425//123 413//123 +f 413//122 425//122 426//122 414//122 +f 414//121 426//121 427//121 415//121 +f 415//132 427//132 428//132 416//132 +f 416//131 428//131 429//131 417//131 +f 417//130 429//130 
430//130 418//130 +f 418//129 430//129 431//129 419//129 +f 419//128 431//128 432//128 420//128 +f 420//127 432//127 421//127 409//127 +f 421//138 433//138 434//138 422//138 +f 422//137 434//137 435//137 423//137 +f 423//136 435//136 436//136 424//136 +f 424//135 436//135 437//135 425//135 +f 425//134 437//134 438//134 426//134 +f 426//133 438//133 439//133 427//133 +f 427//144 439//144 440//144 428//144 +f 428//143 440//143 441//143 429//143 +f 429//142 441//142 442//142 430//142 +f 430//141 442//141 443//141 431//141 +f 431//140 443//140 444//140 432//140 +f 432//139 444//139 433//139 421//139 +f 433//150 445//150 446//150 434//150 +f 434//149 446//149 447//149 435//149 +f 435//148 447//148 448//148 436//148 +f 436//147 448//147 449//147 437//147 +f 437//146 449//146 450//146 438//146 +f 438//145 450//145 451//145 439//145 +f 439//156 451//156 452//156 440//156 +f 440//155 452//155 453//155 441//155 +f 441//154 453//154 454//154 442//154 +f 442//153 454//153 455//153 443//153 +f 443//152 455//152 456//152 444//152 +f 444//151 456//151 445//151 433//151 +f 445//162 457//162 458//162 446//162 +f 446//161 458//161 459//161 447//161 +f 447//160 459//160 460//160 448//160 +f 448//159 460//159 461//159 449//159 +f 449//158 461//158 462//158 450//158 +f 450//157 462//157 463//157 451//157 +f 451//168 463//168 464//168 452//168 +f 452//167 464//167 465//167 453//167 +f 453//166 465//166 466//166 454//166 +f 454//165 466//165 467//165 455//165 +f 455//164 467//164 468//164 456//164 +f 456//163 468//163 457//163 445//163 +f 457//174 469//174 470//174 458//174 +f 458//173 470//173 471//173 459//173 +f 459//172 471//172 472//172 460//172 +f 460//171 472//171 473//171 461//171 +f 461//170 473//170 474//170 462//170 +f 462//169 474//169 475//169 463//169 +f 463//180 475//180 476//180 464//180 +f 464//179 476//179 477//179 465//179 +f 465//178 477//178 478//178 466//178 +f 466//177 478//177 479//177 467//177 +f 467//176 479//176 480//176 468//176 +f 468//175 480//175 469//175 
457//175 +f 469//186 481//186 482//186 470//186 +f 470//185 482//185 483//185 471//185 +f 471//184 483//184 484//184 472//184 +f 472//183 484//183 485//183 473//183 +f 473//182 485//182 486//182 474//182 +f 474//181 486//181 487//181 475//181 +f 475//192 487//192 488//192 476//192 +f 476//191 488//191 489//191 477//191 +f 477//190 489//190 490//190 478//190 +f 478//189 490//189 491//189 479//189 +f 479//188 491//188 492//188 480//188 +f 480//187 492//187 481//187 469//187 +f 481//198 493//198 494//198 482//198 +f 482//197 494//197 495//197 483//197 +f 483//196 495//196 496//196 484//196 +f 484//195 496//195 497//195 485//195 +f 485//194 497//194 498//194 486//194 +f 486//193 498//193 499//193 487//193 +f 487//204 499//204 500//204 488//204 +f 488//203 500//203 501//203 489//203 +f 489//202 501//202 502//202 490//202 +f 490//201 502//201 503//201 491//201 +f 491//200 503//200 504//200 492//200 +f 492//199 504//199 493//199 481//199 +f 493//210 505//210 506//210 494//210 +f 494//209 506//209 507//209 495//209 +f 495//208 507//208 508//208 496//208 +f 496//207 508//207 509//207 497//207 +f 497//206 509//206 510//206 498//206 +f 498//290 510//290 511//290 499//290 +f 499//216 511//216 512//216 500//216 +f 500//215 512//215 513//215 501//215 +f 501//214 513//214 514//214 502//214 +f 502//213 514//213 515//213 503//213 +f 503//212 515//212 516//212 504//212 +f 504//211 516//211 505//211 493//211 +f 505//222 517//222 518//222 506//222 +f 506//221 518//221 519//221 507//221 +f 507//220 519//220 520//220 508//220 +f 508//219 520//219 521//219 509//219 +f 509//218 521//218 522//218 510//218 +f 510//217 522//217 523//217 511//217 +f 511//228 523//228 524//228 512//228 +f 512//227 524//227 525//227 513//227 +f 513//226 525//226 526//226 514//226 +f 514//225 526//225 527//225 515//225 +f 515//224 527//224 528//224 516//224 +f 516//223 528//223 517//223 505//223 +f 517//234 529//234 530//234 518//234 +f 518//233 530//233 531//233 519//233 +f 519//232 531//232 532//232 520//232 
+f 520//231 532//231 533//231 521//231 +f 521//230 533//230 534//230 522//230 +f 522//229 534//229 535//229 523//229 +f 523//240 535//240 536//240 524//240 +f 524//239 536//239 537//239 525//239 +f 525//238 537//238 538//238 526//238 +f 526//237 538//237 539//237 527//237 +f 527//236 539//236 540//236 528//236 +f 528//235 540//235 529//235 517//235 +f 529//246 541//246 542//246 530//246 +f 530//245 542//245 543//245 531//245 +f 531//244 543//244 544//244 532//244 +f 532//243 544//243 545//243 533//243 +f 533//242 545//242 546//242 534//242 +f 534//241 546//241 547//241 535//241 +f 535//252 547//252 548//252 536//252 +f 536//251 548//251 549//251 537//251 +f 537//250 549//250 550//250 538//250 +f 538//249 550//249 551//249 539//249 +f 539//248 551//248 552//248 540//248 +f 540//247 552//247 541//247 529//247 +f 541//258 553//258 554//258 542//258 +f 542//257 554//257 555//257 543//257 +f 543//256 555//256 556//256 544//256 +f 544//255 556//255 557//255 545//255 +f 545//254 557//254 558//254 546//254 +f 546//253 558//253 559//253 547//253 +f 547//264 559//264 560//264 548//264 +f 548//263 560//263 561//263 549//263 +f 549//262 561//262 562//262 550//262 +f 550//261 562//261 563//261 551//261 +f 551//260 563//260 564//260 552//260 +f 552//259 564//259 553//259 541//259 +f 553//291 565//291 566//291 554//291 +f 554//269 566//269 567//269 555//269 +f 555//268 567//268 568//268 556//268 +f 556//267 568//267 569//267 557//267 +f 557//266 569//266 570//266 558//266 +f 558//265 570//265 571//265 559//265 +f 559//276 571//276 572//276 560//276 +f 560//275 572//275 573//275 561//275 +f 561//274 573//274 574//274 562//274 +f 562//273 574//273 575//273 563//273 +f 563//272 575//272 576//272 564//272 +f 564//271 576//271 565//271 553//271 +f 565//282 1//282 2//282 566//282 +f 566//281 2//281 3//281 567//281 +f 567//280 3//280 4//280 568//280 +f 568//279 4//279 5//279 569//279 +f 569//278 5//278 6//278 570//278 +f 570//277 6//277 7//277 571//277 +f 571//288 7//288 8//288 572//288 
+f 572//287 8//287 9//287 573//287 +f 573//286 9//286 10//286 574//286 +f 574//285 10//285 11//285 575//285 +f 575//284 11//284 12//284 576//284 +f 576//283 12//283 1//283 565//283 +o Suzanne +v -0.976520 1.877188 4.989921 +v -2.349117 1.877188 5.597023 +v -0.897037 1.702837 4.904596 +v -2.465719 1.702837 5.598426 +v -0.872087 1.543900 4.762233 +v -2.587833 1.543900 5.521110 +v -1.126074 1.468613 5.016210 +v -2.229053 1.468613 5.504059 +v -1.087971 1.642608 5.102356 +v -2.190951 1.642608 5.590206 +v -1.090370 1.847073 5.096932 +v -2.193350 1.847073 5.584782 +v -1.215753 1.906592 5.144740 +v -2.073626 1.906592 5.524178 +v -1.331014 1.754296 5.182304 +v -1.968291 1.754296 5.464172 +v -1.444064 1.610060 5.125481 +v -1.934277 1.610060 5.342303 +v -1.633451 1.886314 5.028583 +v -1.878557 1.886314 5.136994 +v -1.485554 1.967177 5.097933 +v -1.926746 1.967177 5.293073 +v -1.294510 2.018635 5.099193 +v -2.054341 2.018635 5.435266 +v -1.278198 2.141882 5.003558 +v -2.136071 2.141882 5.382997 +v -1.444009 2.180058 4.926833 +v -2.081286 2.180058 5.208701 +v -1.589768 2.159070 4.796058 +v -2.079982 2.159070 5.012880 +v -1.331249 2.241708 4.552328 +v -2.434229 2.241708 5.040177 +v -1.248543 2.247639 4.739319 +v -2.351523 2.247639 5.227168 +v -1.176603 2.171997 4.901968 +v -2.279583 2.171997 5.389817 +v -1.038965 2.112478 4.848740 +v -2.411561 2.112478 5.455841 +v -1.010032 2.128600 4.649125 +v -2.578714 2.128600 5.342955 +v -1.017791 2.092909 4.432810 +v -2.733537 2.092909 5.191687 +v -0.829963 1.798100 4.526184 +v -2.790816 1.798100 5.393472 +v -0.864556 1.901016 4.713002 +v -2.629323 1.901016 5.493562 +v -0.961766 1.981879 4.890763 +v -2.432406 1.981879 5.541229 +v -0.940447 1.996582 4.905836 +v -2.435597 1.996582 5.567143 +v -1.020619 2.138384 4.857090 +v -2.417726 2.138384 5.475032 +v -1.168954 2.216459 4.919262 +v -2.271934 2.216459 5.407111 +v -1.279830 2.175140 5.032996 +v -2.113193 2.175140 5.401593 +v -1.305424 2.040689 5.140774 +v -2.016233 2.040689 5.455166 +v 
-1.211439 1.917442 5.187623 +v -2.044801 1.917442 5.556221 +v -1.104807 2.048040 5.064292 +v -2.207787 2.048040 5.552141 +v -1.070827 1.846718 5.141118 +v -2.173806 1.846718 5.628968 +v -0.952227 1.880686 5.011717 +v -2.349334 1.880686 5.629660 +v -1.777516 2.236079 5.034153 +v -1.702459 2.197548 5.203849 +v -1.359803 0.637721 5.978560 +v -1.469394 1.197225 5.730786 +v -1.510880 1.402400 5.636990 +v -1.333184 0.488568 6.038742 +v -1.850174 2.070146 4.869880 +v -1.930747 2.276031 4.687712 +v -2.703733 1.695393 2.940067 +v -2.752624 0.926914 2.829530 +v -2.551693 0.243098 3.283814 +v -2.102766 0.041671 4.298794 +v -1.328206 1.181865 5.188654 +v -1.965483 1.181865 5.470523 +v -1.056945 0.830679 5.338146 +v -2.037371 0.830679 5.771791 +v -0.897541 0.460938 5.532900 +v -2.000520 0.460938 6.020750 +v -0.821352 0.144075 5.638898 +v -1.973353 0.144075 6.148430 +v -0.866346 0.058294 5.702815 +v -1.895794 0.058294 6.158141 +v -1.072148 0.054086 5.866961 +v -1.635893 0.054086 6.116306 +v -1.334477 0.053731 6.035820 +v -0.996516 1.219686 4.944713 +v -2.369112 1.219686 5.551815 +v -0.724257 1.372693 4.732048 +v -2.709620 1.372693 5.610177 +v -0.543624 1.553381 4.312223 +v -3.141754 1.553381 5.461380 +v -0.515540 2.096407 4.243204 +v -3.211713 2.096407 5.435725 +v -0.751078 2.204242 4.340121 +v -2.981548 2.204242 5.326661 +v -1.102573 2.431115 4.473026 +v -2.646745 2.431115 5.156016 +v -1.404469 2.699308 4.519300 +v -2.409406 2.699308 4.963786 +v -1.633366 2.665340 4.697487 +v -2.123580 2.665340 4.914309 +v -1.698730 2.333065 4.947251 +v -1.894815 2.333065 5.033980 +v -1.496078 2.243076 4.974754 +v -2.010802 2.243076 5.202417 +v -1.520257 2.078865 5.085731 +v -1.912428 2.078865 5.259188 +v -1.331014 1.754296 5.182304 +v -1.968291 1.754296 5.464172 +v -1.054322 1.605497 5.079047 +v -2.230834 1.605497 5.599420 +v -0.906462 1.643318 4.916414 +v -2.450634 1.643318 5.599404 +v -0.759295 1.800533 4.685958 +v -2.720148 1.800533 5.553246 +v -0.776414 1.957393 4.580997 +v -2.786288 
1.957393 5.469966 +v -0.858362 2.084138 4.561363 +v -2.745683 2.084138 5.396128 +v -1.120043 2.225230 4.698559 +v -2.468129 2.225230 5.294820 +v -1.391149 2.306803 4.847574 +v -2.175490 2.306803 5.194489 +v -1.327094 0.514474 6.052512 +v -1.173360 0.581700 5.936287 +v -1.516510 0.581700 6.088062 +v -1.130098 0.391582 6.000970 +v -1.497758 0.391582 6.163587 +v -1.207108 0.309654 6.058758 +v -1.403194 0.309654 6.145487 +v -1.306710 0.291099 6.098598 +v -1.535099 1.347089 5.582232 +v -1.560446 1.418167 5.524926 +v -1.398153 1.406963 5.461182 +v -1.716792 1.406963 5.602116 +v -1.327120 1.302272 5.522396 +v -1.719290 1.302272 5.695853 +v -1.369140 1.205286 5.593035 +v -1.638757 1.205286 5.712287 +v -1.011896 1.486458 5.075582 +v -2.261940 1.486458 5.628478 +v -0.734596 1.588007 4.774928 +v -2.670938 1.588007 5.631375 +v -0.633116 1.778835 4.540565 +v -2.912607 1.778835 5.548787 +v -0.642298 2.076787 4.453547 +v -2.970811 2.076787 5.483451 +v -0.702164 2.198969 4.550097 +v -2.859102 2.198969 5.504113 +v -1.104096 2.455602 4.701485 +v -2.476693 2.455602 5.308587 +v -1.313203 2.626809 4.758772 +v -2.293629 2.626809 5.192416 +v -1.466793 2.607898 4.875322 +v -2.104070 2.607898 5.157191 +v -1.559279 2.331645 5.096893 +v -1.877917 2.331645 5.237827 +v -1.338439 1.540349 5.496803 +v -1.730610 1.540349 5.670260 +v -1.131712 0.951796 5.599778 +v -1.793500 0.951796 5.892488 +v -0.985904 0.560001 5.763791 +v -1.770246 0.560001 6.110706 +v -0.930387 0.369884 5.823053 +v -1.763749 0.369884 6.191651 +v -0.961854 0.206028 5.884424 +v -1.697174 0.206028 6.209657 +v -1.066205 0.183619 5.946655 +v -1.580929 0.183619 6.174318 +v -1.313088 0.168562 6.084179 +v -1.640876 1.672368 5.343082 +v -1.680660 1.944413 5.253134 +v -1.280633 2.303305 4.766152 +v -2.310081 2.303305 5.221478 +v -1.405600 1.828873 5.179316 +v -1.920324 1.828873 5.406979 +v -1.476851 1.937062 5.150738 +v -1.893533 1.937062 5.335037 +v -1.172999 0.626517 5.903975 +v -1.540659 0.626517 6.066591 +v -1.317392 0.988552 
5.743161 +v -1.562499 0.988552 5.851572 +v -1.439945 0.988552 5.797366 +v -1.489081 1.149265 5.686275 +v -1.340171 1.264451 5.625402 +v -1.634299 1.264451 5.755496 +v -1.287671 1.346379 5.578455 +v -1.704353 1.346379 5.762754 +v -1.369184 1.466128 5.493548 +v -1.712334 1.466128 5.645324 +v -1.482456 1.477332 5.535611 +v -1.605009 1.477332 5.589816 +v -1.486804 1.409396 5.691423 +v -1.443151 1.473124 5.591346 +v -1.590215 1.473124 5.656392 +v -1.366646 1.461919 5.565546 +v -1.660774 1.461919 5.695639 +v -1.306309 1.375783 5.635704 +v -1.649458 1.375783 5.787479 +v -1.360006 1.320117 5.646815 +v -1.605112 1.320117 5.755226 +v -1.467691 1.264096 5.734635 +v -1.199373 0.995245 5.248030 +v -2.008225 0.995245 5.605786 +v -1.282557 1.243107 5.457505 +v -1.797280 1.243107 5.685168 +v -1.231284 1.142269 5.507170 +v -1.795029 1.142269 5.756516 +v -1.259928 1.084880 5.210508 +v -1.995248 1.084880 5.535741 +v -1.312657 0.313507 6.085153 +v -1.242098 0.324712 6.045907 +v -1.389162 0.324712 6.110953 +v -1.172811 0.413991 6.003786 +v -1.466939 0.413991 6.133879 +v -1.193483 0.540736 5.957050 +v -1.487610 0.540736 6.087143 +v -1.366468 0.418554 5.963491 +v -1.226766 0.470722 5.881799 +v -1.520894 0.470722 6.011892 +v -1.216574 0.359035 5.904842 +v -1.510702 0.359035 6.034936 +v -1.279770 0.295662 5.960734 +v -1.426834 0.295662 6.025780 +v -1.350329 0.284458 5.999980 +v -1.404952 1.970320 5.147653 +v -1.944186 1.970320 5.386157 +v -1.361185 1.873335 5.180348 +v -1.949441 1.873335 5.440535 +v -1.241472 2.250782 4.821562 +v -2.295431 2.250782 5.287730 +v -1.327477 2.254280 4.892143 +v -2.185350 2.254280 5.271582 +v -1.085705 2.220667 4.809321 +v -2.409281 2.220667 5.394741 +v -0.892589 2.079930 4.649622 +v -2.657357 2.079930 5.430181 +v -0.836567 1.982944 4.676897 +v -2.674866 1.982944 5.489979 +v -0.817672 1.841142 4.752746 +v -2.631461 1.841142 5.554987 +v -0.918648 1.743446 4.955122 +v -2.413798 1.743446 5.616428 +v -1.049503 1.709479 5.089943 +v -2.226014 1.709479 5.610316 +v 
-1.277535 1.813460 5.203830 +v -1.988344 1.813460 5.518222 +v -1.422432 2.082363 5.075003 +v -1.986177 2.082363 5.324348 +v -1.403146 2.194406 4.986092 +v -2.064934 2.194406 5.278801 +v -1.374030 2.149943 4.952536 +v -2.109349 2.149943 5.277769 +v -1.411518 2.060309 5.033422 +v -2.024284 2.060309 5.304449 +v -1.272568 1.813815 5.148803 +v -2.032398 1.813815 5.484877 +v -1.067488 1.728389 5.049281 +v -2.243999 1.728389 5.569654 +v -0.958170 1.751153 4.932023 +v -2.404299 1.751153 5.571648 +v -0.877172 1.856199 4.750735 +v -2.592918 1.856199 5.509612 +v -0.891679 1.957038 4.684808 +v -2.631936 1.957038 5.454526 +v -0.944728 2.042819 4.664256 +v -2.611453 2.042819 5.401451 +v -1.108583 2.187409 4.790725 +v -2.407648 2.187409 5.365303 +v -1.310833 2.213316 4.896645 +v -2.193217 2.213316 5.286924 +v -1.237084 2.209818 4.831484 +v -2.291042 2.209818 5.297652 +v -1.356218 1.873690 5.125321 +v -1.993495 1.873690 5.407190 +v -1.389288 1.952120 5.083682 +v -2.002054 1.952120 5.354709 +v -1.694882 2.155927 4.757179 +v -2.038031 2.155927 4.908954 +v -1.632854 2.454589 4.533003 +v -2.245620 2.454589 4.804031 +v -1.434775 2.466148 4.384525 +v -2.488733 2.466148 4.850692 +v -1.174034 2.238920 4.344590 +v -2.693695 2.238920 5.016738 +v -0.865251 2.034455 4.214500 +v -2.997679 2.034455 5.157676 +v -0.681709 1.937824 4.132543 +v -3.181796 1.937824 5.238335 +v -0.676148 1.509629 4.244501 +v -3.102703 1.509629 5.317770 +v -0.860660 1.311095 4.556168 +v -2.747981 1.311095 5.390933 +v -1.050614 1.228102 4.822402 +v -2.423211 1.228102 5.429503 +v -2.218793 2.481968 4.036467 +v -2.464513 2.259710 3.480919 +v -2.359949 0.009176 3.717328 +v -1.760312 0.436859 5.073048 +v -1.405433 -0.045332 5.875396 +v -1.538833 0.090894 5.573791 +v -1.641636 0.404969 5.341363 +v -1.697006 0.491460 5.216177 +v -0.766175 1.309070 3.709673 +v -3.437836 1.309070 4.891353 +v -0.845546 1.336752 3.497093 +v -3.541718 1.336752 4.689614 +v -1.186147 0.890763 3.091443 +v -3.612702 0.890763 4.164712 +v -1.895871 
0.887318 2.811972 +v -3.342000 0.887318 3.451597 +v -0.833892 0.920418 4.053500 +v -3.137894 0.920418 5.072564 +v -1.160717 0.587840 3.910897 +v -3.023527 0.587840 4.734821 +v -1.285881 0.505965 3.429142 +v -3.295755 0.505965 4.318112 +v -1.923592 0.375064 3.279358 +v -2.977550 0.375064 3.745525 +v -1.307382 0.799552 5.103219 +v -2.042702 0.799552 5.428452 +v -1.455492 0.570245 5.000258 +v -2.019237 0.570245 5.249603 +v -1.098408 0.262102 5.343789 +v -2.005302 0.262102 5.744909 +v -1.235438 0.571968 5.199619 +v -2.019779 0.571968 5.546535 +v -0.950754 -0.014507 5.511975 +v -1.980202 -0.014507 5.967301 +v -1.322482 0.180174 5.466623 +v -1.763674 0.180174 5.661763 +v -1.434784 0.486542 5.278976 +v -1.826955 0.486542 5.452434 +v -1.173561 -0.022568 5.703932 +v -1.688285 -0.022568 5.931595 +v -1.345059 0.922444 5.084293 +v -2.031357 0.922444 5.387844 +v -1.355468 1.037629 5.093887 +v -2.017256 1.037629 5.386597 +v -1.370410 1.145464 5.093235 +v -2.007687 1.145464 5.375103 +v -1.469777 0.515644 4.835446 +v -2.131565 0.515644 5.128156 +v -1.613972 0.223373 4.145021 +v -2.545377 0.223373 4.556983 +v -1.761510 0.201371 3.612681 +v -2.839978 0.201371 4.089689 +v -1.885854 1.804950 2.867748 +v -3.307473 1.804950 3.496532 +v -1.728357 2.188632 3.223834 +v -3.149975 2.188632 3.852618 +v -1.521868 2.363284 3.690686 +v -2.943486 2.363284 4.319469 +v -1.271420 2.076484 4.223796 +v -2.717549 2.076484 4.863420 +v -0.864521 1.820206 4.017380 +v -3.144012 1.820206 5.025602 +v -1.061151 1.835974 3.970362 +v -3.046515 1.835974 4.848491 +v -1.275482 1.981326 3.452653 +v -3.285356 1.981326 4.341623 +v -0.936062 1.845810 3.557475 +v -3.436149 1.845810 4.663267 +v -1.097373 1.696354 3.192766 +v -3.597460 1.696354 4.298558 +v -1.438352 1.813314 3.084420 +v -3.448226 1.813314 3.973390 +v -1.556618 1.477239 2.817031 +v -3.566492 1.477239 3.706001 +v -1.208134 1.356425 2.942347 +v -3.708221 1.356425 4.048139 +v -1.541153 0.840725 2.951383 +v -3.477495 0.840725 3.807830 +v -1.610864 0.440514 
3.356960 +v -3.130525 0.440514 4.029108 +v -1.000439 1.200933 3.312539 +v -3.574059 1.200933 4.450854 +v -1.255718 0.814662 4.491197 +v -2.530272 0.814662 5.054935 +v -1.418510 0.442895 4.023755 +v -2.766596 0.442895 4.620015 +v -0.938005 1.283571 3.155538 +v -3.732220 1.283571 4.391423 +v -0.850236 0.602188 3.850904 +v -3.276792 0.602188 4.924173 +v -0.566259 0.467079 3.366576 +v -3.826176 0.467079 4.808442 +v -0.304733 0.595599 2.930873 +v -4.324480 0.595599 4.708813 +v -0.291003 0.983895 2.663756 +v -4.531347 0.983895 4.539266 +v -0.546199 1.252798 2.583714 +v -4.418882 1.252798 4.296607 +v -0.801748 1.310897 2.900415 +v -4.012644 1.310897 4.320599 +v -0.776619 1.243317 2.990359 +v -3.963004 1.243317 4.399702 +v -0.574840 1.181364 2.717731 +v -4.300460 1.181364 4.365577 +v -0.404853 0.953781 2.770767 +v -4.375580 0.953781 4.527025 +v -0.410355 0.651265 2.990230 +v -4.209507 0.651265 4.670600 +v -0.588706 0.578766 3.348953 +v -3.824113 0.578766 4.779979 +v -0.795743 0.695675 3.742208 +v -3.393873 0.695675 4.891364 +v -0.862078 1.231047 3.194686 +v -3.754336 1.231047 4.473936 +v -0.845287 1.086457 3.133265 +v -3.811077 1.086457 4.445037 +v -0.773119 0.689389 3.561459 +v -3.542823 0.689389 4.786503 +v -0.627576 0.575978 3.227944 +v -3.887493 0.575978 4.669811 +v -0.475729 0.636918 2.941812 +v -4.201348 0.636918 4.589659 +v -0.461668 0.861004 2.774830 +v -4.334352 0.861004 4.487724 +v -0.596808 1.025214 2.734320 +v -4.273407 1.025214 4.360485 +v -0.778391 1.076318 2.953224 +v -3.989287 1.076318 4.373408 +v -0.953338 1.137560 3.319645 +v -3.600489 1.137560 4.490483 +v -0.957247 0.910686 3.343935 +v -3.579887 0.910686 4.503933 +v -1.050065 0.798644 3.465370 +v -3.427598 0.798644 4.516956 +v -0.949048 0.787439 3.428729 +v -3.522667 0.787439 4.567044 +v -0.885520 0.686601 3.472973 +v -3.532671 0.686601 4.643812 +v -0.922647 0.641784 3.521547 +v -3.471756 0.641784 4.649021 +v -0.945567 0.855323 3.834142 +v -3.225059 0.855323 4.842364 +v -1.007820 0.726145 3.726524 +v 
-3.262800 0.726145 4.723905 +v -1.040673 0.801077 3.652247 +v -3.295653 0.801077 4.649628 +v -0.994161 1.014313 3.426120 +v -3.494247 1.014313 4.531912 +v -0.893689 1.018876 3.255732 +v -3.687904 1.018876 4.491617 +v -0.922441 0.956214 3.190728 +v -3.716656 0.956214 4.426613 +v -0.949840 0.597677 3.460067 +v -3.498949 0.597677 4.587541 +v -0.900458 0.642494 3.406072 +v -3.572119 0.642494 4.587752 +v -0.961012 0.732128 3.368551 +v -3.559142 0.732128 4.517707 +v -1.065002 0.754537 3.398469 +v -3.467046 0.754537 4.460897 +v -0.972184 0.866579 3.277035 +v -3.619335 0.866579 4.447873 +v -0.779658 1.002451 2.884101 +v -4.039576 1.002451 4.325968 +v -0.593543 0.958699 2.675445 +v -4.319163 0.958699 4.323292 +v -0.449121 0.805692 2.703811 +v -4.395337 0.805692 4.449228 +v -0.458650 0.588958 2.881039 +v -4.257802 0.588958 4.561409 +v -0.647045 0.524520 3.150797 +v -3.931473 0.524520 4.603504 +v -0.807817 0.649135 3.483009 +v -3.577522 0.649135 4.708053 +v -0.858809 1.012590 3.069563 +v -3.849110 1.012590 4.392177 +v -0.879396 0.769594 3.288048 +v -3.673611 0.769594 4.523933 +v -0.792555 0.695017 3.285615 +v -3.733834 0.695017 4.586547 +v -0.736429 0.755247 3.147480 +v -3.873793 0.755247 4.535141 +v -0.806483 0.837175 3.154739 +v -3.821294 0.837175 4.488194 +v -0.758081 0.904755 3.032272 +v -3.944466 0.904755 4.441615 +v -0.683495 0.830179 3.035260 +v -3.992434 0.830179 4.498808 +v -0.611160 0.856440 2.966901 +v -4.091674 0.856440 4.506337 +v -0.671715 0.946074 2.929379 +v -4.078696 0.946074 4.436292 +v -0.886588 1.093150 2.708601 +v -4.097484 1.093150 4.128785 +v -0.579334 1.079158 2.442540 +v -4.501040 1.079158 4.177115 +v -0.302894 0.876771 2.570616 +v -4.592258 0.876771 4.467808 +v -0.314629 0.500034 2.775984 +v -4.432420 0.500034 4.597288 +v -0.667381 0.335113 3.137949 +v -3.927298 0.335113 4.579815 +v -0.949509 0.433466 3.560203 +v -3.425085 0.433466 4.655153 +v -1.064217 1.110286 3.002701 +v -3.760389 1.110286 4.195221 +vn 0.8960 0.2268 0.3818 +vn -0.3203 0.2268 
0.9198 +vn 0.9845 0.0034 0.1754 +vn -0.5326 0.0034 0.8464 +vn 0.7065 -0.4192 0.5701 +vn -0.0535 -0.4192 0.9063 +vn 0.7066 0.0035 0.7076 +vn 0.0482 0.0035 0.9988 +vn 0.3313 0.0089 0.9435 +vn 0.4752 0.0089 0.8798 +vn 0.0986 -0.4464 0.8894 +vn 0.5917 -0.4464 0.6713 +vn -0.4471 0.0170 0.8943 +vn 0.9625 0.0170 0.2709 +vn -0.0944 0.3104 0.9459 +vn 0.7633 0.3104 0.5665 +vn -0.1881 0.6302 0.7533 +vn 0.6839 0.6302 0.3677 +vn -0.5873 0.5758 0.5688 +vn 0.8159 0.5758 -0.0519 +vn -0.2365 0.9676 0.0881 +vn 0.2243 0.9676 -0.1157 +vn 0.0629 0.9241 0.3769 +vn 0.2365 0.9241 0.3001 +vn 0.4732 0.8657 0.1631 +vn -0.1976 0.8657 0.4598 +vn 0.3741 0.9102 -0.1778 +vn -0.3833 0.9102 0.1571 +vn 0.8529 0.5086 -0.1181 +vn -0.6612 0.5086 0.5515 +vn 0.8119 0.5567 0.1759 +vn -0.4161 0.5567 0.7190 +vn 0.5052 0.0043 -0.8630 +vn -0.9784 0.0043 -0.2068 +vn -0.1749 0.3935 -0.9025 +vn -0.5501 0.3935 -0.7366 +vn -0.7153 0.4000 -0.5731 +vn 0.0572 0.4000 -0.9147 +vn -0.9827 0.0141 -0.1846 +vn 0.5246 0.0141 -0.8513 +vn -0.8241 -0.5467 0.1486 +vn 0.6643 -0.5467 -0.5097 +vn -0.3688 -0.9054 0.2102 +vn 0.4036 -0.9054 -0.1315 +vn 0.1893 -0.9787 -0.0792 +vn -0.1859 -0.9787 0.0868 +vn 0.6442 -0.5609 -0.5201 +vn -0.8181 -0.5609 0.1267 +vn 0.6889 0.4495 0.5686 +vn -0.0428 0.4495 0.8923 +vn 0.6360 0.3665 0.6791 +vn 0.0746 0.3665 0.9274 +vn 0.4581 0.3834 0.8020 +vn 0.2852 0.3834 0.8785 +vn 0.2867 0.5236 0.8023 +vn 0.4007 0.5236 0.7519 +vn 0.2746 0.5690 0.7751 +vn 0.3887 0.5690 0.7246 +vn 0.3841 0.6821 0.6222 +vn 0.2020 0.6821 0.7028 +vn 0.5582 0.6532 0.5116 +vn 0.0030 0.6532 0.7572 +vn 0.6626 0.5496 0.5088 +vn -0.0693 0.5496 0.8326 +vn 0.4703 -0.4503 0.7590 +vn 0.2451 -0.4503 0.8586 +vn 0.5450 -0.4750 0.6909 +vn 0.1445 -0.4750 0.8680 +vn 0.8662 -0.0147 0.4994 +vn -0.2133 -0.0147 0.9769 +vn 0.9300 0.3074 0.2017 +vn -0.4764 0.3073 0.8238 +vn 0.9026 0.4152 0.1134 +vn -0.5234 0.4152 0.7441 +vn 0.6384 -0.4246 0.6420 +vn 0.0455 -0.4246 0.9042 +vn 0.7169 -0.2022 0.6672 +vn 0.0114 -0.2022 0.9793 +vn 0.9233 -0.0266 0.3831 +vn 
-0.3378 -0.0266 0.9409 +vn 0.9059 0.0575 0.4196 +vn -0.2990 0.0575 0.9525 +vn 0.6715 0.7219 0.1671 +vn -0.3281 0.7219 0.6093 +vn 0.5127 0.8535 -0.0928 +vn -0.4136 0.8535 0.3169 +vn 0.6460 0.7620 -0.0457 +vn -0.4684 0.7620 0.4472 +vn 0.0664 0.9636 0.2590 +vn 0.1470 0.9636 0.2234 +vn -0.5268 0.6017 0.6004 +vn 0.7986 0.6017 0.0142 +vn -0.4621 0.7869 0.4089 +vn 0.6134 0.7869 -0.0668 +vn 0.8300 0.0837 0.5514 +vn -0.1504 0.0837 0.9851 +vn 0.5951 0.0770 0.7999 +vn 0.1915 0.0770 0.9785 +vn 0.3306 0.0175 0.9436 +vn 0.4758 0.0175 0.8794 +vn 0.3238 -0.1388 0.9359 +vn 0.4746 -0.1388 0.8692 +vn 0.4458 -0.0225 0.8949 +vn 0.3622 -0.0225 0.9318 +vn 0.6186 0.3356 0.7104 +vn 0.1094 0.3356 0.9356 +vn 0.6957 0.3447 0.6302 +vn -0.0017 0.3447 0.9387 +vn 0.6742 0.2514 0.6944 +vn 0.0601 0.2514 0.9660 +vn 0.6434 0.5576 0.5245 +vn -0.0448 0.5576 0.8289 +vn 0.1395 0.7473 0.6496 +vn 0.3868 0.7473 0.5403 +vn 0.7321 0.0832 0.6761 +vn 0.0077 0.0832 0.9965 +vn 0.5462 0.3148 0.7762 +vn 0.2068 0.3148 0.9263 +vn 0.3840 0.3751 0.8437 +vn 0.3659 0.3751 0.8517 +vn 0.4619 -0.1909 0.8661 +vn 0.3301 -0.1909 0.9244 +vn 0.5675 -0.0404 0.8224 +vn 0.2267 -0.0404 0.9731 +vn 0.6564 0.2724 0.7035 +vn 0.0790 0.2724 0.9589 +vn 0.6078 0.4999 0.6170 +vn 0.0476 0.4999 0.8648 +vn 0.6442 0.4043 0.6492 +vn 0.0469 0.4043 0.9134 +vn 0.3124 0.5956 0.7400 +vn 0.3374 0.5956 0.7290 +vn 0.3496 0.4925 0.7970 +vn 0.3544 0.4925 0.7949 +vn 0.3382 0.5486 0.7646 +vn 0.8600 -0.5045 -0.0763 +vn -0.6350 -0.5045 0.5850 +vn 0.7448 0.0744 -0.6632 +vn -0.9917 0.0744 0.1049 +vn -0.2654 0.5323 -0.8039 +vn -0.4162 0.5323 -0.7371 +vn -0.7839 0.4644 0.4121 +vn 0.8323 0.4644 -0.3028 +vn 0.6550 -0.6138 0.4408 +vn -0.1145 -0.6138 0.7811 +vn 0.6495 -0.0602 0.7580 +vn 0.1239 -0.0602 0.9905 +vn -0.5770 0.7545 0.3128 +vn 0.6196 0.7545 -0.2164 +vn 0.1257 0.9918 0.0242 +vn -0.0667 0.9918 0.1093 +vn 0.8259 0.5637 -0.0070 +vn -0.5608 0.5637 0.6063 +vn 0.7112 -0.2761 0.6465 +vn -0.0002 -0.2761 0.9611 +vn 0.3196 0.7265 0.6083 +vn 0.2350 0.7265 0.6457 +vn 
0.4293 0.1661 0.8878 +vn 0.3680 0.1661 0.9149 +vn -0.0735 0.3486 0.9344 +vn 0.7408 0.3486 0.5743 +vn 0.1925 0.1139 0.9747 +vn 0.5916 0.1139 0.7981 +vn 0.7271 0.3812 0.5709 +vn -0.0668 0.3812 0.9221 +vn 0.5860 0.5644 0.5814 +vn 0.0359 0.5644 0.8247 +vn 0.8450 0.5257 -0.0976 +vn -0.6407 0.5257 0.5595 +vn 0.8818 0.4398 -0.1702 +vn -0.7192 0.4398 0.5379 +vn 0.9262 0.3017 -0.2260 +vn -0.7903 0.3017 0.5332 +vn 0.5941 -0.1503 0.7902 +vn 0.1849 -0.1503 0.9712 +vn 0.2311 0.3639 0.9023 +vn 0.5121 0.3639 0.7780 +vn 0.0963 0.4390 0.8933 +vn 0.5961 0.4390 0.6722 +vn 0.1975 0.6985 0.6878 +vn 0.3760 0.6985 0.6088 +vn -0.2636 0.9426 -0.2049 +vn 0.0258 0.9426 -0.3329 +vn -0.7464 0.6650 0.0267 +vn 0.5219 0.6650 -0.5342 +vn -0.9145 -0.0000 0.4045 +vn 0.9145 -0.0000 -0.4045 +vn 0.5287 -0.7253 0.4410 +vn -0.0294 -0.7253 0.6878 +vn 0.4161 0.2741 0.8670 +vn 0.3616 0.2741 0.8912 +vn 0.5147 0.2180 0.8292 +vn 0.2673 0.2180 0.9386 +vn -0.1281 0.2172 0.9677 +vn 0.8021 0.2172 0.5563 +vn -0.0302 0.4709 0.8817 +vn 0.6727 0.4709 0.5708 +vn 0.1318 0.5674 0.8128 +vn 0.5127 0.5674 0.6443 +vn -0.1917 -0.0325 0.9809 +vn 0.8547 -0.0325 0.5181 +vn 0.2595 -0.2766 0.9253 +vn 0.5100 -0.2766 0.8145 +vn 0.7391 -0.1338 0.6602 +vn -0.0087 -0.1338 0.9910 +vn 0.8557 0.0425 0.5156 +vn -0.1942 0.0425 0.9800 +vn 0.8164 0.3679 0.4452 +vn -0.2199 0.3679 0.9035 +vn 0.7703 0.5446 0.3317 +vn -0.2729 0.5446 0.7931 +vn 0.5436 0.8386 0.0367 +vn -0.3386 0.8386 0.4268 +vn 0.3552 0.9211 0.1593 +vn -0.1211 0.9211 0.3700 +vn 0.3804 0.8053 0.4548 +vn 0.0805 0.8053 0.5874 +vn 0.2558 0.7397 0.6224 +vn 0.2884 0.7397 0.6080 +vn 0.6786 0.1169 0.7251 +vn 0.0799 0.1169 0.9899 +vn 0.6273 0.2884 0.7234 +vn 0.1132 0.2884 0.9508 +vn 0.1539 -0.1524 0.9763 +vn 0.6188 -0.1524 0.7706 +vn 0.3442 -0.4144 0.8425 +vn 0.3918 -0.4144 0.8215 +vn 0.0401 0.2753 0.9605 +vn 0.6836 0.2753 0.6759 +vn -0.0033 0.5042 0.8636 +vn 0.6411 0.5042 0.5786 +vn 0.0742 0.8667 0.4932 +vn 0.3150 0.8667 0.3867 +vn 0.0982 0.9467 0.3066 +vn 0.1608 0.9467 0.2790 +vn 0.4461 
0.8872 0.1179 +vn -0.2129 0.8872 0.4094 +vn 0.5705 0.8193 0.0568 +vn -0.3418 0.8193 0.4603 +vn 0.9091 0.0870 0.4073 +vn -0.3103 0.0870 0.9467 +vn 0.9566 0.2531 0.1444 +vn -0.5368 0.2531 0.8049 +vn 0.8975 0.4316 0.0909 +vn -0.5365 0.4316 0.7251 +vn -0.7485 0.5431 -0.3805 +vn 0.2221 0.5431 -0.8098 +vn -0.9865 0.1043 -0.1263 +vn 0.5702 0.1043 -0.8148 +vn -0.5627 0.4877 -0.6675 +vn -0.1153 0.4877 -0.8653 +vn 0.2736 0.4579 -0.8458 +vn -0.8099 0.4579 -0.3666 +vn 0.0373 0.5612 -0.8268 +vn -0.6368 0.5612 -0.5287 +vn -0.0424 0.6072 -0.7934 +vn -0.5585 0.6072 -0.5652 +vn 0.5844 -0.1617 -0.7952 +vn -0.9815 -0.1617 -0.1025 +vn 0.5011 -0.8448 -0.1878 +vn -0.4761 -0.8448 0.2444 +vn 0.3847 -0.9194 -0.0818 +vn -0.3193 -0.9194 0.2296 +vn 0.0786 -0.9133 -0.3996 +vn -0.3485 -0.9133 -0.2107 +vn -0.0904 -0.5797 -0.8098 +vn -0.5383 -0.5797 -0.6117 +vn -0.0919 -0.8639 -0.4952 +vn -0.3046 -0.8639 -0.4012 +vn 0.2767 -0.8985 0.3407 +vn 0.0660 -0.8985 0.4339 +vn 0.2464 -0.9324 0.2644 +vn 0.0299 -0.9324 0.3602 +vn 0.7846 -0.1611 -0.5986 +vn -0.9708 -0.1611 0.1778 +vn 0.7064 -0.0417 -0.7065 +vn -0.9980 -0.0417 0.0473 +vn -0.1895 -0.5238 -0.8305 +vn -0.4869 -0.5238 -0.6989 +vn -0.2989 -0.7173 -0.6294 +vn -0.2646 -0.7173 -0.6446 +vn 0.5553 -0.5674 -0.6080 +vn -0.8234 -0.5674 0.0018 +vn 0.6860 -0.0865 -0.7225 +vn -0.9960 -0.0865 0.0215 +vn 0.7151 0.1548 -0.6816 +vn -0.9854 0.1548 0.0705 +vn 0.6804 0.1546 -0.7164 +vn -0.9877 0.1546 0.0215 +vn 0.7657 0.2458 -0.5944 +vn -0.9549 0.2458 0.1666 +vn 0.2075 -0.9776 0.0339 +vn -0.1146 -0.9776 0.1763 +vn 0.1739 -0.8341 -0.5236 +vn -0.5043 -0.8341 -0.2236 +vn 0.3103 -0.9483 -0.0664 +vn -0.2579 -0.9483 0.1850 +vn 0.4231 -0.8662 0.2660 +vn -0.0878 -0.8662 0.4920 +vn 0.4328 -0.8785 0.2025 +vn -0.1413 -0.8785 0.4564 +vn 0.9184 -0.2076 -0.3369 +vn -0.8671 -0.2076 0.4528 +vn 0.9513 -0.2893 0.1069 +vn -0.5609 -0.2893 0.7757 +vn 0.0260 -0.6078 -0.7937 +vn -0.6047 -0.6078 -0.5147 +vn -0.0244 0.9355 -0.3524 +vn -0.2443 0.9355 -0.2551 +vn -0.1622 0.7131 -0.6820 +vn 
-0.3954 0.7131 -0.5788 +vn -0.0575 0.1000 -0.9933 +vn -0.6962 0.1000 -0.7108 +vn 0.1315 0.6714 -0.7293 +vn -0.6280 0.6714 -0.3934 +vn 0.2447 0.9647 0.0971 +vn -0.0928 0.9647 0.2464 +vn 0.4353 0.7399 -0.5130 +vn -0.6724 0.7399 -0.0231 +vn 0.4227 0.4565 -0.7829 +vn -0.8636 0.4565 -0.2139 +vn 0.3240 -0.2253 -0.9189 +vn -0.8978 -0.2253 -0.3785 +vn 0.1015 -0.0499 -0.9936 +vn -0.8034 -0.0499 -0.5933 +vn 0.3623 0.5271 -0.7687 +vn -0.8125 0.5271 -0.2491 +vn 0.5580 0.6515 -0.5141 +vn -0.7557 0.6515 0.0669 +vn 0.7716 0.6308 -0.0819 +vn -0.5797 0.6308 0.5158 +vn 0.0546 0.6877 -0.7240 +vn -0.5724 0.6877 -0.4467 +vn 0.3463 0.9115 0.2218 +vn -0.0689 0.9115 0.4054 +vn -0.5120 -0.4498 -0.7318 +vn -0.1970 -0.4498 -0.8711 +vn -0.0285 0.9851 -0.1693 +vn -0.1061 0.9851 -0.1350 +vn 0.1391 0.6862 0.7140 +vn 0.4347 0.6862 0.5833 +vn 0.8680 0.1915 -0.4581 +vn -0.9229 0.1915 0.3340 +vn 0.9498 0.2457 -0.1937 +vn -0.7823 0.2457 0.5724 +vn 0.8311 0.1230 -0.5423 +vn -0.9604 0.1230 0.2500 +vn 0.8658 -0.0326 -0.4994 +vn -0.9519 -0.0326 0.3046 +vn 0.8232 -0.5585 0.1021 +vn -0.4783 -0.5585 0.6777 +vn 0.5741 -0.7264 0.3779 +vn -0.1066 -0.7264 0.6790 +vn 0.6783 -0.5960 0.4297 +vn -0.1384 -0.5960 0.7909 +vn 0.8042 -0.5017 0.3187 +vn -0.3053 -0.5017 0.8094 +vn 0.8926 -0.4004 0.2072 +vn -0.4472 -0.4004 0.7998 +vn 0.6296 0.0919 0.7715 +vn 0.1472 0.0919 0.9848 +vn 0.3203 -0.6869 -0.6524 +vn -0.6982 -0.6869 -0.2019 +vn 0.2430 -0.6840 -0.6878 +vn -0.6724 -0.6840 -0.2830 +vn 0.3380 -0.9408 -0.0244 +vn -0.2454 -0.9408 0.2336 +vn 0.7048 -0.6846 -0.1859 +vn -0.6117 -0.6846 0.3964 +vn 0.2998 -0.7785 -0.5514 +vn -0.6097 -0.7785 -0.1492 +vn 0.5255 0.7877 0.3215 +vn -0.1157 0.7877 0.6051 +vn 0.6747 0.6223 0.3968 +vn -0.1603 0.6223 0.7662 +vn 0.5376 0.6150 0.5769 +vn 0.0652 0.6150 0.7858 +vn 0.5691 0.4629 0.6796 +vn 0.1200 0.4629 0.8783 +vn 0.7368 0.4347 0.5179 +vn -0.1125 0.4347 0.8935 +vn 0.8807 0.1409 0.4523 +vn -0.2579 0.1409 0.9559 +vn 0.4692 0.8816 -0.0518 +vn -0.3539 0.8816 0.3123 +vn -0.2631 0.9626 0.0645 
+vn 0.2247 0.9626 -0.1512 +vn -0.6356 0.4720 0.6110 +vn 0.8796 0.4720 -0.0592 +vn -0.1397 0.1257 0.9822 +vn 0.8207 0.1257 0.5574 +vn 0.7739 -0.0966 0.6258 +vn -0.0576 -0.0966 0.9936 +vn 0.9274 -0.0714 0.3672 +vn -0.3522 -0.0714 0.9332 +vn 0.7737 -0.6326 -0.0357 +vn -0.5469 -0.6326 0.5484 +vn 0.0786 0.7541 0.6520 +vn 0.4296 0.7541 0.4968 +vn 0.8411 -0.4245 -0.3352 +vn -0.8139 -0.4245 0.3967 +vn 0.3924 0.6687 0.6316 +vn 0.2033 0.6687 0.7152 +vn -0.0105 0.9728 -0.2315 +vn -0.1642 0.9728 -0.1635 +vn 0.8659 0.3428 0.3642 +vn -0.3131 0.3428 0.8857 +vn 0.5486 0.8176 0.1750 +vn -0.2396 0.8176 0.5236 +vn 0.9592 -0.2153 0.1830 +vn -0.5099 -0.2153 0.8328 +vn 0.8603 0.0396 0.5083 +vn -0.2027 0.0396 0.9784 +vn 0.8667 0.0143 0.4986 +vn -0.2142 0.0143 0.9767 +vn 0.9179 -0.1833 -0.3519 +vn -0.8779 -0.1833 0.4424 +vn 0.8809 -0.3960 -0.2592 +vn -0.7844 -0.3960 0.4774 +vn 0.8915 -0.0549 -0.4497 +vn -0.9325 -0.0550 0.3570 +vn 0.8791 -0.4415 0.1796 +vn -0.4585 -0.4415 0.7712 +vn 0.8907 -0.4494 0.0688 +vn -0.5483 -0.4494 0.7052 +vn 0.8376 -0.5229 0.1577 +vn -0.4468 -0.5229 0.7259 +vn -0.0668 0.7927 -0.6060 +vn -0.4034 0.7927 -0.4571 +vn 0.8254 0.3464 -0.4457 +vn -0.8851 0.3464 0.3108 +vn 0.7909 -0.5936 0.1483 +vn -0.4224 -0.5936 0.6850 +vn -0.2031 0.8757 -0.4381 +vn -0.1875 0.8757 -0.4450 +vn 0.8405 -0.4064 0.3584 +vn -0.3003 -0.4064 0.8630 +vn 0.5693 -0.5495 0.6115 +vn 0.0694 -0.5495 0.8326 +vn -0.7233 -0.5454 0.4235 +vn 0.7999 -0.5454 -0.2502 +vn -0.9838 -0.0745 -0.1628 +vn 0.5414 -0.0745 -0.8374 +vn -0.7771 0.5168 -0.3591 +vn 0.2571 0.5168 -0.8165 +vn -0.2495 0.8697 -0.4258 +vn -0.1472 0.8697 -0.4711 +vn 0.4618 0.5613 0.6868 +vn 0.1975 0.5613 0.8037 +vn 0.6040 0.5710 0.5560 +vn 0.0050 0.5710 0.8209 +vn 0.6020 0.5403 0.5880 +vn 0.0301 0.5403 0.8409 +vn 0.4834 0.5632 0.6702 +vn 0.1707 0.5632 0.8085 +vn 0.5576 0.7280 0.3988 +vn -0.0801 0.7280 0.6809 +vn 0.6836 0.4736 0.5553 +vn -0.0491 0.4736 0.8794 +vn 0.5748 0.4787 0.6637 +vn 0.1044 0.4787 0.8718 +vn 0.3981 0.5385 0.7426 +vn 0.2816 
0.5385 0.7941 +vn -0.2825 0.9232 0.2605 +vn 0.3827 0.9232 -0.0338 +vn 0.8306 0.4553 0.3207 +vn -0.3215 0.4553 0.8303 +vn 0.7482 0.3108 0.5861 +vn -0.0697 0.3108 0.9479 +vn 0.7937 0.2790 0.5406 +vn -0.1340 0.2790 0.9509 +vn 0.6965 0.6629 0.2747 +vn -0.2653 0.6629 0.7001 +vn 0.4001 0.9010 0.1680 +vn -0.1449 0.9010 0.4090 +vn 0.2781 -0.8739 0.3987 +vn 0.1079 -0.8739 0.4740 +vn 0.6526 -0.7194 0.2379 +vn -0.2630 -0.7194 0.6429 +vn 0.9975 -0.0580 -0.0402 +vn -0.7008 -0.0580 0.7110 +vn 0.6344 0.4328 -0.6405 +vn -0.9007 0.4328 0.0385 +vn -0.5108 0.6412 -0.5727 +vn -0.0800 0.6412 -0.7632 +vn -0.6511 0.6684 -0.3597 +vn 0.1719 0.6684 -0.7237 +vn -0.7752 -0.4704 -0.4217 +vn 0.2095 -0.4704 -0.8572 +vn -0.5012 -0.5585 -0.6610 +vn -0.1518 -0.5585 -0.8155 +vn 0.0550 -0.4792 -0.8760 +vn -0.6851 -0.4792 -0.5486 +vn -0.8467 0.5318 0.0187 +vn 0.5834 0.5318 -0.6138 +vn 0.2668 -0.6191 -0.7386 +vn -0.7260 -0.6191 -0.2995 +vn -0.1210 -0.9333 0.3380 +vn 0.3315 -0.9333 0.1378 +usemtl None +s off +f 623//292 577//292 579//292 621//292 +f 580//293 578//293 624//293 622//293 +f 621//294 579//294 581//294 619//294 +f 582//295 580//295 622//295 620//295 +f 579//296 585//296 583//296 581//296 +f 584//297 586//297 580//297 582//297 +f 577//298 587//298 585//298 579//298 +f 586//299 588//299 578//299 580//299 +f 587//300 589//300 591//300 585//300 +f 592//301 590//301 588//301 586//301 +f 585//302 591//302 593//302 583//302 +f 594//303 592//303 586//303 584//303 +f 591//304 597//304 595//304 593//304 +f 596//305 598//305 592//305 594//305 +f 589//306 599//306 597//306 591//306 +f 598//307 600//307 590//307 592//307 +f 599//308 601//308 603//308 597//308 +f 604//309 602//309 600//309 598//309 +f 597//310 603//310 605//310 595//310 +f 606//311 604//311 598//311 596//311 +f 603//312 609//312 607//312 605//312 +f 608//313 610//313 604//313 606//313 +f 601//314 611//314 609//314 603//314 +f 610//315 612//315 602//315 604//315 +f 611//316 613//316 615//316 609//316 +f 616//317 614//317 612//317 610//317 
+f 609//318 615//318 617//318 607//318 +f 618//319 616//319 610//319 608//319 +f 615//320 621//320 619//320 617//320 +f 620//321 622//321 616//321 618//321 +f 613//322 623//322 621//322 615//322 +f 622//323 624//323 614//323 616//323 +f 623//324 613//324 627//324 625//324 +f 628//325 614//325 624//325 626//325 +f 613//326 611//326 629//326 627//326 +f 630//327 612//327 614//327 628//327 +f 611//328 601//328 631//328 629//328 +f 632//329 602//329 612//329 630//329 +f 601//330 599//330 633//330 631//330 +f 634//331 600//331 602//331 632//331 +f 599//332 589//332 635//332 633//332 +f 636//333 590//333 600//333 634//333 +f 589//334 587//334 639//334 635//334 +f 640//335 588//335 590//335 636//335 +f 587//336 577//336 641//336 639//336 +f 642//337 578//337 588//337 640//337 +f 577//338 623//338 625//338 641//338 +f 626//339 624//339 578//339 642//339 +f 637//340 641//340 625//340 +f 626//341 642//341 638//341 +f 639//342 641//342 637//342 +f 638//343 642//343 640//343 +f 637//344 635//344 639//344 +f 640//345 636//345 638//345 +f 637//346 633//346 635//346 +f 636//347 634//347 638//347 +f 637//348 631//348 633//348 +f 634//349 632//349 638//349 +f 637//350 629//350 631//350 +f 632//351 630//351 638//351 +f 637//352 627//352 629//352 +f 630//353 628//353 638//353 +f 637//354 625//354 627//354 +f 628//355 626//355 638//355 +f 665//356 750//356 752//356 667//356 +f 752//357 751//357 666//357 667//357 +f 663//358 748//358 750//358 665//358 +f 751//359 749//359 664//359 666//359 +f 661//360 746//360 748//360 663//360 +f 749//361 747//361 662//361 664//361 +f 659//362 744//362 746//362 661//362 +f 747//363 745//363 660//363 662//363 +f 657//364 742//364 744//364 659//364 +f 745//365 743//365 658//365 660//365 +f 655//366 668//366 722//366 740//366 +f 723//367 669//367 656//367 741//367 +f 668//368 670//368 724//368 722//368 +f 725//369 671//369 669//369 723//369 +f 670//370 672//370 726//370 724//370 +f 727//371 673//371 671//371 725//371 +f 672//372 674//372 728//372 
726//372 +f 729//373 675//373 673//373 727//373 +f 674//374 676//374 730//374 728//374 +f 731//375 677//375 675//375 729//375 +f 676//376 678//376 732//376 730//376 +f 733//377 679//377 677//377 731//377 +f 678//378 680//378 734//378 732//378 +f 735//379 681//379 679//379 733//379 +f 680//380 682//380 736//380 734//380 +f 737//381 683//381 681//381 735//381 +f 682//382 684//382 738//382 736//382 +f 739//383 685//383 683//383 737//383 +f 684//384 643//384 644//384 738//384 +f 644//385 643//385 685//385 739//385 +f 686//386 704//386 736//386 738//386 +f 737//387 705//387 687//387 739//387 +f 704//388 755//388 734//388 736//388 +f 735//389 756//389 705//389 737//389 +f 702//390 732//390 734//390 755//390 +f 735//391 733//391 703//391 756//391 +f 700//392 730//392 732//392 702//392 +f 733//393 731//393 701//393 703//393 +f 698//394 728//394 730//394 700//394 +f 731//395 729//395 699//395 701//395 +f 696//396 726//396 728//396 698//396 +f 729//397 727//397 697//397 699//397 +f 694//398 724//398 726//398 696//398 +f 727//399 725//399 695//399 697//399 +f 692//400 722//400 724//400 694//400 +f 725//401 723//401 693//401 695//401 +f 690//402 740//402 722//402 692//402 +f 723//403 741//403 691//403 693//403 +f 690//404 757//404 753//404 740//404 +f 753//405 758//405 691//405 741//405 +f 686//406 738//406 644//406 688//406 +f 644//407 739//407 687//407 689//407 +f 688//408 644//408 754//408 759//408 +f 754//409 644//409 689//409 760//409 +f 753//410 757//410 759//410 754//410 +f 760//411 758//411 753//411 754//411 +f 711//412 713//412 752//412 750//412 +f 752//413 713//413 712//413 751//413 +f 709//414 711//414 750//414 748//414 +f 751//415 712//415 710//415 749//415 +f 707//416 709//416 748//416 746//416 +f 749//417 710//417 708//417 747//417 +f 742//418 763//418 761//418 744//418 +f 762//419 764//419 743//419 745//419 +f 707//420 746//420 744//420 761//420 +f 745//421 747//421 708//421 762//421 +f 720//422 766//422 765//422 763//422 +f 765//423 766//423 721//423 764//423 
+f 761//424 763//424 765//424 645//424 +f 765//425 764//425 762//425 645//425 +f 706//426 707//426 761//426 645//426 +f 762//426 708//426 706//426 645//426 +f 718//427 769//427 767//427 720//427 +f 768//428 770//428 719//428 721//428 +f 716//429 771//429 769//429 718//429 +f 770//430 772//430 717//430 719//430 +f 715//431 773//431 771//431 716//431 +f 772//432 774//432 715//432 717//432 +f 714//433 647//433 773//433 715//433 +f 774//434 647//434 714//434 715//434 +f 766//435 720//435 767//435 646//435 +f 768//436 721//436 766//436 646//436 +f 646//437 767//437 782//437 784//437 +f 783//438 768//438 646//438 784//438 +f 647//439 775//439 776//439 773//439 +f 777//440 775//440 647//440 774//440 +f 773//441 776//441 778//441 771//441 +f 779//442 777//442 774//442 772//442 +f 771//443 778//443 780//443 769//443 +f 781//444 779//444 772//444 770//444 +f 769//445 780//445 782//445 767//445 +f 783//446 781//446 770//446 768//446 +f 775//447 780//447 778//447 776//447 +f 779//448 781//448 775//448 777//448 +f 775//449 784//449 782//449 780//449 +f 783//450 784//450 775//450 781//450 +f 715//451 716//451 740//451 753//451 +f 741//452 717//452 715//452 753//452 +f 716//453 718//453 787//453 740//453 +f 788//454 719//454 717//454 741//454 +f 718//455 720//455 789//455 787//455 +f 790//456 721//456 719//456 788//456 +f 720//457 763//457 742//457 789//457 +f 743//458 764//458 721//458 790//458 +f 657//459 785//459 789//459 742//459 +f 790//460 786//460 658//460 743//460 +f 785//461 791//461 787//461 789//461 +f 788//462 792//462 786//462 790//462 +f 655//463 740//463 787//463 791//463 +f 788//464 741//464 656//464 792//464 +f 707//465 706//465 648//465 798//465 +f 648//466 706//466 708//466 799//466 +f 709//467 707//467 798//467 796//467 +f 799//468 708//468 710//468 797//468 +f 711//469 709//469 796//469 794//469 +f 797//470 710//470 712//470 795//470 +f 713//471 711//471 794//471 793//471 +f 795//472 712//472 713//472 793//472 +f 793//473 794//473 805//473 807//473 +f 
806//474 795//474 793//474 807//474 +f 794//475 796//475 803//475 805//475 +f 804//476 797//476 795//476 806//476 +f 796//477 798//477 801//477 803//477 +f 802//478 799//478 797//478 804//478 +f 798//479 648//479 800//479 801//479 +f 800//480 648//480 799//480 802//480 +f 800//481 807//481 805//481 801//481 +f 806//482 807//482 800//482 802//482 +f 801//483 805//483 803//483 +f 804//484 806//484 802//484 +f 759//485 757//485 810//485 808//485 +f 811//486 758//486 760//486 809//486 +f 688//487 759//487 808//487 830//487 +f 809//488 760//488 689//488 831//488 +f 686//489 688//489 830//489 832//489 +f 831//490 689//490 687//490 833//490 +f 757//491 690//491 828//491 810//491 +f 829//492 691//492 758//492 811//492 +f 690//493 692//493 826//493 828//493 +f 827//494 693//494 691//494 829//494 +f 692//495 694//495 824//495 826//495 +f 825//496 695//496 693//496 827//496 +f 694//497 696//497 822//497 824//497 +f 823//498 697//498 695//498 825//498 +f 696//499 698//499 820//499 822//499 +f 821//500 699//500 697//500 823//500 +f 698//501 700//501 818//501 820//501 +f 819//502 701//502 699//502 821//502 +f 700//503 702//503 816//503 818//503 +f 817//504 703//504 701//504 819//504 +f 702//505 755//505 812//505 816//505 +f 813//506 756//506 703//506 817//506 +f 755//507 704//507 814//507 812//507 +f 815//508 705//508 756//508 813//508 +f 704//509 686//509 832//509 814//509 +f 833//510 687//510 705//510 815//510 +f 814//511 832//511 834//511 852//511 +f 835//512 833//512 815//512 853//512 +f 812//513 814//513 852//513 854//513 +f 853//514 815//514 813//514 855//514 +f 816//515 812//515 854//515 850//515 +f 855//516 813//516 817//516 851//516 +f 818//517 816//517 850//517 848//517 +f 851//518 817//518 819//518 849//518 +f 820//519 818//519 848//519 846//519 +f 849//520 819//520 821//520 847//520 +f 822//521 820//521 846//521 844//521 +f 847//522 821//522 823//522 845//522 +f 824//523 822//523 844//523 842//523 +f 845//524 823//524 825//524 843//524 +f 826//525 824//525 842//525 
840//525 +f 843//526 825//526 827//526 841//526 +f 828//527 826//527 840//527 838//527 +f 841//528 827//528 829//528 839//528 +f 810//529 828//529 838//529 856//529 +f 839//530 829//530 811//530 857//530 +f 832//531 830//531 836//531 834//531 +f 837//532 831//532 833//532 835//532 +f 830//533 808//533 858//533 836//533 +f 859//534 809//534 831//534 837//534 +f 808//535 810//535 856//535 858//535 +f 857//536 811//536 809//536 859//536 +f 643//537 684//537 860//537 649//537 +f 861//538 685//538 643//538 649//538 +f 684//539 682//539 862//539 860//539 +f 863//540 683//540 685//540 861//540 +f 682//541 680//541 864//541 862//541 +f 865//542 681//542 683//542 863//542 +f 680//543 678//543 866//543 864//543 +f 867//544 679//544 681//544 865//544 +f 678//545 676//545 868//545 866//545 +f 869//546 677//546 679//546 867//546 +f 676//547 674//547 870//547 868//547 +f 871//548 675//548 677//548 869//548 +f 674//549 672//549 872//549 870//549 +f 873//550 673//550 675//550 871//550 +f 672//551 670//551 874//551 872//551 +f 875//552 671//552 673//552 873//552 +f 670//553 668//553 876//553 874//553 +f 877//554 669//554 671//554 875//554 +f 884//555 885//555 904//555 914//555 +f 905//556 885//556 884//556 915//556 +f 883//557 884//557 914//557 912//557 +f 915//558 884//558 883//558 913//558 +f 882//559 883//559 912//559 916//559 +f 913//560 883//560 882//560 917//560 +f 665//561 667//561 882//561 916//561 +f 882//562 667//562 666//562 917//562 +f 663//563 665//563 916//563 910//563 +f 917//564 666//564 664//564 911//564 +f 661//565 663//565 910//565 906//565 +f 911//566 664//566 662//566 907//566 +f 659//567 661//567 906//567 908//567 +f 907//568 662//568 660//568 909//568 +f 906//569 912//569 914//569 908//569 +f 915//570 913//570 907//570 909//570 +f 906//571 910//571 916//571 912//571 +f 917//572 911//572 907//572 913//572 +f 902//573 908//573 914//573 904//573 +f 915//574 909//574 903//574 905//574 +f 657//575 659//575 908//575 902//575 +f 909//576 660//576 658//576 903//576 
+f 785//577 918//577 920//577 791//577 +f 921//578 919//578 786//578 792//578 +f 657//579 902//579 918//579 785//579 +f 919//580 903//580 658//580 786//580 +f 655//581 791//581 920//581 922//581 +f 921//582 792//582 656//582 923//582 +f 655//583 922//583 876//583 668//583 +f 877//584 923//584 656//584 669//584 +f 653//585 900//585 928//585 880//585 +f 929//586 901//586 653//586 880//586 +f 880//587 928//587 926//587 654//587 +f 927//588 929//588 880//588 654//588 +f 654//589 926//589 924//589 881//589 +f 925//590 927//590 654//590 881//590 +f 881//591 924//591 904//591 885//591 +f 905//592 925//592 881//592 885//592 +f 902//593 904//593 924//593 918//593 +f 925//594 905//594 903//594 919//594 +f 872//595 874//595 894//595 886//595 +f 895//596 875//596 873//596 887//596 +f 652//597 892//597 900//597 653//597 +f 901//598 893//598 652//598 653//598 +f 878//599 934//599 932//599 879//599 +f 933//600 935//600 878//600 879//600 +f 879//601 932//601 930//601 651//601 +f 931//602 933//602 879//602 651//602 +f 651//603 930//603 892//603 652//603 +f 893//604 931//604 651//604 652//604 +f 868//605 870//605 938//605 940//605 +f 939//606 871//606 869//606 941//606 +f 940//607 938//607 944//607 942//607 +f 945//608 939//608 941//608 943//608 +f 942//609 944//609 946//609 948//609 +f 947//610 945//610 943//610 949//610 +f 948//611 946//611 952//611 950//611 +f 953//612 947//612 949//612 951//612 +f 890//613 954//613 950//613 952//613 +f 951//614 955//614 891//614 953//614 +f 892//615 930//615 950//615 954//615 +f 951//616 931//616 893//616 955//616 +f 930//617 932//617 948//617 950//617 +f 949//618 933//618 931//618 951//618 +f 932//619 934//619 942//619 948//619 +f 943//620 935//620 933//620 949//620 +f 934//621 936//621 940//621 942//621 +f 941//622 937//622 935//622 943//622 +f 866//623 868//623 940//623 936//623 +f 941//624 869//624 867//624 937//624 +f 650//625 936//625 934//625 878//625 +f 935//626 937//626 650//626 878//626 +f 860//627 862//627 864//627 866//627 +f 
865//628 863//628 861//628 867//628 +f 860//629 866//629 936//629 650//629 +f 937//630 867//630 861//630 650//630 +f 649//631 860//631 650//631 +f 650//632 861//632 649//632 +f 870//633 872//633 886//633 938//633 +f 887//634 873//634 871//634 939//634 +f 886//635 888//635 944//635 938//635 +f 945//636 889//636 887//636 939//636 +f 888//637 958//637 946//637 944//637 +f 947//638 959//638 889//638 945//638 +f 890//639 952//639 946//639 958//639 +f 947//640 953//640 891//640 959//640 +f 924//641 926//641 962//641 960//641 +f 963//642 927//642 925//642 961//642 +f 894//643 960//643 962//643 896//643 +f 963//644 961//644 895//644 897//644 +f 874//645 876//645 960//645 894//645 +f 961//646 877//646 875//646 895//646 +f 876//647 920//647 918//647 960//647 +f 919//648 921//648 877//648 961//648 +f 918//649 924//649 960//649 +f 961//650 925//650 919//650 +f 876//651 922//651 920//651 +f 921//652 923//652 877//652 +f 890//653 898//653 956//653 954//653 +f 957//654 899//654 891//654 955//654 +f 892//655 954//655 956//655 900//655 +f 957//656 955//656 893//656 901//656 +f 896//657 962//657 956//657 898//657 +f 957//658 963//658 897//658 899//658 +f 926//659 928//659 956//659 962//659 +f 957//660 929//660 927//660 963//660 +f 900//661 956//661 928//661 +f 929//662 957//662 901//662 +f 976//663 964//663 990//663 978//663 +f 991//664 965//664 977//664 979//664 +f 976//665 978//665 980//665 974//665 +f 981//666 979//666 977//666 975//666 +f 974//667 980//667 982//667 972//667 +f 983//668 981//668 975//668 973//668 +f 972//669 982//669 984//669 970//669 +f 985//670 983//670 973//670 971//670 +f 970//671 984//671 986//671 968//671 +f 987//672 985//672 971//672 969//672 +f 968//673 986//673 988//673 966//673 +f 989//674 987//674 969//674 967//674 +f 986//675 996//675 994//675 988//675 +f 995//676 997//676 987//676 989//676 +f 984//677 998//677 996//677 986//677 +f 997//678 999//678 985//678 987//678 +f 982//679 1000//679 998//679 984//679 +f 999//680 1001//680 983//680 985//680 +f 
980//681 1002//681 1000//681 982//681 +f 1001//682 1003//682 981//682 983//682 +f 978//683 1004//683 1002//683 980//683 +f 1003//684 1005//684 979//684 981//684 +f 978//685 990//685 992//685 1004//685 +f 993//686 991//686 979//686 1005//686 +f 894//687 896//687 1020//687 1018//687 +f 1021//688 897//688 895//688 1019//688 +f 896//689 966//689 988//689 1020//689 +f 989//690 967//690 897//690 1021//690 +f 886//691 894//691 1018//691 888//691 +f 1019//692 895//692 887//692 889//692 +f 958//693 1006//693 990//693 964//693 +f 991//694 1007//694 959//694 965//694 +f 988//695 994//695 1016//695 1020//695 +f 1017//696 995//696 989//696 1021//696 +f 1014//697 1022//697 1020//697 1016//697 +f 1021//698 1023//698 1015//698 1017//698 +f 1010//699 1022//699 1014//699 1012//699 +f 1015//700 1023//700 1011//700 1013//700 +f 1008//701 1024//701 1022//701 1010//701 +f 1023//702 1025//702 1009//702 1011//702 +f 1006//703 1024//703 1008//703 1026//703 +f 1009//704 1025//704 1007//704 1027//704 +f 990//705 1006//705 1026//705 992//705 +f 1027//706 1007//706 991//706 993//706 +f 888//707 1024//707 1006//707 958//707 +f 1007//708 1025//708 889//708 959//708 +f 888//709 1018//709 1022//709 1024//709 +f 1023//710 1019//710 889//710 1025//710 +f 1018//711 1020//711 1022//711 +f 1023//712 1021//712 1019//712 +f 992//713 1026//713 1028//713 1052//713 +f 1029//714 1027//714 993//714 1053//714 +f 1026//715 1008//715 1038//715 1028//715 +f 1039//716 1009//716 1027//716 1029//716 +f 1008//717 1010//717 1036//717 1038//717 +f 1037//718 1011//718 1009//718 1039//718 +f 1010//719 1012//719 1034//719 1036//719 +f 1035//720 1013//720 1011//720 1037//720 +f 1012//721 1014//721 1032//721 1034//721 +f 1033//722 1015//722 1013//722 1035//722 +f 1014//723 1016//723 1030//723 1032//723 +f 1031//724 1017//724 1015//724 1033//724 +f 1016//725 994//725 1050//725 1030//725 +f 1051//726 995//726 1017//726 1031//726 +f 1004//727 992//727 1052//727 1040//727 +f 1053//728 993//728 1005//728 1041//728 +f 1002//729 
1004//729 1040//729 1042//729 +f 1041//730 1005//730 1003//730 1043//730 +f 1000//731 1002//731 1042//731 1044//731 +f 1043//732 1003//732 1001//732 1045//732 +f 998//733 1000//733 1044//733 1046//733 +f 1045//734 1001//734 999//734 1047//734 +f 996//735 998//735 1046//735 1048//735 +f 1047//736 999//736 997//736 1049//736 +f 994//737 996//737 1048//737 1050//737 +f 1049//738 997//738 995//738 1051//738 +f 1034//739 1032//739 1056//739 1054//739 +f 1057//740 1033//740 1035//740 1055//740 +f 1054//741 1056//741 1058//741 1060//741 +f 1059//742 1057//742 1055//742 1061//742 +f 1060//743 1058//743 1064//743 1062//743 +f 1065//744 1059//744 1061//744 1063//744 +f 1062//745 1064//745 1066//745 1068//745 +f 1067//746 1065//746 1063//746 1069//746 +f 1040//747 1052//747 1062//747 1068//747 +f 1063//748 1053//748 1041//748 1069//748 +f 1028//749 1060//749 1062//749 1052//749 +f 1063//750 1061//750 1029//750 1053//750 +f 1028//751 1038//751 1054//751 1060//751 +f 1055//752 1039//752 1029//752 1061//752 +f 1034//753 1054//753 1038//753 1036//753 +f 1039//754 1055//754 1035//754 1037//754 +f 1030//755 1050//755 1056//755 1032//755 +f 1057//756 1051//756 1031//756 1033//756 +f 1048//757 1058//757 1056//757 1050//757 +f 1057//758 1059//758 1049//758 1051//758 +f 1046//759 1064//759 1058//759 1048//759 +f 1059//760 1065//760 1047//760 1049//760 +f 1044//761 1066//761 1064//761 1046//761 +f 1065//762 1067//762 1045//762 1047//762 +f 1042//763 1068//763 1066//763 1044//763 +f 1067//764 1069//764 1043//764 1045//764 +f 1040//765 1068//765 1042//765 +f 1043//766 1069//766 1041//766 +f 968//767 966//767 1080//767 1078//767 +f 1081//768 967//768 969//768 1079//768 +f 970//769 968//769 1078//769 1076//769 +f 1079//770 969//770 971//770 1077//770 +f 972//771 970//771 1076//771 1074//771 +f 1077//772 971//772 973//772 1075//772 +f 974//773 972//773 1074//773 1072//773 +f 1075//774 973//774 975//774 1073//774 +f 976//775 974//775 1072//775 1070//775 +f 1073//776 975//776 977//776 
1071//776 +f 964//777 976//777 1070//777 1082//777 +f 1071//778 977//778 965//778 1083//778 +f 1070//779 1078//779 1080//779 1082//779 +f 1081//780 1079//780 1071//780 1083//780 +f 1070//781 1072//781 1076//781 1078//781 +f 1077//782 1073//782 1071//782 1079//782 +f 1072//783 1074//783 1076//783 +f 1077//784 1075//784 1073//784 +f 890//785 958//785 964//785 1082//785 +f 965//786 959//786 891//786 1083//786 +f 890//787 1082//787 1080//787 898//787 +f 1081//788 1083//788 891//788 899//788 +f 896//789 898//789 1080//789 966//789 +f 1081//790 899//790 897//790 967//790 +o Plane.001 +v 2.882756 0.000000 2.384819 +v 6.045161 0.000000 2.384819 +v 2.882756 0.000000 -0.777585 +v 6.045161 0.000000 -0.777585 +v 4.463959 2.956217 0.803617 +vn 0.0000 -1.0000 0.0000 +vn 0.0000 0.4716 -0.8818 +vn 0.0000 0.4716 0.8818 +vn 0.8818 0.4716 0.0000 +vn -0.8818 0.4716 0.0000 +usemtl None +s off +f 1084//791 1086//791 1087//791 1085//791 +f 1087//792 1086//792 1088//792 +f 1084//793 1085//793 1088//793 +f 1085//794 1087//794 1088//794 +f 1086//795 1084//795 1088//795 +o Cylinder +v -3.975720 0.000000 -3.621616 +v -3.975720 4.000000 -3.621616 +v -3.597971 0.000000 -3.726612 +v -3.597971 4.000000 -3.726612 +v -3.206998 0.000000 -3.755895 +v -3.206998 4.000000 -3.755895 +v -2.817824 0.000000 -3.708340 +v -2.817824 4.000000 -3.708340 +v -2.445406 0.000000 -3.585775 +v -2.445406 4.000000 -3.585775 +v -2.104054 0.000000 -3.392909 +v -2.104054 4.000000 -3.392909 +v -1.806888 0.000000 -3.137155 +v -1.806888 4.000000 -3.137155 +v -1.565327 0.000000 -2.828341 +v -1.565327 4.000000 -2.828341 +v -1.388654 0.000000 -2.478335 +v -1.388654 4.000000 -2.478335 +v -1.283659 0.000000 -2.100587 +v -1.283659 4.000000 -2.100587 +v -1.254376 0.000000 -1.709614 +v -1.254376 4.000000 -1.709614 +v -1.301931 0.000000 -1.320440 +v -1.301931 4.000000 -1.320440 +v -1.424496 0.000000 -0.948021 +v -1.424496 4.000000 -0.948021 +v -1.617361 0.000000 -0.606670 +v -1.617361 4.000000 -0.606670 +v -1.873116 0.000000 -0.309503 
+v -1.873116 4.000000 -0.309503 +v -2.181930 0.000000 -0.067942 +v -2.181930 4.000000 -0.067942 +v -2.531936 0.000000 0.108731 +v -2.531936 4.000000 0.108731 +v -2.909684 0.000000 0.213726 +v -2.909684 4.000000 0.213726 +v -3.300658 0.000000 0.243009 +v -3.300658 4.000000 0.243009 +v -3.689832 0.000000 0.195454 +v -3.689832 4.000000 0.195454 +v -4.062251 0.000000 0.072888 +v -4.062251 4.000000 0.072888 +v -4.403602 0.000000 -0.119978 +v -4.403602 4.000000 -0.119978 +v -4.700768 0.000000 -0.375732 +v -4.700768 4.000000 -0.375732 +v -4.942329 0.000000 -0.684546 +v -4.942329 4.000000 -0.684546 +v -5.119001 0.000000 -1.034553 +v -5.119001 4.000000 -1.034553 +v -5.223997 0.000000 -1.412301 +v -5.223997 4.000000 -1.412301 +v -5.253279 0.000000 -1.803275 +v -5.253279 4.000000 -1.803275 +v -5.205724 0.000000 -2.192449 +v -5.205724 4.000000 -2.192449 +v -5.083158 0.000000 -2.564867 +v -5.083158 4.000000 -2.564867 +v -4.890292 0.000000 -2.906218 +v -4.890292 4.000000 -2.906218 +v -4.634538 0.000000 -3.203384 +v -4.634538 4.000000 -3.203384 +v -4.325724 0.000000 -3.444945 +v -4.325724 4.000000 -3.444945 +vn -0.2678 0.0000 -0.9635 +vn -0.0747 0.0000 -0.9972 +vn 0.1213 0.0000 -0.9926 +vn 0.3126 0.0000 -0.9499 +vn 0.4919 0.0000 -0.8706 +vn 0.6523 0.0000 -0.7579 +vn 0.7877 0.0000 -0.6161 +vn 0.8927 0.0000 -0.4506 +vn 0.9635 0.0000 -0.2678 +vn 0.9972 0.0000 -0.0747 +vn 0.9926 0.0000 0.1213 +vn 0.9499 0.0000 0.3126 +vn 0.8706 0.0000 0.4919 +vn 0.7579 0.0000 0.6523 +vn 0.6161 0.0000 0.7877 +vn 0.4506 0.0000 0.8927 +vn 0.2678 0.0000 0.9635 +vn 0.0747 0.0000 0.9972 +vn -0.1213 0.0000 0.9926 +vn -0.3126 0.0000 0.9499 +vn -0.4919 0.0000 0.8706 +vn -0.6523 0.0000 0.7579 +vn -0.7877 0.0000 0.6161 +vn -0.8927 0.0000 0.4506 +vn -0.9635 0.0000 0.2678 +vn -0.9972 0.0000 0.0747 +vn -0.9926 0.0000 -0.1213 +vn -0.9499 0.0000 -0.3126 +vn -0.8706 0.0000 -0.4919 +vn -0.7579 0.0000 -0.6523 +vn 0.0000 1.0000 0.0000 +vn -0.6161 0.0000 -0.7877 +vn -0.4506 0.0000 -0.8927 +vn 0.0000 -1.0000 0.0000 
+usemtl None +s off +f 1089//796 1090//796 1092//796 1091//796 +f 1091//797 1092//797 1094//797 1093//797 +f 1093//798 1094//798 1096//798 1095//798 +f 1095//799 1096//799 1098//799 1097//799 +f 1097//800 1098//800 1100//800 1099//800 +f 1099//801 1100//801 1102//801 1101//801 +f 1101//802 1102//802 1104//802 1103//802 +f 1103//803 1104//803 1106//803 1105//803 +f 1105//804 1106//804 1108//804 1107//804 +f 1107//805 1108//805 1110//805 1109//805 +f 1109//806 1110//806 1112//806 1111//806 +f 1111//807 1112//807 1114//807 1113//807 +f 1113//808 1114//808 1116//808 1115//808 +f 1115//809 1116//809 1118//809 1117//809 +f 1117//810 1118//810 1120//810 1119//810 +f 1119//811 1120//811 1122//811 1121//811 +f 1121//812 1122//812 1124//812 1123//812 +f 1123//813 1124//813 1126//813 1125//813 +f 1125//814 1126//814 1128//814 1127//814 +f 1127//815 1128//815 1130//815 1129//815 +f 1129//816 1130//816 1132//816 1131//816 +f 1131//817 1132//817 1134//817 1133//817 +f 1133//818 1134//818 1136//818 1135//818 +f 1135//819 1136//819 1138//819 1137//819 +f 1137//820 1138//820 1140//820 1139//820 +f 1139//821 1140//821 1142//821 1141//821 +f 1141//822 1142//822 1144//822 1143//822 +f 1143//823 1144//823 1146//823 1145//823 +f 1145//824 1146//824 1148//824 1147//824 +f 1147//825 1148//825 1150//825 1149//825 +f 1092//826 1090//826 1152//826 1150//826 1148//826 1146//826 1144//826 1142//826 1140//826 1138//826 1136//826 1134//826 1132//826 1130//826 1128//826 1126//826 1124//826 1122//826 1120//826 1118//826 1116//826 1114//826 1112//826 1110//826 1108//826 1106//826 1104//826 1102//826 1100//826 1098//826 1096//826 1094//826 +f 1149//827 1150//827 1152//827 1151//827 +f 1151//828 1152//828 1090//828 1089//828 +f 1089//829 1091//829 1093//829 1095//829 1097//829 1099//829 1101//829 1103//829 1105//829 1107//829 1109//829 1111//829 1113//829 1115//829 1117//829 1119//829 1121//829 1123//829 1125//829 1127//829 1129//829 1131//829 1133//829 1135//829 1137//829 1139//829 1141//829 
1143//829 1145//829 1147//829 1149//829 1151//829 +o Plane +v -8.000000 0.000000 8.000000 +v 8.000000 0.000000 8.000000 +v -8.000000 0.000000 -8.000000 +v 8.000000 0.000000 -8.000000 +vn 0.0000 1.0000 0.0000 +usemtl None +s off +f 1153//830 1154//830 1156//830 1155//830 diff --git a/include/dofpass.h b/include/dofpass.h new file mode 100644 index 0000000..4274c38 --- /dev/null +++ b/include/dofpass.h @@ -0,0 +1,41 @@ +#pragma once + +#include + +class Renderer; +struct RenderTarget; + +class DoFPass { +public: + DoFPass(Renderer& renderer); + ~DoFPass(); + + void render(VkCommandBuffer commandBuffer, RenderTarget* target); + + void createDescriptorSet(RenderTarget* target); + + VkRenderPass getRenderPass() const { + return renderPass_; + } + +private: + void createRenderPass(); + void createDescriptorSetLayout(); + void createPipeline(); + void createBokehImage(); + void createDescriptorSet(); + + VkRenderPass renderPass_ = nullptr; + + VkDescriptorSetLayout setLayout_ = nullptr; + + VkPipelineLayout pipelineLayout_ = nullptr; + VkPipeline pipeline_ = nullptr; + + VkImage bokehImage_ = nullptr; + VkDeviceMemory bokehMemory_ = nullptr; + VkImageView bokehImageView_ = nullptr; + VkSampler bokehSampler_ = nullptr; + + Renderer& renderer_; +}; diff --git a/include/renderer.h b/include/renderer.h index 9a51e0f..ea6af8b 100644 --- a/include/renderer.h +++ b/include/renderer.h @@ -4,6 +4,7 @@ #include "worldpass.h" #include "postpass.h" +#include "dofpass.h" struct RenderTarget { VkSurfaceKHR surface = nullptr; @@ -12,19 +13,36 @@ struct RenderTarget { VkExtent2D extent = {}; uint32_t numImages = 0, currentImage = 0; + // swapwchain VkImage* swapchainImages = nullptr; VkImageView* swapchainImageViews = nullptr; VkFramebuffer* swapchainFramebuffers = nullptr; + // offscreen color VkImage* offscreenColorImages = nullptr; VkDeviceMemory* offscreenColorMemory = nullptr; VkImageView* offscreenColorImageViews = nullptr; + // offscreen depth VkImage* offscreenDepthImages = 
nullptr; VkDeviceMemory* offscreenDepthMemory = nullptr; VkImageView* offscreenDepthImageViews = nullptr; VkFramebuffer* offscreenFramebuffers = nullptr; + + // near field + VkImage* nearFieldImages = nullptr; + VkDeviceMemory* nearFieldMemory = nullptr; + VkImageView* nearFieldImageViews = nullptr; + + VkFramebuffer* nearFieldFramebuffers = nullptr; + + // far field + VkImage* farFieldImages = nullptr; + VkDeviceMemory* farFieldMemory = nullptr; + VkImageView* farFieldImageViews = nullptr; + + VkFramebuffer* farFieldFramebuffers = nullptr; VkCommandBuffer* commandBuffers = nullptr; @@ -33,6 +51,7 @@ struct RenderTarget { VkFence* fences = nullptr; VkDescriptorSet* postSets = nullptr; + VkDescriptorSet* dofSets = nullptr; }; class World; @@ -65,6 +84,14 @@ public: VkDevice getDevice() const { return device_; } + + VkQueue getGraphicsQueue() const { + return graphicsQueue_; + } + + VkCommandPool getCommandPool() const { + return commandPool_; + } VkRenderPass getRenderPass() const { return presentationRenderPass_; @@ -114,4 +141,5 @@ private: WorldPass* worldPass_ = nullptr; PostPass* postPass_ = nullptr; + DoFPass* dofPass_ = nullptr; }; diff --git a/include/stb_image.h b/include/stb_image.h new file mode 100644 index 0000000..d9c21bc --- /dev/null +++ b/include/stb_image.h @@ -0,0 +1,7462 @@ +/* stb_image - v2.19 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. 
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. + +RECENT REVISION HISTORY: + + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine + John-Mark Allen + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Phil Jordan + Dave Moore Roy Eltham Hayaki Saito Nathan Reed + Won Chun Luke Graham Johan Duparc Nick Verigakis + the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Laurent Gomila Cort Stratton Sergio Gonzalez github:snagar + Aruelien Pocheville Thibault Reuille Cass Everitt github:Zelex + Ryamond Barbiero Paul Du Bois Engin Manap github:grim210 + Aldo Culquicondor Philipp Wiesemann Dale Weiler github:sammyhw + Oriol Ferrer Mesia Josh Tobin Matthew Gregan github:phprus + Julian Raschke Gregory Mullen Baldur Karlsson github:poppolopoppo + Christian Floisand Kevin Schmidt github:darealshinji + Blazej Dariusz Roszkowski github:Michaelangel007 +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic 
usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. 
The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). 
+// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// + + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for desired_channels + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +//////////////////////////////////// +// +// 8-bits-per-channel interface +// + +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels); +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +#endif + + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +//////////////////////////////////// +// +// 16-bits-per-channel interface +// + +STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + +//////////////////////////////////// +// +// float-per-channel interface +// +#ifndef STBI_NO_LINEAR + STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF 
float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// NOT THREADSAFE +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE 
*f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + 
#define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp, pow +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 
1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." +#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. 
+#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. +#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + fseek((FILE*) user, n, SEEK_CUR); +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user); +} + +static stbi_io_callbacks 
stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static 
void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. 
+// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} + +static void 
*stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return 
stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void 
stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) || !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + 
stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where 
s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if 
defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, 
req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + 
STBI__CASE(1,2) { dest[0]=src[0], dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f; + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of 
memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman 
huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this 
size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<<n) + 1 +static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; + +// combined JPEG 'receive' and JPEG 'extend', since baseline +// always extends everything it receives. +stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) +{ + unsigned int k; + int sgn; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major?
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int 
stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while 
(k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = 
(p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + 
STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. 
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + 
/* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. 
+ __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, 
vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + 
dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. 
+ dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? 
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int 
i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = 
stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; 
+ if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + 
} + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if 
(z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, 
stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we 
reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc 
*stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int 
cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + 
_mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 
0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from 
whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? 
r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 
value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require 
PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! 
+ // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (int) (z->zout - z->zout_start); + limit = old_limit = (int) (z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 
0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) + c = 
stbi__zreceive(a,3)+3; + else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a,7)+11; + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[288] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char 
*obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - 
a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + 
+static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. 
+ if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
+ case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; + } + #undef STBI__CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; + } + #undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. 
+ if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i=0; i < x; ++i,cur+=output_bytes) { + cur[filter_bytes+1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. 
+ stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] 
? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = 
flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) 
return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = 
idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if 
(has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, 
x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = 
(a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. +static int stbi__shiftsigned(int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v >= 0 && v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // 
discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = 
abs((int) s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); 
+ } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, 
tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + // any data to skip? 
(offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. 
+ if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? 
+ + for(y=0; ytype) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;xchannel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; ichannel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;ichannel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;ichannel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, 
x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 
0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * 
g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? 
p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->background = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->history = (stbi_uc *) stbi__malloc(g->w * g->h); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "tranparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to teh color that was there the previous frame. + memset( g->out, 0x00, 4 * g->w * g->h ); + memset( g->background, 0x00, 4 * g->w * g->h ); // state of the background (starts transparent) + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispoase of the previous one? 
+ dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case eithe way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previou frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
+ + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + out = (stbi_uc*) STBI_REALLOC( out, layers * stride ); + if (delays) { + *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers ); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a 
gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - 
(int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) 
stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + 
if (p == NULL) + return 0; + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + (void) stbi__get32be(s); + (void) stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet 
*packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure 
+ } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! 
+ #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int 
stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug 
in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if 
de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. 
Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... 
not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. 
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. 
We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/shaders/gfield.frag b/shaders/gfield.frag new file mode 100644 index 0000000..b5f8850 --- /dev/null +++ b/shaders/gfield.frag @@ -0,0 +1,31 @@ +#version 460 core + +layout(location = 0) in vec2 inUV; +layout(location = 1) in vec2 inPos; +layout(location = 2) in float cocRadius; + +layout(location = 0) out vec4 outColor; + +layout(binding = 0) uniform sampler2D bokehSampler; +layout(binding = 1) uniform sampler2D sceneSampler; +layout(binding = 2) uniform sampler2D depthSampler; + +layout(push_constant) uniform PushConstants { + vec4 dpack; +} pushConstants; + +void main() { + const vec2 res = vec2(pushConstants.dpack[2], pushConstants.dpack[3]); + + outColor = texture(sceneSampler, vec2(inPos.x / res.x, inPos.y / res.y)) * texture(bokehSampler, inUV); + outColor.a = (cocRadius / 9000.0); + + if(pushConstants.dpack[0] == 0) { + if(texture(depthSampler, gl_FragCoord.xy / res).r < pushConstants.dpack[1]) + discard; + } else { + if(texture(depthSampler, gl_FragCoord.xy / res).r > pushConstants.dpack[1]) + discard; + } +} + diff --git a/shaders/gfield.vert b/shaders/gfield.vert new file mode 100644 index 0000000..84d5e5c --- /dev/null +++ b/shaders/gfield.vert @@ -0,0 +1,45 @@ +#version 460 core + +layout(location = 0) out vec2 outUV; +layout(location = 1) out vec2 outLoc; +layout(location 
= 2) out float cocRadius; + +layout(binding = 2) uniform sampler2D depthSampler; + +layout(push_constant) uniform PushConstants { + vec4 dpack; +} pushConstants; + +void main() { + const vec2 res = vec2(pushConstants.dpack[2], pushConstants.dpack[3]); + + const vec2 loc = vec2((gl_InstanceIndex % int(res.x)), ((gl_InstanceIndex / int(res.x)) % int(res.y))); + outLoc = loc; + + const float depth = texture(depthSampler, vec2(loc.x / res.x, loc.y / res.y)).r; + float size = 0.0; + + // dpack[0] is the field we are drawing (far = 0, near = 1) + if(pushConstants.dpack[0] == 0) { + if(depth > pushConstants.dpack[1]) + size = (depth - pushConstants.dpack[1]) * 500.0; + } else { + if(depth < pushConstants.dpack[1]) + size = (pushConstants.dpack[1] - depth) * 500.0; + } + + cocRadius = size; + + outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2); + + vec2 pos = outUV * 2.0 + -1.0; + pos *= vec2(1.0 / res.x, 1.0 / res.y); + pos *= min(size, 32.0); + pos.x -= 1; + pos.y -= 1; + pos.x += loc.x / (res.x / 2.0); + pos.y += loc.y / (res.y / 2.0); + + gl_Position = vec4(pos, 0.0, 1.0); +} + diff --git a/shaders/post.frag b/shaders/post.frag index d800253..f12a152 100644 --- a/shaders/post.frag +++ b/shaders/post.frag @@ -4,8 +4,29 @@ layout(location = 0) in vec2 inUV; layout(location = 0) out vec4 outColor; -layout(binding = 0) uniform sampler2D offscreenSampler; +layout(binding = 0) uniform sampler2D sceneSampler; +layout(binding = 1) uniform sampler2D depthSampler; +layout(binding = 2) uniform sampler2D nearFieldSampler; +layout(binding = 3) uniform sampler2D farFieldSampler; -void main() { - outColor = texture(offscreenSampler, inUV); +void main() { + vec3 sceneColor = texture(sceneSampler, inUV).rgb; + + // alpha divide reconstruction + vec3 farColor = texture(farFieldSampler, inUV).rgb / max(texture(farFieldSampler, inUV).a, 0.0001) * 0.02; + vec3 nearColor = texture(nearFieldSampler, inUV).rgb / max(texture(nearFieldSampler, inUV).a, 0.0001) * 0.02; + + // read coc 
stored in the alpha channel + float coc = texture(farFieldSampler, inUV).a; + + // transition between out of focus and regular scene + vec3 farColorBlurred = mix(sceneColor, farColor, clamp(coc, 0.0, 1.0)); + + // smoother transition between the normal scene and the "out of focus" portions + farColorBlurred = mix(sceneColor, farColorBlurred, clamp(0.5 * coc + 1.0, 0.0, 1.0)); + + //float coc2 = texture(nearFieldSampler, inUV).a; + //vec3 finalColor = mix(farColorBlurred, nearColor, clamp(clamp(-coc2 - 1.0, 0.0, 1.0) + texture(nearFieldSampler, inUV).a * 8.0, 0.0, 1.0)); + + outColor = vec4(farColorBlurred, 1.0); } diff --git a/shaders/post.vert b/shaders/post.vert index de04ddb..55f1e94 100644 --- a/shaders/post.vert +++ b/shaders/post.vert @@ -4,6 +4,6 @@ layout(location = 0) out vec2 outUV; void main() { outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2); - gl_Position = vec4(outUV * 2.0f + -1.0f, 0.0f, 1.0f); + gl_Position = vec4(outUV * 2.0 + -1.0, 0.0, 1.0); } diff --git a/src/dofpass.cpp b/src/dofpass.cpp new file mode 100644 index 0000000..f848093 --- /dev/null +++ b/src/dofpass.cpp @@ -0,0 +1,499 @@ +#include "dofpass.h" + +#include +#include + +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" + +#include "renderer.h" + +DoFPass::DoFPass(Renderer& renderer) : renderer_(renderer) { + createRenderPass(); + createDescriptorSetLayout(); + createPipeline(); + createBokehImage(); +} + +DoFPass::~DoFPass() { + vkDestroySampler(renderer_.getDevice(), bokehSampler_, nullptr); + vkDestroyImageView(renderer_.getDevice(), bokehImageView_, nullptr); + vkFreeMemory(renderer_.getDevice(), bokehMemory_, nullptr); + vkDestroyImage(renderer_.getDevice(), bokehImage_, nullptr); + + vkDestroyPipeline(renderer_.getDevice(), pipeline_, nullptr); + vkDestroyPipelineLayout(renderer_.getDevice(), pipelineLayout_, nullptr); + + vkDestroyDescriptorSetLayout(renderer_.getDevice(), setLayout_, nullptr); + + vkDestroyRenderPass(renderer_.getDevice(), renderPass_, 
nullptr); +} + +void DoFPass::render(VkCommandBuffer commandBuffer, RenderTarget* target) { + VkViewport viewport = {}; + viewport.width = target->extent.width / 2; + viewport.height = target->extent.height / 2; + viewport.maxDepth = 1.0f; + + vkCmdSetViewport(commandBuffer, 0, 1, &viewport); + + VkRect2D scissor = {}; + scissor.extent.width = target->extent.width / 2; + scissor.extent.height = target->extent.height / 2; + + vkCmdSetScissor(commandBuffer, 0, 1, &scissor); + + VkClearValue clearColor = {}; + + VkRenderPassBeginInfo renderPassBeginInfo = {}; + renderPassBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + renderPassBeginInfo.framebuffer = target->farFieldFramebuffers[target->currentImage]; + renderPassBeginInfo.renderPass = renderPass_; + renderPassBeginInfo.renderArea.extent.width = target->extent.width / 2; + renderPassBeginInfo.renderArea.extent.height = target->extent.height / 2; + renderPassBeginInfo.clearValueCount = 1; + renderPassBeginInfo.pClearValues = &clearColor; + + // far field + vkCmdBeginRenderPass(commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); + + glm::vec4 dpack; + dpack[0] = 0; + dpack[1] = 0.9581; + dpack[2] = target->extent.width / 2; + dpack[3] = target->extent.height / 2; + + vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_); + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout_, 0, 1, &target->dofSets[target->currentImage], 0, nullptr); + + vkCmdPushConstants(commandBuffer, pipelineLayout_, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(glm::vec4), &dpack); + + vkCmdDraw(commandBuffer, 3, (target->extent.width / 2) * (target->extent.height / 2), 0, 0); + + vkCmdEndRenderPass(commandBuffer); + + //near field + renderPassBeginInfo.framebuffer = target->nearFieldFramebuffers[target->currentImage]; + + vkCmdBeginRenderPass(commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); + + 
vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_); + vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout_, 0, 1, &target->dofSets[target->currentImage], 0, nullptr); + + dpack[0] = 1; + dpack[1] = 0.9581; + + vkCmdPushConstants(commandBuffer, pipelineLayout_, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(glm::vec4), &dpack); + + //FIXME: near field is bugged + //vkCmdDraw(commandBuffer, 3, (target->extent.width / 2) * (target->extent.height / 2), 0, 0); + + vkCmdEndRenderPass(commandBuffer); +} + +void DoFPass::createDescriptorSet(RenderTarget* target) { + VkDescriptorSetAllocateInfo allocInfo = {}; + allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + allocInfo.descriptorPool = renderer_.getDescriptorPool(); + allocInfo.descriptorSetCount = target->numImages; + + // FIXME: lol what + auto layouts = new VkDescriptorSetLayout[target->numImages]; + for(uint32_t i = 0; i < target->numImages; i++) + layouts[i] = setLayout_; + + allocInfo.pSetLayouts = layouts; + + target->dofSets = new VkDescriptorSet[target->numImages]; + vkAllocateDescriptorSets(renderer_.getDevice(), &allocInfo, target->dofSets); + + delete[] layouts; + + for(uint32_t i = 0; i < target->numImages; i++) { + VkDescriptorImageInfo bokehImageInfo = {}; + bokehImageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + bokehImageInfo.imageView = bokehImageView_; + bokehImageInfo.sampler = bokehSampler_; + + VkDescriptorImageInfo sceneImageInfo = {}; + sceneImageInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + sceneImageInfo.imageView = target->offscreenColorImageViews[i]; + sceneImageInfo.sampler = bokehSampler_; + + VkDescriptorImageInfo depthImageInfo = {}; + depthImageInfo.imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;; + depthImageInfo.imageView = target->offscreenDepthImageViews[i]; + depthImageInfo.sampler = bokehSampler_; + + VkWriteDescriptorSet 
bokehDescriptorWrite = {}; + bokehDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + bokehDescriptorWrite.descriptorCount = 1; + bokehDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + bokehDescriptorWrite.dstSet = target->dofSets[i]; + bokehDescriptorWrite.pImageInfo = &bokehImageInfo; + + VkWriteDescriptorSet sceneDescriptorWrite = {}; + sceneDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + sceneDescriptorWrite.descriptorCount = 1; + sceneDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + sceneDescriptorWrite.dstBinding = 1; + sceneDescriptorWrite.dstSet = target->dofSets[i]; + sceneDescriptorWrite.pImageInfo = &sceneImageInfo; + + VkWriteDescriptorSet depthDescriptorWrite = {}; + depthDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + depthDescriptorWrite.descriptorCount = 1; + depthDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + depthDescriptorWrite.dstBinding = 2; + depthDescriptorWrite.dstSet = target->dofSets[i]; + depthDescriptorWrite.pImageInfo = &depthImageInfo; + + const std::array descriptorWrites = { + bokehDescriptorWrite, + sceneDescriptorWrite, + depthDescriptorWrite + }; + + vkUpdateDescriptorSets(renderer_.getDevice(), descriptorWrites.size(), descriptorWrites.data(), 0, nullptr); + } +} + +void DoFPass::createRenderPass() { + VkAttachmentDescription colorAttachment = {}; + colorAttachment.format = VK_FORMAT_R32G32B32A32_SFLOAT; + colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT; + colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; + colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; + colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; + colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; + colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + colorAttachment.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + + VkAttachmentReference colorAttachmentRef = {}; + 
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + + VkSubpassDescription subpass = {}; + subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; + subpass.colorAttachmentCount = 1; + subpass.pColorAttachments = &colorAttachmentRef; + + VkRenderPassCreateInfo renderPassInfo = {}; + renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + renderPassInfo.attachmentCount = 1; + renderPassInfo.pAttachments = &colorAttachment; + renderPassInfo.subpassCount = 1; + renderPassInfo.pSubpasses = &subpass; + + vkCreateRenderPass(renderer_.getDevice(), &renderPassInfo, nullptr, &renderPass_); +} + +void DoFPass::createDescriptorSetLayout() { + VkDescriptorSetLayoutBinding bokehSamplerBinding = {}; + bokehSamplerBinding.descriptorCount = 1; + bokehSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + bokehSamplerBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + + VkDescriptorSetLayoutBinding sceneSamplerBinding = {}; + sceneSamplerBinding.binding = 1; + sceneSamplerBinding.descriptorCount = 1; + sceneSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + sceneSamplerBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + + VkDescriptorSetLayoutBinding depthSamplerBinding = {}; + depthSamplerBinding.binding = 2; + depthSamplerBinding.descriptorCount = 1; + depthSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + depthSamplerBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; + + const std::array bindings = { + bokehSamplerBinding, + sceneSamplerBinding, + depthSamplerBinding + }; + + VkDescriptorSetLayoutCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + createInfo.bindingCount = bindings.size(); + createInfo.pBindings = bindings.data(); + + vkCreateDescriptorSetLayout(renderer_.getDevice(), &createInfo, nullptr, &setLayout_); +} + +void DoFPass::createPipeline() { + VkShaderModule 
vertShaderModule = renderer_.createShader("shaders/gfield.vert.spv"); + VkShaderModule fragShaderModule = renderer_.createShader("shaders/gfield.frag.spv"); + + VkPipelineShaderStageCreateInfo vertShaderStageInfo = {}; + vertShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT; + vertShaderStageInfo.module = vertShaderModule; + vertShaderStageInfo.pName = "main"; + + VkPipelineShaderStageCreateInfo fragShaderStageInfo = {}; + fragShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT; + fragShaderStageInfo.module = fragShaderModule; + fragShaderStageInfo.pName = "main"; + + const std::array shaderStages = {vertShaderStageInfo, fragShaderStageInfo}; + + VkPipelineVertexInputStateCreateInfo vertexInputInfo = {}; + vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + + VkPipelineInputAssemblyStateCreateInfo inputAssembly = {}; + inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + + VkPipelineViewportStateCreateInfo viewportState = {}; + viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewportState.viewportCount = 1; + viewportState.scissorCount = 1; + + VkPipelineRasterizationStateCreateInfo rasterizer = {}; + rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterizer.polygonMode = VK_POLYGON_MODE_FILL; + rasterizer.lineWidth = 1.0f; + rasterizer.cullMode = VK_CULL_MODE_FRONT_BIT; + rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; + + VkPipelineMultisampleStateCreateInfo multisampling = {}; + multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; + + VkPipelineColorBlendAttachmentState colorBlendAttachment = {}; + 
colorBlendAttachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + colorBlendAttachment.blendEnable = VK_TRUE; + colorBlendAttachment.colorBlendOp = VK_BLEND_OP_ADD; + colorBlendAttachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE; + colorBlendAttachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE; + colorBlendAttachment.alphaBlendOp = VK_BLEND_OP_ADD; + colorBlendAttachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + colorBlendAttachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + + VkPipelineColorBlendStateCreateInfo colorBlending = {}; + colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + colorBlending.attachmentCount = 1; + colorBlending.pAttachments = &colorBlendAttachment; + + const std::array dynamicStates = { + VK_DYNAMIC_STATE_VIEWPORT, + VK_DYNAMIC_STATE_SCISSOR + }; + + VkPipelineDynamicStateCreateInfo dynamicState = {}; + dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamicState.dynamicStateCount = dynamicStates.size(); + dynamicState.pDynamicStates = dynamicStates.data(); + + VkPushConstantRange pushConstant = {}; + pushConstant.size = sizeof(glm::vec4); + pushConstant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; + + VkPipelineLayoutCreateInfo pipelineLayoutInfo = {}; + pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + pipelineLayoutInfo.setLayoutCount = 1; + pipelineLayoutInfo.pSetLayouts = &setLayout_; + pipelineLayoutInfo.pushConstantRangeCount = 1; + pipelineLayoutInfo.pPushConstantRanges = &pushConstant; + + vkCreatePipelineLayout(renderer_.getDevice(), &pipelineLayoutInfo, nullptr, &pipelineLayout_); + + VkGraphicsPipelineCreateInfo pipelineInfo = {}; + pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipelineInfo.stageCount = shaderStages.size(); + pipelineInfo.pStages = shaderStages.data(); + pipelineInfo.pVertexInputState = 
&vertexInputInfo; + pipelineInfo.pInputAssemblyState = &inputAssembly; + pipelineInfo.pViewportState = &viewportState; + pipelineInfo.pRasterizationState = &rasterizer; + pipelineInfo.pMultisampleState = &multisampling; + pipelineInfo.pColorBlendState = &colorBlending; + pipelineInfo.pDynamicState = &dynamicState; + pipelineInfo.layout = pipelineLayout_; + pipelineInfo.renderPass = renderPass_; + + vkCreateGraphicsPipelines(renderer_.getDevice(), nullptr, 1, &pipelineInfo, nullptr, &pipeline_); + + vkDestroyShaderModule(renderer_.getDevice(), fragShaderModule, nullptr); + vkDestroyShaderModule(renderer_.getDevice(), vertShaderModule, nullptr); +} + +void DoFPass::createBokehImage() { + int width, height, channels; + stbi_uc* pixels = stbi_load("data/bokeh.png", &width, &height, &channels, STBI_rgb_alpha); + + VkImageCreateInfo imageCreateInfo = {}; + imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; + imageCreateInfo.extent.width = width; + imageCreateInfo.extent.height = height; + imageCreateInfo.extent.depth = 1; + imageCreateInfo.mipLevels = 1; + imageCreateInfo.arrayLayers = 1; + imageCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; + imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + + vkCreateImage(renderer_.getDevice(), &imageCreateInfo, nullptr, &bokehImage_); + + VkMemoryRequirements memRequirements; + vkGetImageMemoryRequirements(renderer_.getDevice(), bokehImage_, &memRequirements); + + VkMemoryAllocateInfo allocInfo = {}; + allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + allocInfo.allocationSize = memRequirements.size; + allocInfo.memoryTypeIndex = renderer_.findMemoryType(memRequirements.memoryTypeBits, 
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + + vkAllocateMemory(renderer_.getDevice(), &allocInfo, nullptr, &bokehMemory_); + vkBindImageMemory(renderer_.getDevice(), bokehImage_, bokehMemory_, 0); + + VkBuffer stagingBuffer; + VkDeviceMemory stagingMemory; + + VkBufferCreateInfo bufferInfo = {}; + bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + bufferInfo.size = width * height * 4; + bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + + vkCreateBuffer(renderer_.getDevice(), &bufferInfo, nullptr, &stagingBuffer); + + vkGetBufferMemoryRequirements(renderer_.getDevice(), stagingBuffer, &memRequirements); + + allocInfo = {}; + allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + allocInfo.allocationSize = memRequirements.size; + allocInfo.memoryTypeIndex = renderer_.findMemoryType(memRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + vkAllocateMemory(renderer_.getDevice(), &allocInfo, nullptr, &stagingMemory); + vkBindBufferMemory(renderer_.getDevice(), stagingBuffer, stagingMemory, 0); + + void* data; + vkMapMemory(renderer_.getDevice(), stagingMemory, 0, width * height * 4, 0, &data); + memcpy(data, pixels, width * height * 4); + vkUnmapMemory(renderer_.getDevice(), stagingMemory); + + stbi_image_free(pixels); + + VkCommandBufferAllocateInfo bufferAllocateInfo = {}; + bufferAllocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + bufferAllocateInfo.commandPool = renderer_.getCommandPool(); + bufferAllocateInfo.commandBufferCount = 1; + + VkCommandBuffer commandBuffer = nullptr; + vkAllocateCommandBuffers(renderer_.getDevice(), &bufferAllocateInfo, &commandBuffer); + + VkCommandBufferBeginInfo beginInfo = {}; + beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + + vkBeginCommandBuffer(commandBuffer, &beginInfo); + + // change layout to transfer dst + { + VkImageMemoryBarrier imageMemoryBarrier = {}; + 
imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + imageMemoryBarrier.image = bokehImage_; + imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + imageMemoryBarrier.subresourceRange.layerCount = 1; + imageMemoryBarrier.subresourceRange.levelCount = 1; + + vkCmdPipelineBarrier( + commandBuffer, + VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, + 0, nullptr, + 0, nullptr, + 1, &imageMemoryBarrier); + } + + VkBufferImageCopy region = {}; + region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + region.imageSubresource.layerCount = 1; + region.imageExtent.width = width; + region.imageExtent.height = height; + region.imageExtent.depth = 1; + + vkCmdCopyBufferToImage( + commandBuffer, + stagingBuffer, + bokehImage_, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + 1, + ®ion + ); + + // change layout to shader read only + { + VkImageMemoryBarrier imageMemoryBarrier = {}; + imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + imageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + imageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + imageMemoryBarrier.image = bokehImage_; + imageMemoryBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + imageMemoryBarrier.subresourceRange.layerCount = 1; + imageMemoryBarrier.subresourceRange.levelCount = 1; + + vkCmdPipelineBarrier( + commandBuffer, + VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, + 0, + 0, nullptr, + 0, nullptr, + 1, &imageMemoryBarrier); + } + + vkEndCommandBuffer(commandBuffer); + + VkSubmitInfo submitInfo = {}; + submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submitInfo.commandBufferCount = 
1; + submitInfo.pCommandBuffers = &commandBuffer; + + VkFenceCreateInfo fenceCreateInfo = {}; + fenceCreateInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + + VkFence fence = nullptr; + vkCreateFence(renderer_.getDevice(), &fenceCreateInfo, nullptr, &fence); + + vkQueueSubmit(renderer_.getGraphicsQueue(), 1, &submitInfo, fence); + + vkWaitForFences(renderer_.getDevice(), 1, &fence, VK_TRUE, -1); + vkDestroyFence(renderer_.getDevice(), fence, nullptr); + + vkFreeCommandBuffers(renderer_.getDevice(), renderer_.getCommandPool(), 1, &commandBuffer); + + vkFreeMemory(renderer_.getDevice(), stagingMemory, nullptr); + vkDestroyBuffer(renderer_.getDevice(), stagingBuffer, nullptr); + + VkImageViewCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + createInfo.image = bokehImage_; + createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; + createInfo.format = VK_FORMAT_R8G8B8A8_UNORM; + createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + createInfo.subresourceRange.levelCount = 1; + createInfo.subresourceRange.layerCount = 1; + + vkCreateImageView(renderer_.getDevice(), &createInfo, nullptr, &bokehImageView_); + + VkSamplerCreateInfo samplerInfo = {}; + samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; + samplerInfo.magFilter = VK_FILTER_LINEAR; + samplerInfo.minFilter = VK_FILTER_LINEAR; + samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; + samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; + samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; + samplerInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK; + samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR; + + vkCreateSampler(renderer_.getDevice(), &samplerInfo, nullptr, &bokehSampler_); +} diff --git a/src/main.cpp b/src/main.cpp index 381e9e9..20a66fa 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -101,21 +101,27 @@ Mesh* loadMesh(const char* path) { Assimp::Importer importer; const aiScene* scene = 
importer.ReadFile(path, aiProcess_Triangulate); - aiMesh* m = scene->mMeshes[0]; Mesh* mesh = new Mesh(); - for(unsigned int i = 0; i < m->mNumVertices; i++) { - Vertex vertex; - vertex.position = glm::vec3(m->mVertices[i].x, m->mVertices[i].y, m->mVertices[i].z); - vertex.normal = glm::vec3(m->mNormals[i].x, m->mNormals[i].y, m->mNormals[i].z); + unsigned int indexOffset = 0; + for(unsigned mi = 0; mi < scene->mNumMeshes; mi++) { + aiMesh* m = scene->mMeshes[mi]; + + for(unsigned int i = 0; i < m->mNumVertices; i++) { + Vertex vertex; + vertex.position = glm::vec3(m->mVertices[i].x, m->mVertices[i].y, m->mVertices[i].z); + vertex.normal = glm::vec3(m->mNormals[i].x, m->mNormals[i].y, m->mNormals[i].z); - mesh->vertices.push_back(vertex); - } + mesh->vertices.push_back(vertex); + } - for(unsigned int i = 0; i < m->mNumFaces; i++) { - aiFace face = m->mFaces[i]; - for(unsigned int j = 0; j < face.mNumIndices; j++) - mesh->indices.push_back(face.mIndices[j]); + for(unsigned int i = 0; i < m->mNumFaces; i++) { + aiFace face = m->mFaces[i]; + for(unsigned int j = 0; j < face.mNumIndices; j++) + mesh->indices.push_back(indexOffset + face.mIndices[j]); + } + + indexOffset += m->mNumVertices; } renderer->fillMeshBuffers(mesh); @@ -228,12 +234,12 @@ int main(int argc, char* argv[]) { world.lights.push_back(light); Camera camera; - camera.position.z = 3; + camera.position = {5.0, 5.0, 5.0}; if(cinematicMode) cinematic = loadCinematic(argv[2]); else - world.meshes.push_back(loadMesh("data/suzanne.obj")); + world.meshes.push_back(loadMesh("data/scene.obj")); float currentTime = 0.0f, lastTime = 0.0f; Shot* currentShot = nullptr; @@ -262,7 +268,7 @@ int main(int argc, char* argv[]) { target = renderer->createSurfaceRenderTarget(surface, target); } - if(event.type == SDL_KEYDOWN && event.key.keysym.scancode == SDL_SCANCODE_F12) { + if(event.type == SDL_KEYDOWN && event.key.keysym.scancode == SDL_SCANCODE_F7) { renderer->takeScreenshot("screenshot.ppm", target); } } diff --git 
a/src/postpass.cpp b/src/postpass.cpp index 05512f5..bca810f 100644 --- a/src/postpass.cpp +++ b/src/postpass.cpp @@ -44,19 +44,65 @@ void PostPass::createDescriptorSet(RenderTarget* target) { delete[] layouts; for(uint32_t i = 0; i < target->numImages; i++) { - VkDescriptorImageInfo imageInfo = {}; - imageInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - imageInfo.imageView = target->offscreenColorImageViews[i]; - imageInfo.sampler = offscreenSampler_; + VkDescriptorImageInfo sceneImageInfo = {}; + sceneImageInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + sceneImageInfo.imageView = target->offscreenColorImageViews[i]; + sceneImageInfo.sampler = offscreenSampler_; + + VkDescriptorImageInfo depthImageInfo = {}; + depthImageInfo.imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + depthImageInfo.imageView = target->offscreenDepthImageViews[i]; + depthImageInfo.sampler = offscreenSampler_; + + VkDescriptorImageInfo nearFieldImageInfo = {}; + nearFieldImageInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + nearFieldImageInfo.imageView = target->nearFieldImageViews[i]; + nearFieldImageInfo.sampler = offscreenSampler_; + + VkDescriptorImageInfo farFieldImageInfo = {}; + farFieldImageInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + farFieldImageInfo.imageView = target->farFieldImageViews[i]; + farFieldImageInfo.sampler = offscreenSampler_; - VkWriteDescriptorSet descriptorWrite = {}; - descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - descriptorWrite.descriptorCount = 1; - descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - descriptorWrite.dstSet = target->postSets[i]; - descriptorWrite.pImageInfo = &imageInfo; + VkWriteDescriptorSet sceneDescriptorWrite = {}; + sceneDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + sceneDescriptorWrite.descriptorCount = 1; + sceneDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + 
sceneDescriptorWrite.dstSet = target->postSets[i]; + sceneDescriptorWrite.pImageInfo = &sceneImageInfo; + + VkWriteDescriptorSet depthDescriptorWrite = {}; + depthDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + depthDescriptorWrite.descriptorCount = 1; + depthDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + depthDescriptorWrite.dstBinding = 1; + depthDescriptorWrite.dstSet = target->postSets[i]; + depthDescriptorWrite.pImageInfo = &depthImageInfo; + + VkWriteDescriptorSet nearFieldDescriptorWrite = {}; + nearFieldDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + nearFieldDescriptorWrite.descriptorCount = 1; + nearFieldDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + nearFieldDescriptorWrite.dstBinding = 2; + nearFieldDescriptorWrite.dstSet = target->postSets[i]; + nearFieldDescriptorWrite.pImageInfo = &nearFieldImageInfo; + + VkWriteDescriptorSet farFieldDescriptorWrite = {}; + farFieldDescriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + farFieldDescriptorWrite.descriptorCount = 1; + farFieldDescriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + farFieldDescriptorWrite.dstBinding = 3; + farFieldDescriptorWrite.dstSet = target->postSets[i]; + farFieldDescriptorWrite.pImageInfo = &farFieldImageInfo; + + const std::array descriptorWrites = { + sceneDescriptorWrite, + depthDescriptorWrite, + nearFieldDescriptorWrite, + farFieldDescriptorWrite + }; - vkUpdateDescriptorSets(renderer_.getDevice(), 1, &descriptorWrite, 0, nullptr); + vkUpdateDescriptorSets(renderer_.getDevice(), descriptorWrites.size(), descriptorWrites.data(), 0, nullptr); } } @@ -65,11 +111,36 @@ void PostPass::createDescriptorSetLayout() { offscreenSamplerBinding.descriptorCount = 1; offscreenSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; offscreenSamplerBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + + VkDescriptorSetLayoutBinding 
depthSamplerBinding = {}; + depthSamplerBinding.binding = 1; + depthSamplerBinding.descriptorCount = 1; + depthSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + depthSamplerBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + VkDescriptorSetLayoutBinding nearFieldSamplerBinding = {}; + nearFieldSamplerBinding.binding = 2; + nearFieldSamplerBinding.descriptorCount = 1; + nearFieldSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + nearFieldSamplerBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + + VkDescriptorSetLayoutBinding farFieldSamplerBinding = {}; + farFieldSamplerBinding.binding = 3; + farFieldSamplerBinding.descriptorCount = 1; + farFieldSamplerBinding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + farFieldSamplerBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; + + const std::array bindings = { + offscreenSamplerBinding, + depthSamplerBinding, + nearFieldSamplerBinding, + farFieldSamplerBinding + }; + VkDescriptorSetLayoutCreateInfo createInfo = {}; createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; - createInfo.bindingCount = 1; - createInfo.pBindings = &offscreenSamplerBinding; + createInfo.bindingCount = bindings.size(); + createInfo.pBindings = bindings.data(); vkCreateDescriptorSetLayout(renderer_.getDevice(), &createInfo, nullptr, &setLayout_); } diff --git a/src/renderer.cpp b/src/renderer.cpp index 6b3eb94..cc1f70c 100644 --- a/src/renderer.cpp +++ b/src/renderer.cpp @@ -25,11 +25,13 @@ Renderer::Renderer() { worldPass_ = new WorldPass(*this); postPass_ = new PostPass(*this); + dofPass_ = new DoFPass(*this); } Renderer::~Renderer() { vkDeviceWaitIdle(device_); + delete dofPass_; delete postPass_; delete worldPass_; @@ -74,7 +76,12 @@ void Renderer::render(World& world, Camera& camera, RenderTarget* target) { vkCmdSetScissor(commandBuffer, 0, 1, &scissor); worldPass_->render(commandBuffer, world, camera, target); - + dofPass_->render(commandBuffer, target); + 
+ // reset after dof pass + vkCmdSetViewport(commandBuffer, 0, 1, &viewport); + vkCmdSetScissor(commandBuffer, 0, 1, &scissor); + VkClearValue clearColor = {}; VkRenderPassBeginInfo renderPassBeginInfo = {}; @@ -188,6 +195,14 @@ RenderTarget* Renderer::createSurfaceRenderTarget(VkSurfaceKHR surface, RenderTa target->offscreenDepthMemory = new VkDeviceMemory[swapchainImageCount]; target->offscreenDepthImageViews = new VkImageView[swapchainImageCount]; target->offscreenFramebuffers = new VkFramebuffer[swapchainImageCount]; + target->nearFieldImages = new VkImage[swapchainImageCount]; + target->nearFieldMemory = new VkDeviceMemory[swapchainImageCount]; + target->nearFieldImageViews = new VkImageView[swapchainImageCount]; + target->nearFieldFramebuffers = new VkFramebuffer[swapchainImageCount]; + target->farFieldImages = new VkImage[swapchainImageCount]; + target->farFieldMemory = new VkDeviceMemory[swapchainImageCount]; + target->farFieldImageViews = new VkImageView[swapchainImageCount]; + target->farFieldFramebuffers = new VkFramebuffer[swapchainImageCount]; for(uint32_t i = 0; i < swapchainImageCount; i++) { // swapchain image view { @@ -273,7 +288,7 @@ RenderTarget* Renderer::createSurfaceRenderTarget(VkSurfaceKHR surface, RenderTa imageCreateInfo.arrayLayers = 1; imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; - imageCreateInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; + imageCreateInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; vkCreateImage(device_, &imageCreateInfo, nullptr, &target->offscreenDepthImages[i]); @@ -321,9 +336,124 @@ RenderTarget* Renderer::createSurfaceRenderTarget(VkSurfaceKHR surface, RenderTa vkCreateFramebuffer(device_, &framebufferInfo, nullptr, &target->offscreenFramebuffers[i]); } + + // near field color + { + VkImageCreateInfo imageCreateInfo = {}; + imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + imageCreateInfo.imageType = 
VK_IMAGE_TYPE_2D; + imageCreateInfo.format = VK_FORMAT_R32G32B32A32_SFLOAT; + imageCreateInfo.extent.width = target->extent.width / 2; + imageCreateInfo.extent.height = target->extent.height / 2; + imageCreateInfo.extent.depth = 1; + imageCreateInfo.mipLevels = 1; + imageCreateInfo.arrayLayers = 1; + imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + imageCreateInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + + vkCreateImage(device_, &imageCreateInfo, nullptr, &target->nearFieldImages[i]); + + VkMemoryRequirements memoryRequirements = {}; + vkGetImageMemoryRequirements(device_, target->nearFieldImages[i], &memoryRequirements); + + VkMemoryAllocateInfo allocateInfo = {}; + allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + allocateInfo.allocationSize = memoryRequirements.size; + allocateInfo.memoryTypeIndex = findMemoryType(memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + + vkAllocateMemory(device_, &allocateInfo, nullptr, &target->nearFieldMemory[i]); + vkBindImageMemory(device_, target->nearFieldImages[i], target->nearFieldMemory[i], 0); + } + + // near field image view + { + VkImageViewCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + createInfo.image = target->nearFieldImages[i]; + createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; + createInfo.format = VK_FORMAT_R32G32B32A32_SFLOAT; + createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + createInfo.subresourceRange.levelCount = 1; + createInfo.subresourceRange.layerCount = 1; + + vkCreateImageView(device_, &createInfo, nullptr, &target->nearFieldImageViews[i]); + } + + // near field framebuffer + { + VkFramebufferCreateInfo framebufferInfo = {}; + framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; + framebufferInfo.renderPass = dofPass_->getRenderPass(); + framebufferInfo.attachmentCount = 1; + framebufferInfo.pAttachments 
= &target->nearFieldImageViews[i]; + framebufferInfo.width = target->extent.width / 2; + framebufferInfo.height = target->extent.height / 2; + framebufferInfo.layers = 1; + + vkCreateFramebuffer(device_, &framebufferInfo, nullptr, &target->nearFieldFramebuffers[i]); + } + + // far field color + { + VkImageCreateInfo imageCreateInfo = {}; + imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + imageCreateInfo.imageType = VK_IMAGE_TYPE_2D; + imageCreateInfo.format = VK_FORMAT_R32G32B32A32_SFLOAT; + imageCreateInfo.extent.width = target->extent.width / 2; + imageCreateInfo.extent.height = target->extent.height / 2; + imageCreateInfo.extent.depth = 1; + imageCreateInfo.mipLevels = 1; + imageCreateInfo.arrayLayers = 1; + imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + imageCreateInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + + vkCreateImage(device_, &imageCreateInfo, nullptr, &target->farFieldImages[i]); + + VkMemoryRequirements memoryRequirements = {}; + vkGetImageMemoryRequirements(device_, target->farFieldImages[i], &memoryRequirements); + + VkMemoryAllocateInfo allocateInfo = {}; + allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + allocateInfo.allocationSize = memoryRequirements.size; + allocateInfo.memoryTypeIndex = findMemoryType(memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + + vkAllocateMemory(device_, &allocateInfo, nullptr, &target->farFieldMemory[i]); + vkBindImageMemory(device_, target->farFieldImages[i], target->farFieldMemory[i], 0); + } + + // far field image view + { + VkImageViewCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + createInfo.image = target->farFieldImages[i]; + createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; + createInfo.format = VK_FORMAT_R32G32B32A32_SFLOAT; + createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + 
createInfo.subresourceRange.levelCount = 1; + createInfo.subresourceRange.layerCount = 1; + + vkCreateImageView(device_, &createInfo, nullptr, &target->farFieldImageViews[i]); + } + + // far field framebuffer + { + VkFramebufferCreateInfo framebufferInfo = {}; + framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; + framebufferInfo.renderPass = dofPass_->getRenderPass(); + framebufferInfo.attachmentCount = 1; + framebufferInfo.pAttachments = &target->farFieldImageViews[i]; + framebufferInfo.width = target->extent.width / 2; + framebufferInfo.height = target->extent.height / 2; + framebufferInfo.layers = 1; + + vkCreateFramebuffer(device_, &framebufferInfo, nullptr, &target->farFieldFramebuffers[i]); + } } postPass_->createDescriptorSet(target); + dofPass_->createDescriptorSet(target); VkCommandBufferAllocateInfo allocateInfo = {}; allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; @@ -371,6 +501,18 @@ void Renderer::destroyRenderTarget(RenderTarget* target) { vkFreeDescriptorSets(device_, descriptorPool_, target->numImages, target->postSets); for(uint32_t i = 0; i < target->numImages; i++) { + vkDestroyFramebuffer(device_, target->nearFieldFramebuffers[i], nullptr); + + vkDestroyImageView(device_, target->nearFieldImageViews[i], nullptr); + vkFreeMemory(device_, target->nearFieldMemory[i], nullptr); + vkDestroyImage(device_, target->nearFieldImages[i], nullptr); + + vkDestroyFramebuffer(device_, target->farFieldFramebuffers[i], nullptr); + + vkDestroyImageView(device_, target->farFieldImageViews[i], nullptr); + vkFreeMemory(device_, target->farFieldMemory[i], nullptr); + vkDestroyImage(device_, target->farFieldImages[i], nullptr); + vkDestroyFramebuffer(device_, target->offscreenFramebuffers[i], nullptr); vkDestroyImageView(device_, target->offscreenDepthImageViews[i], nullptr); @@ -384,11 +526,23 @@ void Renderer::destroyRenderTarget(RenderTarget* target) { vkDestroyFramebuffer(device_, target->swapchainFramebuffers[i], nullptr); 
vkDestroyImageView(device_, target->swapchainImageViews[i], nullptr); } - + + delete[] target->nearFieldFramebuffers; + delete[] target->nearFieldImageViews; + delete[] target->nearFieldMemory; + delete[] target->nearFieldImages; + + delete[] target->farFieldFramebuffers; + delete[] target->farFieldImageViews; + delete[] target->farFieldMemory; + delete[] target->farFieldImages; + delete[] target->offscreenFramebuffers; + delete[] target->offscreenDepthImageViews; delete[] target->offscreenDepthMemory; delete[] target->offscreenDepthImages; + delete[] target->offscreenColorImageViews; delete[] target->offscreenColorMemory; delete[] target->offscreenColorImages; @@ -397,6 +551,7 @@ void Renderer::destroyRenderTarget(RenderTarget* target) { delete[] target->swapchainImageViews; delete[] target->swapchainImages; + delete[] target->dofSets; delete[] target->postSets; vkDestroySwapchainKHR(device_, target->swapchain, nullptr); @@ -872,8 +1027,8 @@ void Renderer::createPresentationRenderPass() { void Renderer::createDescriptorPool() { const std::array poolSizes = { - VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 15}, - VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 15} + VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 25}, + VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 25} }; VkDescriptorPoolCreateInfo poolInfo = {}; @@ -881,7 +1036,7 @@ void Renderer::createDescriptorPool() { poolInfo.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; poolInfo.poolSizeCount = poolSizes.size(); poolInfo.pPoolSizes = poolSizes.data(); - poolInfo.maxSets = 15; + poolInfo.maxSets = 25; vkCreateDescriptorPool(device_, &poolInfo, nullptr, &descriptorPool_); }