Compare revisions
Commits on Source (4)
Showing with 3277 additions and 1836 deletions
## Update fast_float to be based on 8.0.2
Update the fast_float third-party library to be based
on [fast_float 8.0.2](https://github.com/fastfloat/fast_float/releases/tag/v8.0.2).
@@ -7,9 +7,9 @@ vtk_module_third_party(
SPDX_COPYRIGHT_TEXT
"Copyright (c) 2021 The fast_float authors"
SPDX_DOWNLOAD_LOCATION
"git+https://gitlab.kitware.com/third-party/fast_float.git@for/vtk-20230309-3.9.0"
"git+https://gitlab.kitware.com/third-party/fast_float.git@for/vtk-20250313-8.0.2"
VERSION
"3.9.0"
"8.0.2"
STANDARD_INCLUDE_DIRS
HEADER_ONLY
EXTERNAL
@@ -8,7 +8,7 @@ readonly name="fast_float"
readonly ownership="fast_float Upstream <kwrobot@kitware.com>"
readonly subtree="ThirdParty/$name/vtk$name"
readonly repo="https://gitlab.kitware.com/third-party/fast_float.git"
readonly tag="for/vtk-20230309-3.9.0"
readonly tag="for/vtk-20250313-8.0.2"
readonly paths="
include/fast_float/*
.gitattributes
@@ -3,12 +3,12 @@ vtk_module_install_headers(
FILES
vtkfast_float/ascii_number.h
vtkfast_float/bigint.h
vtkfast_float/constexpr_feature_detect.h
vtkfast_float/decimal_to_binary.h
vtkfast_float/digit_comparison.h
vtkfast_float/fast_float.h
vtkfast_float/fast_table.h
vtkfast_float/float_common.h
vtkfast_float/parse_number.h
vtkfast_float/simple_decimal_conversion.h
vtkfast_float/vtkfast_float_mangle.h
)
@@ -4,3 +4,8 @@ Marcin Wojdyr
Neal Richardson
Tim Paine
Fabio Pellacini
Lénárd Szolnoki
Jan Pharago
Maya Warrier
Taha Khokhar
Anders Dalvander
@@ -37,58 +37,58 @@ constexpr size_t bigint_limbs = bigint_bits / limb_bits;
// vector-like type that is allocated on the stack. the entire
// buffer is pre-allocated, and only the length changes.
template <uint16_t size>
struct stackvec {
template <uint16_t size> struct stackvec {
limb data[size];
// we never need more than 150 limbs
uint16_t length{0};
stackvec() = default;
stackvec(const stackvec &) = delete;
stackvec &operator=(const stackvec &) = delete;
stackvec(stackvec const &) = delete;
stackvec &operator=(stackvec const &) = delete;
stackvec(stackvec &&) = delete;
stackvec &operator=(stackvec &&other) = delete;
// create stack vector from existing limb span.
stackvec(limb_span s) {
FASTFLOAT_CONSTEXPR20 stackvec(limb_span s) {
FASTFLOAT_ASSERT(try_extend(s));
}
limb& operator[](size_t index) noexcept {
FASTFLOAT_CONSTEXPR14 limb &operator[](size_t index) noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
return data[index];
}
const limb& operator[](size_t index) const noexcept {
FASTFLOAT_CONSTEXPR14 const limb &operator[](size_t index) const noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
return data[index];
}
// index from the end of the container
const limb& rindex(size_t index) const noexcept {
FASTFLOAT_CONSTEXPR14 const limb &rindex(size_t index) const noexcept {
FASTFLOAT_DEBUG_ASSERT(index < length);
size_t rindex = length - index - 1;
return data[rindex];
}
// set the length, without bounds checking.
void set_len(size_t len) noexcept {
FASTFLOAT_CONSTEXPR14 void set_len(size_t len) noexcept {
length = uint16_t(len);
}
constexpr size_t len() const noexcept {
return length;
}
constexpr bool is_empty() const noexcept {
return length == 0;
}
constexpr size_t capacity() const noexcept {
return size;
}
constexpr size_t len() const noexcept { return length; }
constexpr bool is_empty() const noexcept { return length == 0; }
constexpr size_t capacity() const noexcept { return size; }
// append item to vector, without bounds checking
void push_unchecked(limb value) noexcept {
FASTFLOAT_CONSTEXPR14 void push_unchecked(limb value) noexcept {
data[length] = value;
length++;
}
// append item to vector, returning if item was added
bool try_push(limb value) noexcept {
FASTFLOAT_CONSTEXPR14 bool try_push(limb value) noexcept {
if (len() < capacity()) {
push_unchecked(value);
return true;
@@ -96,14 +96,16 @@ struct stackvec {
return false;
}
}
// add items to the vector, from a span, without bounds checking
void extend_unchecked(limb_span s) noexcept {
limb* ptr = data + length;
::memcpy((void*)ptr, (const void*)s.ptr, sizeof(limb) * s.len());
FASTFLOAT_CONSTEXPR20 void extend_unchecked(limb_span s) noexcept {
limb *ptr = data + length;
std::copy_n(s.ptr, s.len(), ptr);
set_len(len() + s.len());
}
// try to add items to the vector, returning if items were added
bool try_extend(limb_span s) noexcept {
FASTFLOAT_CONSTEXPR20 bool try_extend(limb_span s) noexcept {
if (len() + s.len() <= capacity()) {
extend_unchecked(s);
return true;
@@ -111,22 +113,25 @@
return false;
}
}
// resize the vector, without bounds checking
// if the new size is longer than the vector, assign value to each
// appended item.
FASTFLOAT_CONSTEXPR20
void resize_unchecked(size_t new_len, limb value) noexcept {
if (new_len > len()) {
size_t count = new_len - len();
limb* first = data + len();
limb* last = first + count;
limb *first = data + len();
limb *last = first + count;
::std::fill(first, last, value);
set_len(new_len);
} else {
set_len(new_len);
}
}
// try to resize the vector, returning if the vector was resized.
bool try_resize(size_t new_len, limb value) noexcept {
FASTFLOAT_CONSTEXPR20 bool try_resize(size_t new_len, limb value) noexcept {
if (new_len > capacity()) {
return false;
} else {
@@ -134,10 +139,11 @@
return true;
}
}
// check if any limbs are non-zero after the given index.
// this needs to be done in reverse order, since the index
// is relative to the most significant limbs.
bool nonzero(size_t index) const noexcept {
FASTFLOAT_CONSTEXPR14 bool nonzero(size_t index) const noexcept {
while (index < len()) {
if (rindex(index) != 0) {
return true;
@@ -146,29 +152,30 @@
}
return false;
}
// normalize the big integer, so most-significant zero limbs are removed.
void normalize() noexcept {
FASTFLOAT_CONSTEXPR14 void normalize() noexcept {
while (len() > 0 && rindex(0) == 0) {
length--;
}
}
};
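The comment at the top of the struct ("the entire buffer is pre-allocated, and only the length changes") is the whole design. A minimal standalone analogue, not the library's actual type (`Limb` and the capacity are stand-ins), showing how every growth path funnels through a checked append while the storage itself never moves:

```cpp
#include <cstdint>
#include <cstdio>

using Limb = uint64_t; // stand-in for fast_float's limb type

template <uint16_t Size> struct StackVec {
  Limb data[Size];    // the entire buffer, pre-allocated on the stack
  uint16_t length{0}; // only this ever changes

  // checked append, mirroring the try_push / push_unchecked split
  bool try_push(Limb v) {
    if (length >= Size) return false;
    data[length++] = v;
    return true;
  }
  // drop most-significant zero limbs, as bigint's invariant requires
  void normalize() {
    while (length > 0 && data[length - 1] == 0) --length;
  }
};

int main() {
  StackVec<4> v;
  v.try_push(42);
  v.try_push(0);
  v.normalize();                               // trailing zero limb removed
  std::printf("len=%u\n", unsigned(v.length)); // prints len=1
}
```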
fastfloat_really_inline
uint64_t empty_hi64(bool& truncated) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 uint64_t
empty_hi64(bool &truncated) noexcept {
truncated = false;
return 0;
}
fastfloat_really_inline
uint64_t uint64_hi64(uint64_t r0, bool& truncated) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 uint64_t
uint64_hi64(uint64_t r0, bool &truncated) noexcept {
truncated = false;
int shl = leading_zeroes(r0);
return r0 << shl;
}
fastfloat_really_inline
uint64_t uint64_hi64(uint64_t r0, uint64_t r1, bool& truncated) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 uint64_t
uint64_hi64(uint64_t r0, uint64_t r1, bool &truncated) noexcept {
int shl = leading_zeroes(r0);
if (shl == 0) {
truncated = r1 != 0;
@@ -180,20 +187,20 @@ uint64_t uint64_hi64(uint64_t r0, uint64_t r1, bool& truncated) noexcept {
}
}
fastfloat_really_inline
uint64_t uint32_hi64(uint32_t r0, bool& truncated) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 uint64_t
uint32_hi64(uint32_t r0, bool &truncated) noexcept {
return uint64_hi64(r0, truncated);
}
fastfloat_really_inline
uint64_t uint32_hi64(uint32_t r0, uint32_t r1, bool& truncated) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 uint64_t
uint32_hi64(uint32_t r0, uint32_t r1, bool &truncated) noexcept {
uint64_t x0 = r0;
uint64_t x1 = r1;
return uint64_hi64((x0 << 32) | x1, truncated);
}
fastfloat_really_inline
uint64_t uint32_hi64(uint32_t r0, uint32_t r1, uint32_t r2, bool& truncated) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 uint64_t
uint32_hi64(uint32_t r0, uint32_t r1, uint32_t r2, bool &truncated) noexcept {
uint64_t x0 = r0;
uint64_t x1 = r1;
uint64_t x2 = r2;
@@ -204,16 +211,17 @@ uint64_t uint32_hi64(uint32_t r0, uint32_t r1, uint32_t r2, bool& truncated) noe
// we want an efficient operation. for msvc, where
// we don't have built-in intrinsics, this is still
// pretty fast.
fastfloat_really_inline
limb scalar_add(limb x, limb y, bool& overflow) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 limb
scalar_add(limb x, limb y, bool &overflow) noexcept {
limb z;
// gcc and clang
#if defined(__has_builtin)
#if __has_builtin(__builtin_add_overflow)
#if __has_builtin(__builtin_add_overflow)
if (!cpp20_and_in_constexpr()) {
overflow = __builtin_add_overflow(x, y, &z);
return z;
#endif
}
#endif
#endif
// generic, this still optimizes correctly on MSVC.
@@ -223,24 +231,24 @@ limb scalar_add(limb x, limb y, bool& overflow) noexcept {
}
// multiply two small integers, getting both the high and low bits.
fastfloat_really_inline
limb scalar_mul(limb x, limb y, limb& carry) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 limb
scalar_mul(limb x, limb y, limb &carry) noexcept {
#ifdef FASTFLOAT_64BIT_LIMB
#if defined(__SIZEOF_INT128__)
#if defined(__SIZEOF_INT128__)
// GCC and clang both define it as an extension.
__uint128_t z = __uint128_t(x) * __uint128_t(y) + __uint128_t(carry);
carry = limb(z >> limb_bits);
return limb(z);
#else
#else
// fallback, no native 128-bit integer multiplication with carry.
// on msvc, this optimizes identically, somehow.
value128 z = full_multiplication(x, y);
bool overflow;
z.low = scalar_add(z.low, carry, overflow);
z.high += uint64_t(overflow); // cannot overflow
z.high += uint64_t(overflow); // cannot overflow
carry = z.high;
return z.low;
#endif
#endif
#else
uint64_t z = uint64_t(x) * uint64_t(y) + uint64_t(carry);
carry = limb(z >> limb_bits);
@@ -251,7 +259,8 @@ limb scalar_mul(limb x, limb y, limb& carry) noexcept {
// add scalar value to bigint starting from offset.
// used in grade school multiplication
template <uint16_t size>
inline bool small_add_from(stackvec<size>& vec, limb y, size_t start) noexcept {
inline FASTFLOAT_CONSTEXPR20 bool small_add_from(stackvec<size> &vec, limb y,
size_t start) noexcept {
size_t index = start;
limb carry = y;
bool overflow;
@@ -268,13 +277,15 @@ inline bool small_add_from(stackvec<size>& vec, limb y, size_t start) noexcept {
// add scalar value to bigint.
template <uint16_t size>
fastfloat_really_inline bool small_add(stackvec<size>& vec, limb y) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 bool
small_add(stackvec<size> &vec, limb y) noexcept {
return small_add_from(vec, y, 0);
}
// multiply bigint by scalar value.
template <uint16_t size>
inline bool small_mul(stackvec<size>& vec, limb y) noexcept {
inline FASTFLOAT_CONSTEXPR20 bool small_mul(stackvec<size> &vec,
limb y) noexcept {
limb carry = 0;
for (size_t index = 0; index < vec.len(); index++) {
vec[index] = scalar_mul(vec[index], y, carry);
@@ -288,11 +299,12 @@ inline bool small_mul(stackvec<size>& vec, limb y) noexcept {
// add bigint to bigint starting from index.
// used in grade school multiplication
template <uint16_t size>
bool large_add_from(stackvec<size>& x, limb_span y, size_t start) noexcept {
FASTFLOAT_CONSTEXPR20 bool large_add_from(stackvec<size> &x, limb_span y,
size_t start) noexcept {
// the effective x buffer is from `xstart..x.len()`, so exit early
// if we can't get that current range.
if (x.len() < start || y.len() > x.len() - start) {
FASTFLOAT_TRY(x.try_resize(y.len() + start, 0));
FASTFLOAT_TRY(x.try_resize(y.len() + start, 0));
}
bool carry = false;
@@ -318,13 +330,14 @@ bool large_add_from(stackvec<size>& x, limb_span y, size_t start) noexcept {
// add bigint to bigint.
template <uint16_t size>
fastfloat_really_inline bool large_add_from(stackvec<size>& x, limb_span y) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 bool
large_add_from(stackvec<size> &x, limb_span y) noexcept {
return large_add_from(x, y, 0);
}
// grade-school multiplication algorithm
template <uint16_t size>
bool long_mul(stackvec<size>& x, limb_span y) noexcept {
FASTFLOAT_CONSTEXPR20 bool long_mul(stackvec<size> &x, limb_span y) noexcept {
limb_span xs = limb_span(x.data, x.len());
stackvec<size> z(xs);
limb_span zs = limb_span(z.data, z.len());
@@ -352,7 +365,7 @@ bool long_mul(stackvec<size>& x, limb_span y) noexcept {
// grade-school multiplication algorithm
template <uint16_t size>
bool large_mul(stackvec<size>& x, limb_span y) noexcept {
FASTFLOAT_CONSTEXPR20 bool large_mul(stackvec<size> &x, limb_span y) noexcept {
if (y.len() == 1) {
FASTFLOAT_TRY(small_mul(x, y[0]));
} else {
@@ -361,21 +374,75 @@ bool large_mul(stackvec<size>& x, limb_span y) noexcept {
return true;
}
template <typename = void> struct pow5_tables {
static constexpr uint32_t large_step = 135;
static constexpr uint64_t small_power_of_5[] = {
1UL,
5UL,
25UL,
125UL,
625UL,
3125UL,
15625UL,
78125UL,
390625UL,
1953125UL,
9765625UL,
48828125UL,
244140625UL,
1220703125UL,
6103515625UL,
30517578125UL,
152587890625UL,
762939453125UL,
3814697265625UL,
19073486328125UL,
95367431640625UL,
476837158203125UL,
2384185791015625UL,
11920928955078125UL,
59604644775390625UL,
298023223876953125UL,
1490116119384765625UL,
7450580596923828125UL,
};
#ifdef FASTFLOAT_64BIT_LIMB
constexpr static limb large_power_of_5[] = {
1414648277510068013UL, 9180637584431281687UL, 4539964771860779200UL,
10482974169319127550UL, 198276706040285095UL};
#else
constexpr static limb large_power_of_5[] = {
4279965485U, 329373468U, 4020270615U, 2137533757U, 4287402176U,
1057042919U, 1071430142U, 2440757623U, 381945767U, 46164893U};
#endif
};
#if FASTFLOAT_DETAIL_MUST_DEFINE_CONSTEXPR_VARIABLE
template <typename T> constexpr uint32_t pow5_tables<T>::large_step;
template <typename T> constexpr uint64_t pow5_tables<T>::small_power_of_5[];
template <typename T> constexpr limb pow5_tables<T>::large_power_of_5[];
#endif
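The `FASTFLOAT_DETAIL_MUST_DEFINE_CONSTEXPR_VARIABLE` block exists because, before C++17, an odr-used `static constexpr` data member still required an out-of-class definition; C++17 made such members implicitly inline, so the definitions become redundant. A minimal sketch of the rule (the names here are illustrative):

```cpp
struct Tables {
  static constexpr int small[3] = {1, 5, 25};
};

#if __cplusplus < 201703L
// Required pre-C++17 whenever `small` is odr-used (bound to a reference,
// passed by const&, ...); from C++17 on, the in-class declaration is
// already a definition and this line is unnecessary.
constexpr int Tables::small[3];
#endif
```

Hoisting the tables into a class template (`pow5_tables<>`) also lets those out-of-class definitions live in the header without violating the one-definition rule, since template member definitions may appear in multiple translation units.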
// big integer type. implements a small subset of big integer
// arithmetic, using simple algorithms since asymptotically
// faster algorithms are slower for a small number of limbs.
// all operations assume the big-integer is normalized.
struct bigint {
struct bigint : pow5_tables<> {
// storage of the limbs, in little-endian order.
stackvec<bigint_limbs> vec;
bigint(): vec() {}
bigint(const bigint &) = delete;
bigint &operator=(const bigint &) = delete;
FASTFLOAT_CONSTEXPR20 bigint() : vec() {}
bigint(bigint const &) = delete;
bigint &operator=(bigint const &) = delete;
bigint(bigint &&) = delete;
bigint &operator=(bigint &&other) = delete;
bigint(uint64_t value): vec() {
FASTFLOAT_CONSTEXPR20 bigint(uint64_t value) : vec() {
#ifdef FASTFLOAT_64BIT_LIMB
vec.push_unchecked(value);
#else
@@ -387,7 +454,7 @@ struct bigint {
// get the high 64 bits from the vector, and if bits were truncated.
// this is to get the significant digits for the float.
uint64_t hi64(bool& truncated) const noexcept {
FASTFLOAT_CONSTEXPR20 uint64_t hi64(bool &truncated) const noexcept {
#ifdef FASTFLOAT_64BIT_LIMB
if (vec.len() == 0) {
return empty_hi64(truncated);
@@ -406,7 +473,8 @@ struct bigint {
} else if (vec.len() == 2) {
return uint32_hi64(vec.rindex(0), vec.rindex(1), truncated);
} else {
uint64_t result = uint32_hi64(vec.rindex(0), vec.rindex(1), vec.rindex(2), truncated);
uint64_t result =
uint32_hi64(vec.rindex(0), vec.rindex(1), vec.rindex(2), truncated);
truncated |= vec.nonzero(3);
return result;
}
@@ -419,7 +487,7 @@ struct bigint {
// positive, this is larger, otherwise they are equal.
// the limbs are stored in little-endian order, so we
// must compare the limbs in reverse order.
int compare(const bigint& other) const noexcept {
FASTFLOAT_CONSTEXPR20 int compare(bigint const &other) const noexcept {
if (vec.len() > other.vec.len()) {
return 1;
} else if (vec.len() < other.vec.len()) {
@@ -440,7 +508,7 @@ struct bigint {
// shift left each limb n bits, carrying over to the new limb
// returns true if we were able to shift all the digits.
bool shl_bits(size_t n) noexcept {
FASTFLOAT_CONSTEXPR20 bool shl_bits(size_t n) noexcept {
// Internally, for each item, we shift left by n, and add the previous
// right shifted limb-bits.
// For example, we transform (for u8) shifted left 2, to:
@@ -466,18 +534,18 @@ struct bigint {
}
// move the limbs left by `n` limbs.
bool shl_limbs(size_t n) noexcept {
FASTFLOAT_CONSTEXPR20 bool shl_limbs(size_t n) noexcept {
FASTFLOAT_DEBUG_ASSERT(n != 0);
if (n + vec.len() > vec.capacity()) {
return false;
} else if (!vec.is_empty()) {
// move limbs
limb* dst = vec.data + n;
const limb* src = vec.data;
::memmove(dst, src, sizeof(limb) * vec.len());
limb *dst = vec.data + n;
limb const *src = vec.data;
std::copy_backward(src, src + vec.len(), dst + vec.len());
// fill in empty limbs
limb* first = vec.data;
limb* last = first + n;
limb *first = vec.data;
limb *last = first + n;
::std::fill(first, last, 0);
vec.set_len(n + vec.len());
return true;
@@ -487,7 +555,7 @@ struct bigint {
}
// move the limbs left by `n` bits.
bool shl(size_t n) noexcept {
FASTFLOAT_CONSTEXPR20 bool shl(size_t n) noexcept {
size_t rem = n % limb_bits;
size_t div = n / limb_bits;
if (rem != 0) {
@@ -500,7 +568,7 @@ struct bigint {
}
// get the number of leading zeros in the bigint.
int ctlz() const noexcept {
FASTFLOAT_CONSTEXPR20 int ctlz() const noexcept {
if (vec.is_empty()) {
return 0;
} else {
@@ -515,45 +583,21 @@ struct bigint {
}
// get the number of bits in the bigint.
int bit_length() const noexcept {
FASTFLOAT_CONSTEXPR20 int bit_length() const noexcept {
int lz = ctlz();
return int(limb_bits * vec.len()) - lz;
}
bool mul(limb y) noexcept {
return small_mul(vec, y);
}
FASTFLOAT_CONSTEXPR20 bool mul(limb y) noexcept { return small_mul(vec, y); }
bool add(limb y) noexcept {
return small_add(vec, y);
}
FASTFLOAT_CONSTEXPR20 bool add(limb y) noexcept { return small_add(vec, y); }
// multiply as if by 2 raised to a power.
bool pow2(uint32_t exp) noexcept {
return shl(exp);
}
FASTFLOAT_CONSTEXPR20 bool pow2(uint32_t exp) noexcept { return shl(exp); }
// multiply as if by 5 raised to a power.
bool pow5(uint32_t exp) noexcept {
FASTFLOAT_CONSTEXPR20 bool pow5(uint32_t exp) noexcept {
// multiply by a power of 5
static constexpr uint32_t large_step = 135;
static constexpr uint64_t small_power_of_5[] = {
1UL, 5UL, 25UL, 125UL, 625UL, 3125UL, 15625UL, 78125UL, 390625UL,
1953125UL, 9765625UL, 48828125UL, 244140625UL, 1220703125UL,
6103515625UL, 30517578125UL, 152587890625UL, 762939453125UL,
3814697265625UL, 19073486328125UL, 95367431640625UL, 476837158203125UL,
2384185791015625UL, 11920928955078125UL, 59604644775390625UL,
298023223876953125UL, 1490116119384765625UL, 7450580596923828125UL,
};
#ifdef FASTFLOAT_64BIT_LIMB
constexpr static limb large_power_of_5[] = {
1414648277510068013UL, 9180637584431281687UL, 4539964771860779200UL,
10482974169319127550UL, 198276706040285095UL};
#else
constexpr static limb large_power_of_5[] = {
4279965485U, 329373468U, 4020270615U, 2137533757U, 4287402176U,
1057042919U, 1071430142U, 2440757623U, 381945767U, 46164893U};
#endif
size_t large_length = sizeof(large_power_of_5) / sizeof(limb);
limb_span large = limb_span(large_power_of_5, large_length);
while (exp >= large_step) {
@@ -572,14 +616,18 @@ struct bigint {
exp -= small_step;
}
if (exp != 0) {
FASTFLOAT_TRY(small_mul(vec, limb(small_power_of_5[exp])));
// Work around clang bug https://godbolt.org/z/zedh7rrhc
// This is similar to https://github.com/llvm/llvm-project/issues/47746,
// except the workaround described there doesn't work here
FASTFLOAT_TRY(small_mul(
vec, limb(((void)small_power_of_5[0], small_power_of_5[exp]))));
}
return true;
}
// multiply as if by 10 raised to a power.
bool pow10(uint32_t exp) noexcept {
FASTFLOAT_CONSTEXPR20 bool pow10(uint32_t exp) noexcept {
FASTFLOAT_TRY(pow5(exp));
return pow2(exp);
}
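A note on the `pow2`/`pow5`/`pow10` trio above: since 10^e = 5^e · 2^e, `pow10` multiplies by 5^e (real limb multiplications) and then handles 2^e with `shl`, a plain shift. A hedged sketch of the decomposition on flat integers (`__uint128_t` is a GCC/Clang extension used here only for headroom):

```cpp
#include <cassert>
#include <cstdint>

// Sketch only: x *= 10^e done as x *= 5^e followed by x <<= e,
// mirroring bigint::pow10 = pow5 + pow2.
static void mul_pow10(__uint128_t& x, uint32_t e) {
  __uint128_t p5 = 1;
  for (uint32_t i = 0; i < e; ++i) p5 *= 5; // the expensive part (pow5)
  x *= p5;
  x <<= e;                                  // pow2 is just a shift (shl)
}

int main() {
  __uint128_t x = 7;
  mul_pow10(x, 6);
  assert(x == (__uint128_t)7 * 1000000); // 7 * 10^6
}
```

The `large_step = 135` constant in `pow5_tables` is the same idea at scale: instead of multiplying by 5 one power at a time, the bigint multiplies by a precomputed 5^135 for large exponent jumps.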
#ifndef FASTFLOAT_CONSTEXPR_FEATURE_DETECT_H
#define FASTFLOAT_CONSTEXPR_FEATURE_DETECT_H
#ifdef __has_include
#if __has_include(<version>)
#include <version>
#endif
#endif
// Testing for https://wg21.link/N3652, adopted in C++14
#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304
#define FASTFLOAT_CONSTEXPR14 constexpr
#else
#define FASTFLOAT_CONSTEXPR14
#endif
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#define FASTFLOAT_HAS_BIT_CAST 1
#else
#define FASTFLOAT_HAS_BIT_CAST 0
#endif
#if defined(__cpp_lib_is_constant_evaluated) && \
__cpp_lib_is_constant_evaluated >= 201811L
#define FASTFLOAT_HAS_IS_CONSTANT_EVALUATED 1
#else
#define FASTFLOAT_HAS_IS_CONSTANT_EVALUATED 0
#endif
#if defined(__cpp_if_constexpr) && __cpp_if_constexpr >= 201606L
#define FASTFLOAT_IF_CONSTEXPR17(x) if constexpr (x)
#else
#define FASTFLOAT_IF_CONSTEXPR17(x) if (x)
#endif
// Testing for relevant C++20 constexpr library features
#if FASTFLOAT_HAS_IS_CONSTANT_EVALUATED && FASTFLOAT_HAS_BIT_CAST && \
defined(__cpp_lib_constexpr_algorithms) && \
__cpp_lib_constexpr_algorithms >= 201806L /*For std::copy and std::fill*/
#define FASTFLOAT_CONSTEXPR20 constexpr
#define FASTFLOAT_IS_CONSTEXPR 1
#else
#define FASTFLOAT_CONSTEXPR20
#define FASTFLOAT_IS_CONSTEXPR 0
#endif
#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define FASTFLOAT_DETAIL_MUST_DEFINE_CONSTEXPR_VARIABLE 0
#else
#define FASTFLOAT_DETAIL_MUST_DEFINE_CONSTEXPR_VARIABLE 1
#endif
#endif // FASTFLOAT_CONSTEXPR_FEATURE_DETECT_H
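The new `constexpr_feature_detect.h` is pure feature testing. The reason `std::bit_cast` (and the constexpr `<algorithm>` functions) gate `FASTFLOAT_CONSTEXPR20` is that the run-time fallbacks, `memcpy` type punning and raw pointer copies, are not allowed in constant evaluation. An illustrative sketch, not the library's code, of the dispatch these macros enable:

```cpp
#include <cstdint>
#include <cstring>
#if defined(__has_include)
#if __has_include(<bit>)
#include <bit>
#endif
#endif

#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#define SKETCH_CONSTEXPR20 constexpr // compile-time path available
#else
#define SKETCH_CONSTEXPR20           // run-time only
#endif

SKETCH_CONSTEXPR20 uint64_t to_bits(double d) {
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
  return std::bit_cast<uint64_t>(d); // usable in constant expressions
#else
  uint64_t u;
  std::memcpy(&u, &d, sizeof d);     // fine at run time, never constexpr
  return u;
#endif
}
```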
@@ -12,27 +12,34 @@
namespace fast_float {
// This will compute or rather approximate w * 5**q and return a pair of 64-bit words approximating
// the result, with the "high" part corresponding to the most significant bits and the
// low part corresponding to the least significant bits.
// This will compute or rather approximate w * 5**q and return a pair of 64-bit
// words approximating the result, with the "high" part corresponding to the
// most significant bits and the low part corresponding to the least significant
// bits.
//
template <int bit_precision>
fastfloat_really_inline
value128 compute_product_approximation(int64_t q, uint64_t w) {
const int index = 2 * int(q - powers::smallest_power_of_five);
// For small values of q, e.g., q in [0,27], the answer is always exact because
// The line value128 firstproduct = full_multiplication(w, power_of_five_128[index]);
// gives the exact answer.
value128 firstproduct = full_multiplication(w, powers::power_of_five_128[index]);
static_assert((bit_precision >= 0) && (bit_precision <= 64), " precision should be in (0,64]");
constexpr uint64_t precision_mask = (bit_precision < 64) ?
(uint64_t(0xFFFFFFFFFFFFFFFF) >> bit_precision)
: uint64_t(0xFFFFFFFFFFFFFFFF);
if((firstproduct.high & precision_mask) == precision_mask) { // could further guard with (lower + w < lower)
// regarding the second product, we only need secondproduct.high, but our expectation is that the compiler will optimize this extra work away if needed.
value128 secondproduct = full_multiplication(w, powers::power_of_five_128[index + 1]);
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 value128
compute_product_approximation(int64_t q, uint64_t w) {
int const index = 2 * int(q - powers::smallest_power_of_five);
// For small values of q, e.g., q in [0,27], the answer is always exact
// because The line value128 firstproduct = full_multiplication(w,
// power_of_five_128[index]); gives the exact answer.
value128 firstproduct =
full_multiplication(w, powers::power_of_five_128[index]);
static_assert((bit_precision >= 0) && (bit_precision <= 64),
" precision should be in (0,64]");
constexpr uint64_t precision_mask =
(bit_precision < 64) ? (uint64_t(0xFFFFFFFFFFFFFFFF) >> bit_precision)
: uint64_t(0xFFFFFFFFFFFFFFFF);
if ((firstproduct.high & precision_mask) ==
precision_mask) { // could further guard with (lower + w < lower)
// regarding the second product, we only need secondproduct.high, but our
// expectation is that the compiler will optimize this extra work away if
// needed.
value128 secondproduct =
full_multiplication(w, powers::power_of_five_128[index + 1]);
firstproduct.low += secondproduct.high;
if(secondproduct.high > firstproduct.low) {
if (secondproduct.high > firstproduct.low) {
firstproduct.high++;
}
}
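Everything in `compute_product_approximation` rests on one primitive, `full_multiplication`, an exact 64×64→128-bit product; the second multiplication is only taken when the low `bit_precision` bits of the high word are all ones, i.e. when more bits could still change the rounding. Where `__uint128_t` exists (GCC/Clang; the library itself also carries MSVC and portable fallbacks), the primitive is a one-liner, sketched here with a stand-in struct:

```cpp
#include <cstdint>

struct U128 { uint64_t low, high; }; // stand-in for fast_float's value128

// Exact 64x64 -> 128-bit multiply; assumes a compiler with __uint128_t.
static U128 full_mul_64(uint64_t a, uint64_t b) {
  __uint128_t p = (__uint128_t)a * b;
  return U128{uint64_t(p), uint64_t(p >> 64)};
}
```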
@@ -48,50 +55,52 @@ namespace detail {
* where
* p = log(5**q)/log(2) = q * log(5)/log(2)
*
* For negative values of q in (-400,0), we have that
* For negative values of q in (-400,0), we have that
* f = (((152170 + 65536) * q ) >> 16);
* is equal to
* is equal to
* -ceil(p) + q
* where
* p = log(5**-q)/log(2) = -q * log(5)/log(2)
*/
constexpr fastfloat_really_inline int32_t power(int32_t q) noexcept {
return (((152170 + 65536) * q) >> 16) + 63;
}
constexpr fastfloat_really_inline int32_t power(int32_t q) noexcept {
return (((152170 + 65536) * q) >> 16) + 63;
}
} // namespace detail
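The constants in `detail::power` are a fixed-point logarithm: 152170 / 2^16 ≈ log2(5), hence (152170 + 65536) / 2^16 ≈ log2(10), so the shifted product is floor(q · log2(10)) across the exponent range the power-of-five tables cover (this relies on the right shift of a negative value being arithmetic, which the library already assumes). A quick standalone check of that claim:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // Roughly the q range fast_float's power-of-five table spans.
  for (int32_t q = -342; q <= 308; ++q) {
    // detail::power without its +63 bias; >> on a negative value is
    // taken to be an arithmetic shift (i.e. floor division by 2^16).
    int32_t f = ((152170 + 65536) * q) >> 16;
    assert(f == int32_t(std::floor(q * std::log2(10.0))));
  }
}
```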
// create an adjusted mantissa, biased by the invalid power2
// for significant digits already multiplied by 10 ** q.
template <typename binary>
fastfloat_really_inline
adjusted_mantissa compute_error_scaled(int64_t q, uint64_t w, int lz) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 adjusted_mantissa
compute_error_scaled(int64_t q, uint64_t w, int lz) noexcept {
int hilz = int(w >> 63) ^ 1;
adjusted_mantissa answer;
answer.mantissa = w << hilz;
int bias = binary::mantissa_explicit_bits() - binary::minimum_exponent();
answer.power2 = int32_t(detail::power(int32_t(q)) + bias - hilz - lz - 62 + invalid_am_bias);
answer.power2 = int32_t(detail::power(int32_t(q)) + bias - hilz - lz - 62 +
invalid_am_bias);
return answer;
}
// w * 10 ** q, without rounding the representation up.
// the power2 in the exponent will be adjusted by invalid_am_bias.
template <typename binary>
fastfloat_really_inline
adjusted_mantissa compute_error(int64_t q, uint64_t w) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa
compute_error(int64_t q, uint64_t w) noexcept {
int lz = leading_zeroes(w);
w <<= lz;
value128 product = compute_product_approximation<binary::mantissa_explicit_bits() + 3>(q, w);
value128 product =
compute_product_approximation<binary::mantissa_explicit_bits() + 3>(q, w);
return compute_error_scaled<binary>(q, product.high, lz);
}
// w * 10 ** q
// The returned value should be a valid ieee64 number that simply need to be packed.
// However, in some very rare cases, the computation will fail. In such cases, we
// return an adjusted_mantissa with a negative power of 2: the caller should recompute
// in such cases.
// Computes w * 10 ** q.
// The returned value should be a valid number that simply needs to be
// packed. However, in some very rare cases, the computation will fail. In such
// cases, we return an adjusted_mantissa with a negative power of 2: the caller
// should recompute in such cases.
template <typename binary>
fastfloat_really_inline
adjusted_mantissa compute_float(int64_t q, uint64_t w) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa
compute_float(int64_t q, uint64_t w) noexcept {
adjusted_mantissa answer;
if ((w == 0) || (q < binary::smallest_power_of_ten())) {
answer.power2 = 0;
@@ -105,7 +114,8 @@ adjusted_mantissa compute_float(int64_t q, uint64_t w) noexcept {
answer.mantissa = 0;
return answer;
}
// At this point in time q is in [powers::smallest_power_of_five, powers::largest_power_of_five].
// At this point in time q is in [powers::smallest_power_of_five,
// powers::largest_power_of_five].
// We want the most significant bit of i to be 1. Shift if needed.
int lz = leading_zeroes(w);
@@ -114,31 +124,32 @@ adjusted_mantissa compute_float(int64_t q, uint64_t w) noexcept {
// The required precision is binary::mantissa_explicit_bits() + 3 because
// 1. We need the implicit bit
// 2. We need an extra bit for rounding purposes
// 3. We might lose a bit due to the "upperbit" routine (result too small, requiring a shift)
value128 product = compute_product_approximation<binary::mantissa_explicit_bits() + 3>(q, w);
if(product.low == 0xFFFFFFFFFFFFFFFF) { // could guard it further
// In some very rare cases, this could happen, in which case we might need a more accurate
// computation that what we can provide cheaply. This is very, very unlikely.
//
const bool inside_safe_exponent = (q >= -27) && (q <= 55); // always good because 5**q <2**128 when q>=0,
// and otherwise, for q<0, we have 5**-q<2**64 and the 128-bit reciprocal allows for exact computation.
if(!inside_safe_exponent) {
return compute_error_scaled<binary>(q, product.high, lz);
}
}
// The "compute_product_approximation" function can be slightly slower than a branchless approach:
// value128 product = compute_product(q, w);
// but in practice, we can win big with the compute_product_approximation if its additional branch
// is easily predicted. Which is best is data specific.
// 3. We might lose a bit due to the "upperbit" routine (result too small,
// requiring a shift)
value128 product =
compute_product_approximation<binary::mantissa_explicit_bits() + 3>(q, w);
// The computed 'product' is always sufficient.
// Mathematical proof:
// Noble Mushtak and Daniel Lemire, Fast Number Parsing Without Fallback (to
// appear) See script/mushtak_lemire.py
// The "compute_product_approximation" function can be slightly slower than a
// branchless approach: value128 product = compute_product(q, w); but in
// practice, we can win big with the compute_product_approximation if its
// additional branch is easily predicted. Which is best is data specific.
int upperbit = int(product.high >> 63);
int shift = upperbit + 64 - binary::mantissa_explicit_bits() - 3;
answer.mantissa = product.high >> (upperbit + 64 - binary::mantissa_explicit_bits() - 3);
answer.mantissa = product.high >> shift;
answer.power2 = int32_t(detail::power(int32_t(q)) + upperbit - lz - binary::minimum_exponent());
answer.power2 = int32_t(detail::power(int32_t(q)) + upperbit - lz -
binary::minimum_exponent());
if (answer.power2 <= 0) { // we have a subnormal?
// Here we have that answer.power2 <= 0 so -answer.power2 >= 0
if(-answer.power2 + 1 >= 64) { // if we have more than 64 bits below the minimum exponent, you have a zero for sure.
if (-answer.power2 + 1 >=
64) { // if we have more than 64 bits below the minimum exponent, you
// have a zero for sure.
answer.power2 = 0;
answer.mantissa = 0;
// result should be zero
......@@ -147,7 +158,8 @@ adjusted_mantissa compute_float(int64_t q, uint64_t w) noexcept {
// next line is safe because -answer.power2 + 1 < 64
answer.mantissa >>= -answer.power2 + 1;
// Thankfully, we can't have both "round-to-even" and subnormals because
// "round-to-even" only occurs for powers close to 0.
// "round-to-even" only occurs for powers close to 0 in the 32-bit and
// and 64-bit case (with no more than 19 digits).
answer.mantissa += (answer.mantissa & 1); // round up
answer.mantissa >>= 1;
// There is a weird scenario where we don't have a subnormal but just.
@@ -157,20 +169,26 @@ adjusted_mantissa compute_float(int64_t q, uint64_t w) noexcept {
// up 0x3fffffffffffff x 2^-1023-53 and once we do, we are no longer
// subnormal, but we can only know this after rounding.
// So we only declare a subnormal if we are smaller than the threshold.
answer.power2 = (answer.mantissa < (uint64_t(1) << binary::mantissa_explicit_bits())) ? 0 : 1;
answer.power2 =
(answer.mantissa < (uint64_t(1) << binary::mantissa_explicit_bits()))
? 0
: 1;
return answer;
}
// usually, we round *up*, but if we fall right in between and we have an
// even basis, we need to round down
// We are only concerned with the cases where 5**q fits in single 64-bit word.
if ((product.low <= 1) && (q >= binary::min_exponent_round_to_even()) && (q <= binary::max_exponent_round_to_even()) &&
((answer.mantissa & 3) == 1) ) { // we may fall between two floats!
if ((product.low <= 1) && (q >= binary::min_exponent_round_to_even()) &&
(q <= binary::max_exponent_round_to_even()) &&
((answer.mantissa & 3) == 1)) { // we may fall between two floats!
// To be in-between two floats we need that in doing
// answer.mantissa = product.high >> (upperbit + 64 - binary::mantissa_explicit_bits() - 3);
// ... we dropped out only zeroes. But if this happened, then we can go back!!!
if((answer.mantissa << (upperbit + 64 - binary::mantissa_explicit_bits() - 3)) == product.high) {
answer.mantissa &= ~uint64_t(1); // flip it so that we do not round up
// answer.mantissa = product.high >> (upperbit + 64 -
// binary::mantissa_explicit_bits() - 3);
// ... we dropped out only zeroes. But if this happened, then we can go
// back!!!
if ((answer.mantissa << shift) == product.high) {
answer.mantissa &= ~uint64_t(1); // flip it so that we do not round up
}
}
@@ -13,17 +13,34 @@
namespace fast_float {
// 1e0 to 1e19
constexpr static uint64_t powers_of_ten_uint64[] = {
1UL, 10UL, 100UL, 1000UL, 10000UL, 100000UL, 1000000UL, 10000000UL, 100000000UL,
1000000000UL, 10000000000UL, 100000000000UL, 1000000000000UL, 10000000000000UL,
100000000000000UL, 1000000000000000UL, 10000000000000000UL, 100000000000000000UL,
1000000000000000000UL, 10000000000000000000UL};
constexpr static uint64_t powers_of_ten_uint64[] = {1UL,
10UL,
100UL,
1000UL,
10000UL,
100000UL,
1000000UL,
10000000UL,
100000000UL,
1000000000UL,
10000000000UL,
100000000000UL,
1000000000000UL,
10000000000000UL,
100000000000000UL,
1000000000000000UL,
10000000000000000UL,
100000000000000000UL,
1000000000000000000UL,
10000000000000000000UL};
// calculate the exponent, in scientific notation, of the number.
// this algorithm is not even close to optimized, but it has no practical
// effect on performance: in order to have a faster algorithm, we'd need
// to slow down performance for faster algorithms, and this is still fast.
fastfloat_really_inline int32_t scientific_exponent(parsed_number_string& num) noexcept {
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 int32_t
scientific_exponent(parsed_number_string_t<UC> &num) noexcept {
uint64_t mantissa = num.mantissa;
int32_t exponent = int32_t(num.exponent);
while (mantissa >= 10000) {
@@ -43,23 +60,30 @@ fastfloat_really_inline int32_t scientific_exponent(parsed_number_string& num) n
// this converts a native floating-point number to an extended-precision float.
template <typename T>
fastfloat_really_inline adjusted_mantissa to_extended(T value) noexcept {
using equiv_uint = typename binary_format<T>::equiv_uint;
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa
to_extended(T value) noexcept {
using equiv_uint = equiv_uint_t<T>;
constexpr equiv_uint exponent_mask = binary_format<T>::exponent_mask();
constexpr equiv_uint mantissa_mask = binary_format<T>::mantissa_mask();
constexpr equiv_uint hidden_bit_mask = binary_format<T>::hidden_bit_mask();
adjusted_mantissa am;
int32_t bias = binary_format<T>::mantissa_explicit_bits() - binary_format<T>::minimum_exponent();
int32_t bias = binary_format<T>::mantissa_explicit_bits() -
binary_format<T>::minimum_exponent();
equiv_uint bits;
#if FASTFLOAT_HAS_BIT_CAST
bits = std::bit_cast<equiv_uint>(value);
#else
::memcpy(&bits, &value, sizeof(T));
#endif
if ((bits & exponent_mask) == 0) {
// denormal
am.power2 = 1 - bias;
am.mantissa = bits & mantissa_mask;
} else {
// normal
am.power2 = int32_t((bits & exponent_mask) >> binary_format<T>::mantissa_explicit_bits());
am.power2 = int32_t((bits & exponent_mask) >>
binary_format<T>::mantissa_explicit_bits());
am.power2 -= bias;
am.mantissa = (bits & mantissa_mask) | hidden_bit_mask;
}
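`to_extended` is standard IEEE-754 field extraction: grab the raw bits (via `std::bit_cast` when available, `memcpy` otherwise), then branch on a zero exponent field; denormals keep only the stored mantissa, normals get the hidden bit OR'd back in and the bias removed. A self-contained sketch for `double` (the names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double value = 1.5;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof value); // the non-bit_cast path

  const uint64_t mantissa_mask = (uint64_t(1) << 52) - 1;
  const uint64_t hidden_bit = uint64_t(1) << 52;
  uint64_t exp_field = (bits >> 52) & 0x7FF;

  uint64_t mantissa = (exp_field == 0)
      ? (bits & mantissa_mask)                // denormal: no hidden bit
      : ((bits & mantissa_mask) | hidden_bit); // normal: restore it

  // 1.5 = 1.1b * 2^0, so exp_field is the bias 1023 and the mantissa
  // is the hidden bit plus one fraction bit: 0x18000000000000.
  std::printf("exp=%llu mant=0x%llx\n", (unsigned long long)exp_field,
              (unsigned long long)mantissa);
}
```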
@@ -71,7 +95,8 @@ fastfloat_really_inline adjusted_mantissa to_extended(T value) noexcept {
// we are given a native float that represents b, so we need to adjust it
// halfway between b and b+u.
template <typename T>
fastfloat_really_inline adjusted_mantissa to_extended_halfway(T value) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa
to_extended_halfway(T value) noexcept {
adjusted_mantissa am = to_extended(value);
am.mantissa <<= 1;
am.mantissa += 1;
@@ -81,14 +106,18 @@ fastfloat_really_inline adjusted_mantissa to_extended_halfway(T value) noexcept
// round an extended-precision float to the nearest machine float.
template <typename T, typename callback>
fastfloat_really_inline void round(adjusted_mantissa& am, callback cb) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 void round(adjusted_mantissa &am,
callback cb) noexcept {
int32_t mantissa_shift = 64 - binary_format<T>::mantissa_explicit_bits() - 1;
if (-am.power2 >= mantissa_shift) {
// have a denormal float
int32_t shift = -am.power2 + 1;
cb(am, std::min<int32_t>(shift, 64));
// check for round-up: if rounding-nearest carried us to the hidden bit.
am.power2 = (am.mantissa < (uint64_t(1) << binary_format<T>::mantissa_explicit_bits())) ? 0 : 1;
am.power2 = (am.mantissa <
(uint64_t(1) << binary_format<T>::mantissa_explicit_bits()))
? 0
: 1;
return;
}
@@ -96,7 +125,8 @@ fastfloat_really_inline void round(adjusted_mantissa& am, callback cb) noexcept
cb(am, mantissa_shift);
// check for carry
if (am.mantissa >= (uint64_t(2) << binary_format<T>::mantissa_explicit_bits())) {
if (am.mantissa >=
(uint64_t(2) << binary_format<T>::mantissa_explicit_bits())) {
am.mantissa = (uint64_t(1) << binary_format<T>::mantissa_explicit_bits());
am.power2++;
}
@@ -110,20 +140,11 @@ fastfloat_really_inline void round(adjusted_mantissa& am, callback cb) noexcept
}
template <typename callback>
fastfloat_really_inline
void round_nearest_tie_even(adjusted_mantissa& am, int32_t shift, callback cb) noexcept {
uint64_t mask;
uint64_t halfway;
if (shift == 64) {
mask = UINT64_MAX;
} else {
mask = (uint64_t(1) << shift) - 1;
}
if (shift == 0) {
halfway = 0;
} else {
halfway = uint64_t(1) << (shift - 1);
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 void
round_nearest_tie_even(adjusted_mantissa &am, int32_t shift,
callback cb) noexcept {
uint64_t const mask = (shift == 64) ? UINT64_MAX : (uint64_t(1) << shift) - 1;
uint64_t const halfway = (shift == 0) ? 0 : uint64_t(1) << (shift - 1);
uint64_t truncated_bits = am.mantissa & mask;
bool is_above = truncated_bits > halfway;
bool is_halfway = truncated_bits == halfway;
@@ -140,7 +161,8 @@ void round_nearest_tie_even(adjusted_mantissa& am, int32_t shift, callback cb) n
am.mantissa += uint64_t(cb(is_odd, is_halfway, is_above));
}
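The rewritten `round_nearest_tie_even` just folds the same `mask`/`halfway` values into conditional expressions; the rule itself is ordinary round-half-to-even on the bits being shifted out. A standalone sketch of that rule on a raw mantissa (not the library's API; `shift` restricted to [1, 63] to keep the sketch simple):

```cpp
#include <cassert>
#include <cstdint>

// Shift m right by `shift` bits, rounding the dropped bits half-to-even.
static uint64_t shift_round_tie_even(uint64_t m, int shift) {
  uint64_t mask = (uint64_t(1) << shift) - 1;    // bits being dropped
  uint64_t halfway = uint64_t(1) << (shift - 1); // exact midpoint
  uint64_t truncated = m & mask;
  uint64_t q = m >> shift;
  bool round_up = truncated > halfway ||             // above the midpoint
                  (truncated == halfway && (q & 1)); // tie: go to even
  return q + round_up;
}

int main() {
  assert(shift_round_tie_even(0b10110, 2) == 0b110); // tie, odd -> round up
  assert(shift_round_tie_even(0b10010, 2) == 0b100); // tie, even -> stay
  assert(shift_round_tie_even(0b10011, 2) == 0b101); // above -> round up
}
```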
fastfloat_really_inline void round_down(adjusted_mantissa& am, int32_t shift) noexcept {
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 void
round_down(adjusted_mantissa &am, int32_t shift) noexcept {
if (shift == 64) {
am.mantissa = 0;
} else {
@@ -149,17 +171,20 @@ fastfloat_really_inline void round_down(adjusted_mantissa& am, int32_t shift) no
am.power2 += shift;
}
fastfloat_really_inline void skip_zeros(const char*& first, const char* last) noexcept {
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 void
skip_zeros(UC const *&first, UC const *last) noexcept {
uint64_t val;
while (std::distance(first, last) >= 8) {
while (!cpp20_and_in_constexpr() &&
std::distance(first, last) >= int_cmp_len<UC>()) {
::memcpy(&val, first, sizeof(uint64_t));
if (val != 0x3030303030303030) {
if (val != int_cmp_zeros<UC>()) {
break;
}
first += 8;
first += int_cmp_len<UC>();
}
while (first != last) {
if (*first != '0') {
if (*first != UC('0')) {
break;
}
first++;
@@ -168,52 +193,62 @@ fastfloat_really_inline void skip_zeros(const char*& first, const char* last) no
// determine if any non-zero digits were truncated.
// all characters must be valid digits.
fastfloat_really_inline bool is_truncated(const char* first, const char* last) noexcept {
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 bool
is_truncated(UC const *first, UC const *last) noexcept {
// do 8-bit optimizations, can just compare to 8 literal 0s.
uint64_t val;
while (std::distance(first, last) >= 8) {
while (!cpp20_and_in_constexpr() &&
std::distance(first, last) >= int_cmp_len<UC>()) {
::memcpy(&val, first, sizeof(uint64_t));
if (val != 0x3030303030303030) {
if (val != int_cmp_zeros<UC>()) {
return true;
}
first += 8;
first += int_cmp_len<UC>();
}
while (first != last) {
if (*first != '0') {
if (*first != UC('0')) {
return true;
}
first++;
++first;
}
return false;
}
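`skip_zeros` and `is_truncated` share one trick: eight ASCII `'0'` bytes form the 64-bit word `0x3030303030303030` regardless of endianness, so a single integer compare scans eight characters; the new `int_cmp_zeros<UC>()` / `int_cmp_len<UC>()` helpers generalize the constant and the stride to wider character types. The 8-bit case, as a minimal sketch:

```cpp
#include <cstdint>
#include <cstring>

// True iff the next 8 chars are all '0'. A word of identical bytes
// reads the same on little- and big-endian machines.
static bool eight_zeros(const char* p) {
  uint64_t word;
  std::memcpy(&word, p, sizeof word); // safe unaligned load
  return word == UINT64_C(0x3030303030303030);
}

int main() {
  return (eight_zeros("00000000") && !eight_zeros("00000001")) ? 0 : 1;
}
```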
fastfloat_really_inline bool is_truncated(byte_span s) noexcept {
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 bool
is_truncated(span<UC const> s) noexcept {
return is_truncated(s.ptr, s.ptr + s.len());
}
fastfloat_really_inline
void parse_eight_digits(const char*& p, limb& value, size_t& counter, size_t& count) noexcept {
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 void
parse_eight_digits(UC const *&p, limb &value, size_t &counter,
size_t &count) noexcept {
value = value * 100000000 + parse_eight_digits_unrolled(p);
p += 8;
counter += 8;
count += 8;
}
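`parse_eight_digits_unrolled`, called above and defined elsewhere in the library, is the classic SWAR digit parser: subtract the ASCII bias from all eight bytes at once, then collapse adjacent digits with a few multiply-and-mask steps. A standalone version of that well-known technique (it assumes a little-endian byte order, which is what these constants encode):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Parse exactly 8 ASCII digits, e.g. "12345678" -> 12345678.
static uint64_t parse8(const char* chars) {
  uint64_t v;
  std::memcpy(&v, chars, 8);         // little-endian load
  v -= UINT64_C(0x3030303030303030); // strip the '0' bias from every byte
  v = (v * 10) + (v >> 8);           // bytes 0,2,4,6 now hold 2-digit values
  // combine the four 2-digit values; the answer lands in the high 32 bits
  v = (((v & UINT64_C(0x000000FF000000FF)) *
        (100 + (UINT64_C(1000000) << 32))) +
       (((v >> 16) & UINT64_C(0x000000FF000000FF)) *
        (1 + (UINT64_C(10000) << 32)))) >> 32;
  return v;
}

int main() {
  assert(parse8("12345678") == 12345678);
  assert(parse8("00000042") == 42);
}
```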
fastfloat_really_inline
void parse_one_digit(const char*& p, limb& value, size_t& counter, size_t& count) noexcept {
value = value * 10 + limb(*p - '0');
template <typename UC>
fastfloat_really_inline FASTFLOAT_CONSTEXPR14 void
parse_one_digit(UC const *&p, limb &value, size_t &counter,
size_t &count) noexcept {
value = value * 10 + limb(*p - UC('0'));
p++;
counter++;
count++;
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 void
add_native(bigint &big, limb power, limb value) noexcept {
big.mul(power);
big.add(value);
}
fastfloat_really_inline FASTFLOAT_CONSTEXPR20 void
round_up_bigint(bigint &big, size_t &count) noexcept {
// need to round-up the digits, but need to avoid rounding
// ....9999 to ...10000, which could cause a false halfway point.
add_native(big, 10, 1);
count++;
}
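// Illustrative: for the digit string "9999", add_native(big, 10, 1) turns the
// big integer 9999 into 99991 (multiply by 10, add 1) instead of incrementing
// the last digit to reach 10000, so the rounded value cannot later be
// mistaken for an exact halfway point.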
// parse the significant digits into a big integer
template <typename UC>
inline FASTFLOAT_CONSTEXPR20 void
parse_mantissa(bigint &result, parsed_number_string_t<UC> &num,
size_t max_digits, size_t &digits) noexcept {
// try to minimize the number of big integer and scalar multiplications.
// therefore, try to parse 8 digits at a time, and multiply by the largest
// scalar value (9 or 19 digits) for each step.
// 9 digits for 32-bit limbs, 19 digits for 64-bit limbs.
size_t counter = 0;
digits = 0;
limb value = 0;
#ifdef FASTFLOAT_64BIT_LIMB
size_t step = 19;
#else
size_t step = 9;
#endif
// process all integer digits.
UC const *p = num.integer.ptr;
UC const *pend = p + num.integer.len();
skip_zeros(p, pend);
// process all digits, in increments of step per loop
while (p != pend) {
while ((std::distance(p, pend) >= 8) && (step - counter >= 8) &&
(max_digits - digits >= 8)) {
parse_eight_digits(p, value, counter, digits);
}
while (counter < step && p != pend && digits < max_digits) {
// ...
}
// process all digits, in increments of step per loop
while (p != pend) {
while ((std::distance(p, pend) >= 8) && (step - counter >= 8) &&
(max_digits - digits >= 8)) {
parse_eight_digits(p, value, counter, digits);
}
while (counter < step && p != pend && digits < max_digits) {
// ...
}
template <typename T>
inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa
positive_digit_comp(bigint &bigmant, int32_t exponent) noexcept {
FASTFLOAT_ASSERT(bigmant.pow10(uint32_t(exponent)));
adjusted_mantissa answer;
bool truncated;
answer.mantissa = bigmant.hi64(truncated);
int bias = binary_format<T>::mantissa_explicit_bits() -
binary_format<T>::minimum_exponent();
answer.power2 = bigmant.bit_length() - 64 + bias;
round<T>(answer, [truncated](adjusted_mantissa &a, int32_t shift) {
round_nearest_tie_even(
a, shift,
[truncated](bool is_odd, bool is_halfway, bool is_above) -> bool {
return is_above || (is_halfway && truncated) ||
(is_odd && is_halfway);
});
});
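// Reading of the logic above (illustrative): hi64() keeps the top 64 bits of
// the scaled big integer and sets `truncated` when lower bits are lost, so a
// halfway bit pattern with truncated decimal digits is treated as "above" and
// rounds up, while an exact tie falls back to round-to-even via is_odd.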
return answer;
}
// ...
// we then need to scale by `2^(f - e)`, and then the two significant digits
// are of the same magnitude.
template <typename T>
inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa negative_digit_comp(
bigint &bigmant, adjusted_mantissa am, int32_t exponent) noexcept {
bigint &real_digits = bigmant;
int32_t real_exp = exponent;
// get the value of `b`, rounded down, and get a bigint representation of b+h
adjusted_mantissa am_b = am;
// gcc7 bug: use a lambda to remove the noexcept qualifier bug with
// -Wnoexcept-type.
round<T>(am_b,
[](adjusted_mantissa &a, int32_t shift) { round_down(a, shift); });
T b;
to_float(false, am_b, b);
adjusted_mantissa theor = to_extended_halfway(b);
// ...
// compare digits, and use it to direct rounding
int ord = real_digits.compare(theor_digits);
adjusted_mantissa answer = am;
round<T>(answer, [ord](adjusted_mantissa &a, int32_t shift) {
round_nearest_tie_even(
a, shift, [ord](bool is_odd, bool _, bool __) -> bool {
(void)_; // not needed, since we've done our comparison
(void)__; // not needed, since we've done our comparison
if (ord > 0) {
return true;
} else if (ord < 0) {
return false;
} else {
return is_odd;
}
});
});
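// Reading of the callback above (illustrative): returning true increments the
// mantissa, so ord > 0 (the real digits exceed the theoretical halfway value)
// rounds up, ord < 0 rounds down, and an exact tie (ord == 0) rounds to even
// via is_odd.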
return answer;
}
// ...
// `b` as a big-integer type, scaled to the same binary exponent as
// the actual digits. we then compare the big integer representations
// of both, and use that to direct rounding.
template <typename T, typename UC>
inline FASTFLOAT_CONSTEXPR20 adjusted_mantissa
digit_comp(parsed_number_string_t<UC> &num, adjusted_mantissa am) noexcept {
// remove the invalid exponent bias
am.power2 -= invalid_am_bias;
// ...
#ifndef FASTFLOAT_FAST_FLOAT_H
#define FASTFLOAT_FAST_FLOAT_H
#include "vtkfast_float_mangle.h"
#include <system_error>
#include "float_common.h"
namespace fast_float {
/**
* This function parses the character sequence [first,last) for a number. It
 * parses floating-point numbers expecting a locale-independent format equivalent
* to what is used by std::strtod in the default ("C") locale. The resulting
 * floating-point value is the closest floating-point value (using either float
* or double), using the "round to even" convention for values that would
* otherwise fall right in-between two values. That is, we provide exact parsing
* according to the IEEE standard.
*
* Given a successful parse, the pointer (`ptr`) in the returned value is set to
* point right after the parsed number, and the `value` referenced is set to the
* parsed value. In case of error, the returned `ec` contains a representative
* error, otherwise the default (`std::errc()`) value is stored.
*
* The implementation does not throw and does not allocate memory (e.g., with
* `new` or `malloc`).
*
* Like the C++17 standard, the `fast_float::from_chars` functions take an
* optional last argument of the type `fast_float::chars_format`. It is a bitset
* value: we check whether `fmt & fast_float::chars_format::fixed` and `fmt &
* fast_float::chars_format::scientific` are set to determine whether we allow
* the fixed point and scientific notation respectively. The default is
* `fast_float::chars_format::general` which allows both `fixed` and
* `scientific`.
*/
template <typename T, typename UC = char,
typename = FASTFLOAT_ENABLE_IF(is_supported_float_type<T>::value)>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars(UC const *first, UC const *last, T &value,
chars_format fmt = chars_format::general) noexcept;
/**
* Like from_chars, but accepts an `options` argument to govern number parsing.
* Both for floating-point types and integer types.
*/
template <typename T, typename UC = char>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars_advanced(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept;
/**
* from_chars for integer types.
*/
template <typename T, typename UC = char,
typename = FASTFLOAT_ENABLE_IF(is_supported_integer_type<T>::value)>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars(UC const *first, UC const *last, T &value, int base = 10) noexcept;
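/**
 * Usage sketch for the integer overload (illustrative only): parsing "ff"
 * with base == 16.
 *
 *   int v;
 *   const char s[] = "ff";
 *   auto r = fast_float::from_chars(s, s + 2, v, 16);
 *   // on success, r.ec == std::errc() and v == 255
 */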
} // namespace fast_float
#include "parse_number.h"
#endif // FASTFLOAT_FAST_FLOAT_H
#ifndef FASTFLOAT_PARSE_NUMBER_H
#define FASTFLOAT_PARSE_NUMBER_H
#include "ascii_number.h"
#include "decimal_to_binary.h"
#include "digit_comparison.h"
#include "float_common.h"
#include <cmath>
#include <cstring>
#include <limits>
#include <system_error>
namespace fast_float {
namespace detail {
/**
* Special case +inf, -inf, nan, infinity, -infinity.
* The case comparisons could be made much faster given that we know that the
 * strings are null-free and fixed.
**/
template <typename T, typename UC>
from_chars_result_t<UC>
FASTFLOAT_CONSTEXPR14 parse_infnan(UC const *first, UC const *last,
T &value, chars_format fmt) noexcept {
from_chars_result_t<UC> answer{};
answer.ptr = first;
answer.ec = std::errc(); // be optimistic
// assume first < last, so dereference without checks;
bool const minusSign = (*first == UC('-'));
// C++17 20.19.3.(7.1) explicitly forbids '+' sign here
if ((*first == UC('-')) ||
(uint64_t(fmt & chars_format::allow_leading_plus) &&
(*first == UC('+')))) {
++first;
}
if (last - first >= 3) {
if (fastfloat_strncasecmp(first, "nan", 3)) {
if (fastfloat_strncasecmp(first, str_const_nan<UC>(), 3)) {
answer.ptr = (first += 3);
value = minusSign ? -std::numeric_limits<T>::quiet_NaN()
: std::numeric_limits<T>::quiet_NaN();
// Check for possible nan(n-char-seq-opt), C++17 20.19.3.7,
// C11 7.20.1.3.3. At least MSVC produces nan(ind) and nan(snan).
if (first != last && *first == UC('(')) {
for (UC const *ptr = first + 1; ptr != last; ++ptr) {
if (*ptr == UC(')')) {
answer.ptr = ptr + 1; // valid nan(n-char-seq-opt)
break;
}
} else if (!((UC('a') <= *ptr && *ptr <= UC('z')) ||
(UC('A') <= *ptr && *ptr <= UC('Z')) ||
(UC('0') <= *ptr && *ptr <= UC('9')) || *ptr == UC('_')))
break; // forbidden char, not nan(n-char-seq-opt)
}
}
return answer;
}
if (fastfloat_strncasecmp(first, "inf", 3)) {
if ((last - first >= 8) && fastfloat_strncasecmp(first + 3, "inity", 5)) {
if (fastfloat_strncasecmp(first, str_const_inf<UC>(), 3)) {
if ((last - first >= 8) &&
fastfloat_strncasecmp(first + 3, str_const_inf<UC>() + 3, 5)) {
answer.ptr = first + 8;
} else {
answer.ptr = first + 3;
}
value = minusSign ? -std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::infinity();
return answer;
}
}
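// Illustrative: for the input "-infinity", the leading '-' is consumed above,
// the next three characters match "inf" and the following five match "inity",
// so answer.ptr ends up past the final 'y' and value is set to
// -std::numeric_limits<T>::infinity().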
answer.ec = std::errc::invalid_argument;
return answer;
}

/**
 * Returns true if the floating-point rounding mode is to 'nearest'.
 * It is the default rounding mode on most systems.
 *
* Credit : @mwalcott3
*/
fastfloat_really_inline bool rounds_to_nearest() noexcept {
// https://lemire.me/blog/2020/06/26/gcc-not-nearest/
#if (FLT_EVAL_METHOD != 1) && (FLT_EVAL_METHOD != 0)
return false;
#endif
// See
// A fast function to check your floating-point rounding mode
// https://lemire.me/blog/2022/11/16/a-fast-function-to-check-your-floating-point-rounding-mode/
//
// The check should be equivalent to `fegetround() == FE_TONEAREST`.
// However, it is expected to be much faster than the fegetround()
// function call.
//
// The volatile keyword prevents the compiler from computing the function
// at compile-time.
// There might be other ways to prevent compile-time optimizations (e.g.,
// asm). The value does not need to be std::numeric_limits<float>::min(), any
// small value so that 1 + x should round to 1 would do (after accounting for
// excess precision, as in 387 instructions).
static float volatile fmin = std::numeric_limits<float>::min();
float fmini = fmin; // we copy it so that it gets loaded at most once.
//
// Explanation:
// Only when fegetround() == FE_TONEAREST do we have that
// fmin + 1.0f == 1.0f - fmin.
//
// FE_UPWARD:
// fmin + 1.0f > 1
// 1.0f - fmin == 1
//
// FE_DOWNWARD or FE_TOWARDZERO:
// fmin + 1.0f == 1
// 1.0f - fmin < 1
//
// Note: This may fail to be accurate if fast-math has been
// enabled, as rounding conventions may not apply.
#ifdef FASTFLOAT_VISUAL_STUDIO
#pragma warning(push)
// todo: is there a VS warning?
// see
// https://stackoverflow.com/questions/46079446/is-there-a-warning-for-floating-point-equality-checking-in-visual-studio-2013
#elif defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfloat-equal"
#endif
return (fmini + 1.0f == 1.0f - fmini);
#ifdef FASTFLOAT_VISUAL_STUDIO
#pragma warning(pop)
#elif defined(__clang__)
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
} // namespace detail
template <typename T> struct from_chars_caller {
template <typename UC>
FASTFLOAT_CONSTEXPR20 static from_chars_result_t<UC>
call(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept {
return from_chars_advanced(first, last, value, options);
}
};
#ifdef __STDCPP_FLOAT32_T__
template <> struct from_chars_caller<std::float32_t> {
template <typename UC>
FASTFLOAT_CONSTEXPR20 static from_chars_result_t<UC>
call(UC const *first, UC const *last, std::float32_t &value,
parse_options_t<UC> options) noexcept {
// if std::float32_t is defined and we are in C++23 mode, the macro is set
// for float32; parse into a float and assign it, since float and float32_t
// are equivalent
float val;
auto ret = from_chars_advanced(first, last, val, options);
value = val;
return ret;
}
};
#endif
#ifdef __STDCPP_FLOAT64_T__
template <> struct from_chars_caller<std::float64_t> {
template <typename UC>
FASTFLOAT_CONSTEXPR20 static from_chars_result_t<UC>
call(UC const *first, UC const *last, std::float64_t &value,
parse_options_t<UC> options) noexcept {
// if std::float64_t is defined and we are in C++23 mode, the macro is set
// for float64; parse into a double and assign it, since double and float64_t
// are equivalent
double val;
auto ret = from_chars_advanced(first, last, val, options);
value = val;
return ret;
}
};
#endif
template <typename T, typename UC, typename>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars(UC const *first, UC const *last, T &value,
chars_format fmt /*= chars_format::general*/) noexcept {
return from_chars_caller<T>::call(first, last, value,
parse_options_t<UC>(fmt));
}
/**
 * This function overload takes a parsed_number_string_t structure that is
 * created and populated either by the from_chars_advanced overload taking a
 * character range and parsing options, or by a custom parsing function
 * implemented by the user.
*/
template <typename T, typename UC>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars_advanced(parsed_number_string_t<UC> &pns, T &value) noexcept {
static_assert(is_supported_float_type<T>::value,
"only some floating-point types are supported");
static_assert(is_supported_char_type<UC>::value,
"only char, wchar_t, char16_t and char32_t are supported");
from_chars_result_t<UC> answer;
answer.ec = std::errc(); // be optimistic
answer.ptr = pns.lastmatch;
// The implementation of the Clinger's fast path is convoluted because
// we want round-to-nearest in all cases, irrespective of the rounding mode
// selected on the thread.
// We proceed optimistically, assuming that detail::rounds_to_nearest()
// returns true.
if (binary_format<T>::min_exponent_fast_path() <= pns.exponent &&
pns.exponent <= binary_format<T>::max_exponent_fast_path() &&
!pns.too_many_digits) {
// Unfortunately, the conventional Clinger's fast path is only possible
// when the system rounds to the nearest float.
//
// ...
// We could check it first (before the previous branch), but
// there might be performance advantages at having the check
// be last.
if (!cpp20_and_in_constexpr() && detail::rounds_to_nearest()) {
// We have that fegetround() == FE_TONEAREST.
// Next is Clinger's fast path.
if (pns.mantissa <= binary_format<T>::max_mantissa_fast_path()) {
value = T(pns.mantissa);
if (pns.exponent < 0) {
value = value / binary_format<T>::exact_power_of_ten(-pns.exponent);
} else {
value = value * binary_format<T>::exact_power_of_ten(pns.exponent);
}
if (pns.negative) {
value = -value;
}
return answer;
}
} else {
// We do not have that fegetround() == FE_TONEAREST.
// Next is a modified Clinger's fast path, inspired by Jakub Jelínek's
// proposal
if (pns.exponent >= 0 &&
pns.mantissa <=
binary_format<T>::max_mantissa_fast_path(pns.exponent)) {
#if defined(__clang__) || defined(FASTFLOAT_32BIT)
// Clang may map 0 to -0.0 when fegetround() == FE_DOWNWARD
if (pns.mantissa == 0) {
value = pns.negative ? T(-0.) : T(0.);
return answer;
}
#endif
value = T(pns.mantissa) *
binary_format<T>::exact_power_of_ten(pns.exponent);
if (pns.negative) {
value = -value;
}
return answer;
}
}
}
adjusted_mantissa am =
compute_float<binary_format<T>>(pns.exponent, pns.mantissa);
if (pns.too_many_digits && am.power2 >= 0) {
if (am != compute_float<binary_format<T>>(pns.exponent, pns.mantissa + 1)) {
am = compute_error<binary_format<T>>(pns.exponent, pns.mantissa);
}
}
// If we called compute_float<binary_format<T>>(pns.exponent, pns.mantissa)
// and we have an invalid power (am.power2 < 0), then we need to go the long
// way around again. This is very uncommon.
if (am.power2 < 0) {
am = digit_comp<T>(pns, am);
}
to_float(pns.negative, am, value);
// Test for over/underflow.
if ((pns.mantissa != 0 && am.mantissa == 0 && am.power2 == 0) ||
am.power2 == binary_format<T>::infinite_power()) {
answer.ec = std::errc::result_out_of_range;
}
return answer;
}
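// Worked example for the Clinger fast path above (illustrative only): parsing
// "2.5" yields pns.mantissa == 25 and pns.exponent == -1, both within the
// double fast-path bounds, so the result is computed as double(25) / 10.0,
// which is exact apart from a single correctly-rounded division.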
template <typename T, typename UC>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars_float_advanced(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept {
static_assert(is_supported_float_type<T>::value,
"only some floating-point types are supported");
static_assert(is_supported_char_type<UC>::value,
"only char, wchar_t, char16_t and char32_t are supported");
chars_format const fmt = detail::adjust_for_feature_macros(options.format);
from_chars_result_t<UC> answer;
if (uint64_t(fmt & chars_format::skip_white_space)) {
while ((first != last) && fast_float::is_space(*first)) {
first++;
}
}
if (first == last) {
answer.ec = std::errc::invalid_argument;
answer.ptr = first;
return answer;
}
parsed_number_string_t<UC> pns =
uint64_t(fmt & detail::basic_json_fmt)
? parse_number_string<true, UC>(first, last, options)
: parse_number_string<false, UC>(first, last, options);
if (!pns.valid) {
if (uint64_t(fmt & chars_format::no_infnan)) {
answer.ec = std::errc::invalid_argument;
answer.ptr = first;
return answer;
} else {
return detail::parse_infnan(first, last, value, fmt);
}
}
// call overload that takes parsed_number_string_t directly.
return from_chars_advanced(pns, value);
}
template <typename T, typename UC, typename>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars(UC const *first, UC const *last, T &value, int base) noexcept {
static_assert(is_supported_integer_type<T>::value,
"only integer types are supported");
static_assert(is_supported_char_type<UC>::value,
"only char, wchar_t, char16_t and char32_t are supported");
parse_options_t<UC> options;
options.base = base;
return from_chars_advanced(first, last, value, options);
}
template <typename T, typename UC>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars_int_advanced(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept {
static_assert(is_supported_integer_type<T>::value,
"only integer types are supported");
static_assert(is_supported_char_type<UC>::value,
"only char, wchar_t, char16_t and char32_t are supported");
chars_format const fmt = detail::adjust_for_feature_macros(options.format);
int const base = options.base;
from_chars_result_t<UC> answer;
if (uint64_t(fmt & chars_format::skip_white_space)) {
while ((first != last) && fast_float::is_space(*first)) {
first++;
}
}
if (first == last || base < 2 || base > 36) {
answer.ec = std::errc::invalid_argument;
answer.ptr = first;
return answer;
}
return parse_int_string(first, last, value, options);
}
template <size_t TypeIx> struct from_chars_advanced_caller {
static_assert(TypeIx > 0, "unsupported type");
};
template <> struct from_chars_advanced_caller<1> {
template <typename T, typename UC>
FASTFLOAT_CONSTEXPR20 static from_chars_result_t<UC>
call(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept {
return from_chars_float_advanced(first, last, value, options);
}
};
template <> struct from_chars_advanced_caller<2> {
template <typename T, typename UC>
FASTFLOAT_CONSTEXPR20 static from_chars_result_t<UC>
call(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept {
return from_chars_int_advanced(first, last, value, options);
}
};
template <typename T, typename UC>
FASTFLOAT_CONSTEXPR20 from_chars_result_t<UC>
from_chars_advanced(UC const *first, UC const *last, T &value,
parse_options_t<UC> options) noexcept {
return from_chars_advanced_caller<
size_t(is_supported_float_type<T>::value) +
2 * size_t(is_supported_integer_type<T>::value)>::call(first, last, value,
options);
}
} // namespace fast_float
#endif
#ifndef FASTFLOAT_GENERIC_DECIMAL_TO_BINARY_H
#define FASTFLOAT_GENERIC_DECIMAL_TO_BINARY_H
/**
* This code is meant to handle the case where we have more than 19 digits.
*
* It is based on work by Nigel Tao (at https://github.com/google/wuffs/)
* who credits Ken Thompson for the design (via a reference to the Go source
* code).
*
* Rob Pike suggested that this algorithm be called "Simple Decimal Conversion".
*
* It is probably not very fast but it is a fallback that should almost never
* be used in real life. Though it is not fast, it is "easily" understood and debugged.
**/
#include "ascii_number.h"
#include "decimal_to_binary.h"
#include <cstdint>
namespace fast_float {
namespace detail {
// remove all final zeroes
inline void trim(decimal &h) {
while ((h.num_digits > 0) && (h.digits[h.num_digits - 1] == 0)) {
h.num_digits--;
}
}
inline uint32_t number_of_digits_decimal_left_shift(const decimal &h, uint32_t shift) {
shift &= 63;
constexpr uint16_t number_of_digits_decimal_left_shift_table[65] = {
0x0000, 0x0800, 0x0801, 0x0803, 0x1006, 0x1009, 0x100D, 0x1812, 0x1817,
0x181D, 0x2024, 0x202B, 0x2033, 0x203C, 0x2846, 0x2850, 0x285B, 0x3067,
0x3073, 0x3080, 0x388E, 0x389C, 0x38AB, 0x38BB, 0x40CC, 0x40DD, 0x40EF,
0x4902, 0x4915, 0x4929, 0x513E, 0x5153, 0x5169, 0x5180, 0x5998, 0x59B0,
0x59C9, 0x61E3, 0x61FD, 0x6218, 0x6A34, 0x6A50, 0x6A6D, 0x6A8B, 0x72AA,
0x72C9, 0x72E9, 0x7B0A, 0x7B2B, 0x7B4D, 0x8370, 0x8393, 0x83B7, 0x83DC,
0x8C02, 0x8C28, 0x8C4F, 0x9477, 0x949F, 0x94C8, 0x9CF2, 0x051C, 0x051C,
0x051C, 0x051C,
};
uint32_t x_a = number_of_digits_decimal_left_shift_table[shift];
uint32_t x_b = number_of_digits_decimal_left_shift_table[shift + 1];
uint32_t num_new_digits = x_a >> 11;
uint32_t pow5_a = 0x7FF & x_a;
uint32_t pow5_b = 0x7FF & x_b;
constexpr uint8_t
number_of_digits_decimal_left_shift_table_powers_of_5[0x051C] = {
5, 2, 5, 1, 2, 5, 6, 2, 5, 3, 1, 2, 5, 1, 5, 6, 2, 5, 7, 8, 1, 2, 5, 3,
9, 0, 6, 2, 5, 1, 9, 5, 3, 1, 2, 5, 9, 7, 6, 5, 6, 2, 5, 4, 8, 8, 2, 8,
1, 2, 5, 2, 4, 4, 1, 4, 0, 6, 2, 5, 1, 2, 2, 0, 7, 0, 3, 1, 2, 5, 6, 1,
0, 3, 5, 1, 5, 6, 2, 5, 3, 0, 5, 1, 7, 5, 7, 8, 1, 2, 5, 1, 5, 2, 5, 8,
7, 8, 9, 0, 6, 2, 5, 7, 6, 2, 9, 3, 9, 4, 5, 3, 1, 2, 5, 3, 8, 1, 4, 6,
9, 7, 2, 6, 5, 6, 2, 5, 1, 9, 0, 7, 3, 4, 8, 6, 3, 2, 8, 1, 2, 5, 9, 5,
3, 6, 7, 4, 3, 1, 6, 4, 0, 6, 2, 5, 4, 7, 6, 8, 3, 7, 1, 5, 8, 2, 0, 3,
1, 2, 5, 2, 3, 8, 4, 1, 8, 5, 7, 9, 1, 0, 1, 5, 6, 2, 5, 1, 1, 9, 2, 0,
9, 2, 8, 9, 5, 5, 0, 7, 8, 1, 2, 5, 5, 9, 6, 0, 4, 6, 4, 4, 7, 7, 5, 3,
9, 0, 6, 2, 5, 2, 9, 8, 0, 2, 3, 2, 2, 3, 8, 7, 6, 9, 5, 3, 1, 2, 5, 1,
4, 9, 0, 1, 1, 6, 1, 1, 9, 3, 8, 4, 7, 6, 5, 6, 2, 5, 7, 4, 5, 0, 5, 8,
0, 5, 9, 6, 9, 2, 3, 8, 2, 8, 1, 2, 5, 3, 7, 2, 5, 2, 9, 0, 2, 9, 8, 4,
6, 1, 9, 1, 4, 0, 6, 2, 5, 1, 8, 6, 2, 6, 4, 5, 1, 4, 9, 2, 3, 0, 9, 5,
7, 0, 3, 1, 2, 5, 9, 3, 1, 3, 2, 2, 5, 7, 4, 6, 1, 5, 4, 7, 8, 5, 1, 5,
6, 2, 5, 4, 6, 5, 6, 6, 1, 2, 8, 7, 3, 0, 7, 7, 3, 9, 2, 5, 7, 8, 1, 2,
5, 2, 3, 2, 8, 3, 0, 6, 4, 3, 6, 5, 3, 8, 6, 9, 6, 2, 8, 9, 0, 6, 2, 5,
1, 1, 6, 4, 1, 5, 3, 2, 1, 8, 2, 6, 9, 3, 4, 8, 1, 4, 4, 5, 3, 1, 2, 5,
5, 8, 2, 0, 7, 6, 6, 0, 9, 1, 3, 4, 6, 7, 4, 0, 7, 2, 2, 6, 5, 6, 2, 5,
2, 9, 1, 0, 3, 8, 3, 0, 4, 5, 6, 7, 3, 3, 7, 0, 3, 6, 1, 3, 2, 8, 1, 2,
5, 1, 4, 5, 5, 1, 9, 1, 5, 2, 2, 8, 3, 6, 6, 8, 5, 1, 8, 0, 6, 6, 4, 0,
6, 2, 5, 7, 2, 7, 5, 9, 5, 7, 6, 1, 4, 1, 8, 3, 4, 2, 5, 9, 0, 3, 3, 2,
0, 3, 1, 2, 5, 3, 6, 3, 7, 9, 7, 8, 8, 0, 7, 0, 9, 1, 7, 1, 2, 9, 5, 1,
6, 6, 0, 1, 5, 6, 2, 5, 1, 8, 1, 8, 9, 8, 9, 4, 0, 3, 5, 4, 5, 8, 5, 6,
4, 7, 5, 8, 3, 0, 0, 7, 8, 1, 2, 5, 9, 0, 9, 4, 9, 4, 7, 0, 1, 7, 7, 2,
9, 2, 8, 2, 3, 7, 9, 1, 5, 0, 3, 9, 0, 6, 2, 5, 4, 5, 4, 7, 4, 7, 3, 5,
0, 8, 8, 6, 4, 6, 4, 1, 1, 8, 9, 5, 7, 5, 1, 9, 5, 3, 1, 2, 5, 2, 2, 7,
3, 7, 3, 6, 7, 5, 4, 4, 3, 2, 3, 2, 0, 5, 9, 4, 7, 8, 7, 5, 9, 7, 6, 5,
6, 2, 5, 1, 1, 3, 6, 8, 6, 8, 3, 7, 7, 2, 1, 6, 1, 6, 0, 2, 9, 7, 3, 9,
3, 7, 9, 8, 8, 2, 8, 1, 2, 5, 5, 6, 8, 4, 3, 4, 1, 8, 8, 6, 0, 8, 0, 8,
0, 1, 4, 8, 6, 9, 6, 8, 9, 9, 4, 1, 4, 0, 6, 2, 5, 2, 8, 4, 2, 1, 7, 0,
9, 4, 3, 0, 4, 0, 4, 0, 0, 7, 4, 3, 4, 8, 4, 4, 9, 7, 0, 7, 0, 3, 1, 2,
5, 1, 4, 2, 1, 0, 8, 5, 4, 7, 1, 5, 2, 0, 2, 0, 0, 3, 7, 1, 7, 4, 2, 2,
4, 8, 5, 3, 5, 1, 5, 6, 2, 5, 7, 1, 0, 5, 4, 2, 7, 3, 5, 7, 6, 0, 1, 0,
0, 1, 8, 5, 8, 7, 1, 1, 2, 4, 2, 6, 7, 5, 7, 8, 1, 2, 5, 3, 5, 5, 2, 7,
1, 3, 6, 7, 8, 8, 0, 0, 5, 0, 0, 9, 2, 9, 3, 5, 5, 6, 2, 1, 3, 3, 7, 8,
9, 0, 6, 2, 5, 1, 7, 7, 6, 3, 5, 6, 8, 3, 9, 4, 0, 0, 2, 5, 0, 4, 6, 4,
6, 7, 7, 8, 1, 0, 6, 6, 8, 9, 4, 5, 3, 1, 2, 5, 8, 8, 8, 1, 7, 8, 4, 1,
9, 7, 0, 0, 1, 2, 5, 2, 3, 2, 3, 3, 8, 9, 0, 5, 3, 3, 4, 4, 7, 2, 6, 5,
6, 2, 5, 4, 4, 4, 0, 8, 9, 2, 0, 9, 8, 5, 0, 0, 6, 2, 6, 1, 6, 1, 6, 9,
4, 5, 2, 6, 6, 7, 2, 3, 6, 3, 2, 8, 1, 2, 5, 2, 2, 2, 0, 4, 4, 6, 0, 4,
9, 2, 5, 0, 3, 1, 3, 0, 8, 0, 8, 4, 7, 2, 6, 3, 3, 3, 6, 1, 8, 1, 6, 4,
0, 6, 2, 5, 1, 1, 1, 0, 2, 2, 3, 0, 2, 4, 6, 2, 5, 1, 5, 6, 5, 4, 0, 4,
2, 3, 6, 3, 1, 6, 6, 8, 0, 9, 0, 8, 2, 0, 3, 1, 2, 5, 5, 5, 5, 1, 1, 1,
5, 1, 2, 3, 1, 2, 5, 7, 8, 2, 7, 0, 2, 1, 1, 8, 1, 5, 8, 3, 4, 0, 4, 5,
4, 1, 0, 1, 5, 6, 2, 5, 2, 7, 7, 5, 5, 5, 7, 5, 6, 1, 5, 6, 2, 8, 9, 1,
3, 5, 1, 0, 5, 9, 0, 7, 9, 1, 7, 0, 2, 2, 7, 0, 5, 0, 7, 8, 1, 2, 5, 1,
3, 8, 7, 7, 7, 8, 7, 8, 0, 7, 8, 1, 4, 4, 5, 6, 7, 5, 5, 2, 9, 5, 3, 9,
5, 8, 5, 1, 1, 3, 5, 2, 5, 3, 9, 0, 6, 2, 5, 6, 9, 3, 8, 8, 9, 3, 9, 0,
3, 9, 0, 7, 2, 2, 8, 3, 7, 7, 6, 4, 7, 6, 9, 7, 9, 2, 5, 5, 6, 7, 6, 2,
6, 9, 5, 3, 1, 2, 5, 3, 4, 6, 9, 4, 4, 6, 9, 5, 1, 9, 5, 3, 6, 1, 4, 1,
8, 8, 8, 2, 3, 8, 4, 8, 9, 6, 2, 7, 8, 3, 8, 1, 3, 4, 7, 6, 5, 6, 2, 5,
1, 7, 3, 4, 7, 2, 3, 4, 7, 5, 9, 7, 6, 8, 0, 7, 0, 9, 4, 4, 1, 1, 9, 2,
4, 4, 8, 1, 3, 9, 1, 9, 0, 6, 7, 3, 8, 2, 8, 1, 2, 5, 8, 6, 7, 3, 6, 1,
7, 3, 7, 9, 8, 8, 4, 0, 3, 5, 4, 7, 2, 0, 5, 9, 6, 2, 2, 4, 0, 6, 9, 5,
9, 5, 3, 3, 6, 9, 1, 4, 0, 6, 2, 5,
};
const uint8_t *pow5 =
&number_of_digits_decimal_left_shift_table_powers_of_5[pow5_a];
uint32_t i = 0;
uint32_t n = pow5_b - pow5_a;
for (; i < n; i++) {
if (i >= h.num_digits) {
return num_new_digits - 1;
} else if (h.digits[i] == pow5[i]) {
continue;
} else if (h.digits[i] < pow5[i]) {
return num_new_digits - 1;
} else {
return num_new_digits;
}
}
return num_new_digits;
}
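// Illustrative reading of the tables above for shift == 2 (multiply by 4):
// table[2] == 0x0801 encodes num_new_digits == 1 with a pow5 offset of 1, and
// table[3] == 0x0803 ends the slice, so pow5 points at the digits {2, 5},
// i.e. 5^2 == 25. A digit string >= "25" gains one decimal digit when
// multiplied by 4 (e.g. 25 * 4 == 100), while a smaller one does not
// (24 * 4 == 96), matching the comparison loop above.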
inline uint64_t round(decimal &h) {
if ((h.num_digits == 0) || (h.decimal_point < 0)) {
return 0;
} else if (h.decimal_point > 18) {
return UINT64_MAX;
}
// at this point, we know that h.decimal_point >= 0
uint32_t dp = uint32_t(h.decimal_point);
uint64_t n = 0;
for (uint32_t i = 0; i < dp; i++) {
n = (10 * n) + ((i < h.num_digits) ? h.digits[i] : 0);
}
bool round_up = false;
if (dp < h.num_digits) {
round_up = h.digits[dp] >= 5; // normally, we round up
// but we may need to round to even!
if ((h.digits[dp] == 5) && (dp + 1 == h.num_digits)) {
round_up = h.truncated || ((dp > 0) && (1 & h.digits[dp - 1]));
}
}
if (round_up) {
n++;
}
return n;
}
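// Worked example (illustrative): for h holding digits {2, 5} with
// h.decimal_point == 1 (the value 2.5), n starts as 2; digits[1] == 5 is a
// trailing halfway digit, so round_up becomes h.truncated || (digits[0] & 1),
// which is false for the even leading digit 2, and round(h) returns 2
// (round-half-to-even).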
// computes h * 2^shift
inline void decimal_left_shift(decimal &h, uint32_t shift) {
if (h.num_digits == 0) {
return;
}
uint32_t num_new_digits = number_of_digits_decimal_left_shift(h, shift);
int32_t read_index = int32_t(h.num_digits - 1);
uint32_t write_index = h.num_digits - 1 + num_new_digits;
uint64_t n = 0;
while (read_index >= 0) {
n += uint64_t(h.digits[read_index]) << shift;
uint64_t quotient = n / 10;
uint64_t remainder = n - (10 * quotient);
if (write_index < max_digits) {
h.digits[write_index] = uint8_t(remainder);
} else if (remainder > 0) {
h.truncated = true;
}
n = quotient;
write_index--;
read_index--;
}
while (n > 0) {
uint64_t quotient = n / 10;
uint64_t remainder = n - (10 * quotient);
if (write_index < max_digits) {
h.digits[write_index] = uint8_t(remainder);
} else if (remainder > 0) {
h.truncated = true;
}
n = quotient;
write_index--;
}
h.num_digits += num_new_digits;
if (h.num_digits > max_digits) {
h.num_digits = max_digits;
}
h.decimal_point += int32_t(num_new_digits);
trim(h);
}
// computes h * 2^-shift
inline void decimal_right_shift(decimal &h, uint32_t shift) {
uint32_t read_index = 0;
uint32_t write_index = 0;
uint64_t n = 0;
while ((n >> shift) == 0) {
if (read_index < h.num_digits) {
n = (10 * n) + h.digits[read_index++];
} else if (n == 0) {
return;
} else {
while ((n >> shift) == 0) {
n = 10 * n;
read_index++;
}
break;
}
}
h.decimal_point -= int32_t(read_index - 1);
if (h.decimal_point < -decimal_point_range) { // it is zero
h.num_digits = 0;
h.decimal_point = 0;
h.negative = false;
h.truncated = false;
return;
}
uint64_t mask = (uint64_t(1) << shift) - 1;
while (read_index < h.num_digits) {
uint8_t new_digit = uint8_t(n >> shift);
n = (10 * (n & mask)) + h.digits[read_index++];
h.digits[write_index++] = new_digit;
}
while (n > 0) {
uint8_t new_digit = uint8_t(n >> shift);
n = 10 * (n & mask);
if (write_index < max_digits) {
h.digits[write_index++] = new_digit;
} else if (new_digit > 0) {
h.truncated = true;
}
}
h.num_digits = write_index;
trim(h);
}
} // namespace detail
template <typename binary>
adjusted_mantissa compute_float(decimal &d) {
adjusted_mantissa answer;
if (d.num_digits == 0) {
// should be zero
answer.power2 = 0;
answer.mantissa = 0;
return answer;
}
// At this point, going further, we can assume that d.num_digits > 0.
//
// We want to guard against excessive decimal point values because
// they can result in long running times. Indeed, we do
// shifts by at most 60 bits. We have that log(10**400)/log(2**60) ~= 22
// which is fine, but log(10**299995)/log(2**60) ~= 16609 which is not
// fine (runs for a long time).
//
if(d.decimal_point < -324) {
// We have something smaller than 1e-324 which is always zero
// in binary64 and binary32.
// It should be zero.
answer.power2 = 0;
answer.mantissa = 0;
return answer;
} else if(d.decimal_point >= 310) {
// We have something at least as large as 0.1e310 which is
// always infinite.
answer.power2 = binary::infinite_power();
answer.mantissa = 0;
return answer;
}
constexpr uint32_t max_shift = 60;
constexpr uint32_t num_powers = 19;
constexpr uint8_t decimal_powers[19] = {
0, 3, 6, 9, 13, 16, 19, 23, 26, 29, //
33, 36, 39, 43, 46, 49, 53, 56, 59, //
};
int32_t exp2 = 0;
while (d.decimal_point > 0) {
uint32_t n = uint32_t(d.decimal_point);
uint32_t shift = (n < num_powers) ? decimal_powers[n] : max_shift;
detail::decimal_right_shift(d, shift);
if (d.decimal_point < -decimal_point_range) {
// should be zero
answer.power2 = 0;
answer.mantissa = 0;
return answer;
}
exp2 += int32_t(shift);
}
// We shift left toward [1/2 ... 1].
while (d.decimal_point <= 0) {
uint32_t shift;
if (d.decimal_point == 0) {
if (d.digits[0] >= 5) {
break;
}
shift = (d.digits[0] < 2) ? 2 : 1;
} else {
uint32_t n = uint32_t(-d.decimal_point);
shift = (n < num_powers) ? decimal_powers[n] : max_shift;
}
detail::decimal_left_shift(d, shift);
if (d.decimal_point > decimal_point_range) {
// we want to get infinity:
answer.power2 = binary::infinite_power();
answer.mantissa = 0;
return answer;
}
exp2 -= int32_t(shift);
}
// We are now in the range [1/2 ... 1] but the binary format uses [1 ... 2].
exp2--;
constexpr int32_t minimum_exponent = binary::minimum_exponent();
while ((minimum_exponent + 1) > exp2) {
uint32_t n = uint32_t((minimum_exponent + 1) - exp2);
if (n > max_shift) {
n = max_shift;
}
detail::decimal_right_shift(d, n);
exp2 += int32_t(n);
}
if ((exp2 - minimum_exponent) >= binary::infinite_power()) {
answer.power2 = binary::infinite_power();
answer.mantissa = 0;
return answer;
}
const int mantissa_size_in_bits = binary::mantissa_explicit_bits() + 1;
detail::decimal_left_shift(d, mantissa_size_in_bits);
uint64_t mantissa = detail::round(d);
// It is possible that we have an overflow, in which case we need
// to shift back.
if(mantissa >= (uint64_t(1) << mantissa_size_in_bits)) {
detail::decimal_right_shift(d, 1);
exp2 += 1;
mantissa = detail::round(d);
if ((exp2 - minimum_exponent) >= binary::infinite_power()) {
answer.power2 = binary::infinite_power();
answer.mantissa = 0;
return answer;
}
}
answer.power2 = exp2 - binary::minimum_exponent();
if(mantissa < (uint64_t(1) << binary::mantissa_explicit_bits())) { answer.power2--; }
answer.mantissa = mantissa & ((uint64_t(1) << binary::mantissa_explicit_bits()) - 1);
return answer;
}
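// Summary of the invariant above (illustrative): compute_float maintains
// value == d * 2^exp2; each decimal_right_shift divides d by a power of two
// while adding to exp2, each decimal_left_shift does the opposite, and once d
// lies in [1/2, 1) the digits are shifted left by mantissa_explicit_bits() + 1
// bits and rounded to produce the binary mantissa.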
template <typename binary>
adjusted_mantissa parse_long_mantissa(const char *first, const char* last, parse_options options) {
decimal d = parse_decimal(first, last, options);
return compute_float<binary>(d);
}
} // namespace fast_float
#endif