Calling deco at the first Deco Stop

Disclaimer: These numbers are most certainly “WRONG!” You should NOT use this post or anything from a random online tool to plan or execute dives. You WILL get bent. Not “may”, but WILL. You know this. DO NOT rely on this tool.

Here’s a scenario that should never happen, but to quote the eloquent Mr. Mackey, “There are no stupid questions. Only stupid people.”

I decided to answer the following question, knowing full well that it sounds stupid:

Suppose you make it to your first deco stop and want to adjust your deco based on what happened between your max depth, and hitting the deco stop. Say you had a reg failure and had to fix it. Or a scooter got entangled in the line. Or you had a reverse squeeze so you had to pause for a bit longer. Now you’re AT your deco stop, and you’ve got two things – your average depth by the time you hit the stop, and your total runtime for the dive.

Given those two numbers, if you had to calculate deco, how much would it vary based on calculating it as a true multi-level dive where you accounted for the pause as a scheduled “level stop”?

Side note: Once again, remember the purpose of these questions isn’t about what should happen or should never happen, but to create a strong feedback loop in ensuring what should NEVER happen as a function of the cost/risk you incur when you do so. Basically if it should turn out that stopping mid way, and not keeping track of where you stopped and how long you stopped is going to add a ridiculous increase in deco, then you HAVE to make sure you remember it. If it should turn out, it doesn’t add more than a few minutes of deco based on your avg depth observed at your gas switch, then you can in an emergency, get the hell up to that gas switch, switch over, and run the numbers based on what you saw then.

The Program

Here’s a quick program I whipped up which lets you do just that:

//This function is a utility to get total dive time out of all segments
/**
 * Sum the `time` field of every segment in a dive plan.
 * @param {Array<{time: number}>} diveplan - segments produced by the planner
 * @returns {number} total dive time (minutes); 0 for an empty plan
 */
var calculateDiveTime = function(diveplan) {
 //reduce replaces the manual index loop; the 0 seed handles empty plans
 return diveplan.reduce(function(totalTime, segment) {
  return totalTime + segment.time;
 }, 0);
}

//Compare the deco obligation of a true multi-level dive against the same dive
//collapsed to one flat segment at its time-weighted average depth.
//Outer loop: minutes spent at the intermediate level; inner loop: that level's
//depth in feet. One CSV row is printed per combination.
var buhlmann = dive.deco.buhlmann();
console.log("Second Level depth, Avg Depth at Deco Stop, Multi-Level Deco Time, Avg Depth Deco Time")
for (var nextLevelTime = 5; nextLevelTime <= 30; nextLevelTime += 5) {
 for (var nextLevel=190; nextLevel > 70; nextLevel -= 10) {
 //Plan 1: the multi-level dive. ZH16BTissues presumably selects the
 //Buhlmann ZH-L16B tissue set - confirm against the dive library docs.
 var plan = new buhlmann.plan(buhlmann.ZH16BTissues);
 plan.addBottomGas("18/45", 0.21, 0.35); //trimix 18/45: fO2 0.21, fHe 0.35
 plan.addDecoGas("50%", 0.50, 0);
 plan.addDecoGas("100%", 1.0, 0);
 //Descend 0 -> 200ft over 5 minutes, then 25 minutes flat at 200ft.
 plan.addDepthChange(0, dive.feetToMeters(200), "18/45", 5);
 plan.addFlat(dive.feetToMeters(200), "18/45", 25);
 var bottomTime = 30; //5 + 25 to start with
 //cumulativeDepth accumulates depth*time (foot-minutes); divided by
 //bottomTime later it yields the time-weighted average depth.
 var cumulativeDepth = (25 * 200) + (5 * 100); //Avg depth so far (25 mins at 200, and 5 minutes at 100 - which is mid-point when descending).
 
 //add a add depth change to next level
 var depthDiff = 200 - nextLevel;
 var timeToLevel = depthDiff/60; //assumes a 60 ft/min ascent rate
 plan.addDepthChange(dive.feetToMeters(200), dive.feetToMeters(nextLevel), "18/45", timeToLevel);
 bottomTime += timeToLevel;
 cumulativeDepth += (timeToLevel * (nextLevel+(depthDiff/2))); //leg midpoint depth
 
 //add a segment at next level
 plan.addFlat(dive.feetToMeters(nextLevel), "18/45", nextLevelTime);
 bottomTime += nextLevelTime;
 cumulativeDepth += (nextLevelTime * nextLevel);
 
 //Ascend from the level to the first deco stop at 70ft.
 depthDiff = nextLevel - 70;
 timeToLevel = depthDiff/60; //This is aggressive since we won't hit 70 feet at 60 fpm
 plan.addDepthChange(dive.feetToMeters(nextLevel), dive.feetToMeters(70), "18/45", timeToLevel);
 bottomTime += timeToLevel;
 cumulativeDepth += (timeToLevel * (70+(depthDiff/2)));
 
 //Time-weighted average depth at the moment deco begins (feet).
 var avgDepthAtDecoBegin = cumulativeDepth/bottomTime;
 
 //NOTE(review): 0.2/0.8 look like gradient factors low/high and 1.6 like max
 //deco ppO2 - TODO confirm calculateDecompression's signature in the library.
 var decoPlan = plan.calculateDecompression(false, 0.2, 0.8, 1.6, 30);
 
 var totalTime = calculateDiveTime(decoPlan);
 var decoTimeFromMaxDepth = totalTime - bottomTime;
 
 //Plan 2: the same total bottom time spent flat at the average depth.
 plan = new buhlmann.plan(buhlmann.ZH16BTissues);
 plan.addBottomGas("18/45", 0.21, 0.35);
 plan.addDecoGas("50%", 0.50, 0);
 plan.addDecoGas("100%", 1.0, 0);
 plan.addFlat(dive.feetToMeters(avgDepthAtDecoBegin), "18/45", bottomTime);
 decoPlan = plan.calculateDecompression(false, 0.2, 0.8, 1.6, 30);
 totalTime = calculateDiveTime(decoPlan);
 var decoTimeFromAvgDepth = totalTime - bottomTime;
 
 console.log(nextLevel + ", " + nextLevelTime + ", " + avgDepthAtDecoBegin + ", " + decoTimeFromMaxDepth + ", " + decoTimeFromAvgDepth);
 }
}



 

The Results (raw data)

The results I got were:

Second Level depth, Avg Depth at Deco Stop, Multi-Level Deco Time, Avg Depth Deco Time
190, 5, 181.41255605381164, 56.29999999999997, 56.52945470852016
180, 5, 180.06726457399105, 55.29999999999996, 55.488450224215235
170, 5, 178.7219730941704, 54.29999999999996, 55.4474457399103
160, 5, 177.37668161434976, 53.29999999999997, 54.40644125560536
150, 5, 176.03139013452915, 51.99999999999998, 53.36543677130043
140, 5, 174.68609865470853, 50.999999999999964, 52.324432286995496
130, 5, 173.34080717488786, 49.99999999999997, 51.28342780269057
120, 5, 171.99551569506727, 49.999999999999964, 51.24242331838564
110, 5, 170.65022421524665, 48.69999999999998, 50.20141883408069
100, 5, 169.30493273542598, 47.699999999999974, 50.16041434977576
90, 5, 167.9596412556054, 45.69999999999998, 49.119409865470836
80, 5, 166.61434977578477, 45.69999999999998, 48.0784053811659
190, 10, 182.43083003952566, 66.29999999999997, 67.56049169960473
180, 10, 180.0592885375494, 64.29999999999998, 65.48820711462449
170, 10, 177.68774703557312, 63.299999999999976, 63.415922529644256
160, 10, 175.31620553359681, 60.99999999999997, 62.34363794466401
150, 10, 172.94466403162056, 58.99999999999998, 60.27135335968378
140, 10, 170.57312252964428, 56.99999999999998, 59.19906877470354
130, 10, 168.20158102766797, 54.699999999999974, 57.12678418972331
120, 10, 165.83003952569172, 52.69999999999997, 55.054499604743064
110, 10, 163.45849802371544, 50.69999999999997, 53.98221501976284
100, 10, 161.08695652173913, 48.39999999999998, 51.909930434782595
90, 10, 158.71541501976284, 47.399999999999984, 50.837645849802364
80, 10, 156.34387351778656, 45.39999999999997, 49.76536126482211
190, 15, 183.23321554770317, 77.59999999999997, 78.58494840989397
180, 15, 180.05300353356887, 73.29999999999995, 75.48801554770316
170, 15, 176.87279151943463, 71.29999999999995, 72.39108268551234
160, 15, 173.69257950530033, 67.99999999999997, 70.29414982332155
150, 15, 170.5123674911661, 64.99999999999997, 67.19721696113072
140, 15, 167.33215547703182, 62.69999999999998, 65.10028409893991
130, 15, 164.15194346289752, 59.699999999999974, 62.00335123674911
120, 15, 160.97173144876325, 57.69999999999998, 59.90641837455829
110, 15, 157.79151943462898, 54.39999999999997, 57.80948551236748
100, 15, 154.61130742049468, 51.39999999999998, 55.71255265017666
90, 15, 151.43109540636044, 49.13359999999998, 53.61561978798586
80, 15, 148.25088339222614, 46.13359999999998, 50.518686925795045
190, 20, 183.88178913738017, 87.59999999999998, 89.60471693290735
180, 20, 180.0479233226837, 83.29999999999998, 86.4878607028754
170, 20, 176.21405750798723, 80.29999999999998, 82.37100447284345
160, 20, 172.38019169329073, 75.99999999999999, 79.2541482428115
150, 20, 168.54632587859422, 71.99999999999997, 75.13729201277954
140, 20, 164.71246006389777, 68.69999999999999, 71.0204357827476
130, 20, 160.87859424920126, 64.69999999999997, 66.90357955271564
120, 20, 157.0447284345048, 61.399999999999984, 64.78672332268368
110, 20, 153.21086261980832, 57.39999999999997, 61.66986709265175
100, 20, 149.37699680511182, 54.13359999999999, 59.5530108626198
90, 20, 145.54313099041534, 51.13359999999998, 55.43615463258785
80, 20, 141.70926517571885, 47.13359999999998, 53.319298402555894
190, 25, 184.41690962099125, 97.59999999999998, 100.6210274052478
180, 25, 180.04373177842564, 92.29999999999998, 95.48773294460639
170, 25, 175.67055393586006, 88.29999999999998, 91.35443848396503
160, 25, 171.29737609329445, 82.99999999999999, 87.22114402332359
150, 25, 166.92419825072884, 78.99999999999997, 83.08784956268224
140, 25, 162.55102040816328, 74.69999999999999, 78.95455510204081
130, 25, 158.17784256559764, 70.69999999999997, 72.82126064139943
120, 25, 153.80466472303203, 65.39999999999998, 69.687966180758
110, 25, 149.43148688046648, 61.39999999999997, 65.5546717201166
100, 25, 145.05830903790087, 57.13359999999997, 61.42137725947519
90, 25, 140.6851311953353, 53.13359999999998, 58.28808279883379
80, 25, 136.31195335276968, 49.13359999999998, 55.1547883381924
190, 30, 184.86595174262735, 108.59999999999998, 113.63471420911527
180, 30, 180.04021447721178, 102.29999999999998, 107.4876257372654
170, 30, 175.21447721179626, 96.29999999999998, 101.34053726541555
160, 30, 170.3887399463807, 90.99999999999999, 94.19344879356568
150, 30, 165.56300268096513, 85.99999999999997, 89.04636032171581
140, 30, 160.73726541554961, 80.69999999999999, 83.89927184986593
130, 30, 155.91152815013405, 75.7, 78.75218337801608
120, 30, 151.08579088471848, 70.4, 74.60509490616622
110, 30, 146.26005361930297, 65.39999999999998, 70.45800643431636
100, 30, 141.4343163538874, 60.13359999999997, 65.31091796246646
90, 30, 136.60857908847183, 56.13359999999998, 61.1638294906166
80, 30, 131.78284182305632, 51.13359999999998, 57.01674101876673

The conclusions

Here’s a couple of quick conclusions I was able to draw:
1. If all you did was compute deco based on your avg depth + time after having hit the stop, the biggest difference I could find was a little over 6 minutes (and it was negative.) Meaning, if we did the entire dive at Avg depth, we’d be calling 6 minutes more at most.
2. The maximum deviation expressed in percentage points, was 13 percent. Meaning adding a safe 15% to what would be deco based on avg depth would be a good rule of thumb.
I haven’t played with greater depths or attempted to plot/chart these to get a visual “feel” for how the curves shape up. I haven’t tried VPM.

On Type Systems: Javascript is pleasant (got it right.)

In my new-found mission to try and change minds, I’m going to try and convince you why Javascript’s object/type system got it right (for a mainstream widely accepted language.)

I’ve always felt this, but  the moment one of my hard-core strongly-typed friends said this two weeks ago: “You know, when you think about it, Javascript makes a lot of sense,” I knew we’d reached a tipping point.

Side note: If you want to relate this to life-advice or self-help advice, go read this excellent blog post too: http://thelastpsychiatrist.com/2011/01/are_law_schools_lying_to_their.html
I don’t doubt for a moment he sincerely believes he is a lawyer, because lawyer for him isn’t a profession or even a job, it’s a label, a code word for a kind of intellectualism he wants for himself.  As long as “all of my friends see me as…” it was well worth the cost.  He didn’t study to become an attorney, he bought a back-up identity.

Composing my thoughts linearly on this subject is hard, but I’m going to try. Let’s begin with some classical examples in any introductory OOP class/textbook/course.

Every object is of a certain “type”, the logic goes. A car is also a vehicle. A vehicle is also a machine.  A boat is a vehicle.  An aircraft is a vehicle. Knowing this, you can now write targeted code that operates on all vehicles once and run it against any vehicle.

Furthermore, you can write code targeted to each of these specializations too. Code that only works on an aircraft. Code that only works on a car. Code that only works on a boat. Code that works on all machines.

All of this makes so much sense. It’s a good world view. You define your business model as a set of “objects” with “types” and then you write code against those types. Now you can extend your system by declaring new “things”, which might be vehicles, and once you declare them as being a vehicle, your vehicle-based code works against them instantly!

So far so good, right? See any problems with this? Now the obvious cases are easy.

  1. What if a car is also a plane? (Multiple inheritance). Then how do you determine which “drive” function is called? Etc. etc.
  2. What about a “Truck”? Is a Truck a subset of car or directly a subset of vehicle?

But these are copouts. Let’s address something FAR more real-life oriented, since OOP-people are all about real-life modeling.

Is your car exactly the same as it came out of the factory? I bet the brand new factory car’s “o.pedalToTheMetal()” has a different impulse response than on yours. I bet your truck’s “o.turnLeftIndicatorOn()” doesn’t always work. Mine doesn’t. I bet your “o.stopInTenFeet()” doesn’t always stop in ten feet.

Beyond the very academically cool problems that research papers are made of, like multiple inheritance and how to define a “type”, is the very very pragmatic problem none of these examples ever considers: After you drive your car out of the showroom, it is distinctly YOUR car. It stops meeting factory specifications almost instantly. It acts different. It behaves different. It performs different. A mirror is chipped. A wheel is scratched. A door has a dent. A window doesn’t quite roll open.

This is why your textbook never moves past the definition of simple objects. It truly doesn’t have the balls to follow through with that example as a real living breathing growing changing software environment would. What it doesn’t acknowledge is that each individual car, over time, becomes its own class. Thus you end up with a thousand confusing classes, repeated code, and a bunch of junk which you are forced to contain with the infamous “design patterns.”

What I’m saying is, those OOP analogies DO work, but OOP proponents were always too cowardly to take them to their logical conclusion. If by now you’re thinking Alan Kay got it right the first time around with Smalltalk, welcome to every functional programmer’s point of view.

Now for a moment lets focus on identity. This could get slightly philosophical in the sense of “Who am I?” – go read the blog post to ponder further. The takeaway from that article which is relevant to this discussion is: Do you care that someone *is* a doctor, or do you care that *they can remove your tumor*? How you think about this, determines greatly how you approach any design, problem and thus solution in life – and in programming.

Does it tell you more that a person “canDefendAgainstADUI()” than that they “areLawyer() && !areLawyerWithoutLicense()” (Because you know someone could be a lawyer, and not have a license to practice law.)

See what I mean about “identity”. I’m an engineer because I engineer things. When I’m scuba diving, I’m a diver. When I’m dancing, I’m a dancer. My “identity” is far less valuable in these situations than by “ability”. Whether I can perform a cross-body lead is far more relevant than who I am and the host of identities and sub-identities that define me.

If you’re casting for the part of Long John Silver, you’re looking for the condition “has one leg”, than you are looking for the identity of “One-legged Person”. It is easier to test, reason about, think about, program against, debug, maintain and in general understand and comprehend. It is direct. It is to the point. And it expresses *intent*. Last year I wrote a very popular article about choice of programming languages, and my key recommendation was: Pick the language that expresses your intent. Of all the things you can decide on – expressing intent is the ONLY one that matters.

If any of this makes sense this far, and I haven’t lost you, or pissed you off, then you’re already coming around to seeing Javascript in a better light. You’re still probably feeling a bit “icky” about the “mess”. You’ve never seen the mess, but you’ve heard of it. You remember Netscape Navigator from 5 years ago. You are thinking about lack of auto-complete. No proper structure. No proper “type”. Without a “class” how do you ensure things can be constrained and contained?

Ever since I learned programming, this is what I started to do in old BASIC:

DIM a, b, c

It is literally the first thing you do before you can do anything useful. You have to define a name, and assign it a “type”. Then all is well, because the “type” protects you against doing something rash.

In the real world though, types can be a problem. When you have a program linked across three versions of the same library, you can begin to see problems. What one programmer thought is “APerson”, is not the same thing as what another programmer thought it was, which is still different from what a third one thought it was.

That’s where we realize why type systems are so painful: what a specific, particular, constructed, existing, working, living, breathing object can do for me is FAR more important than what it declares itself to “be”. I can declare myself a lawyer. For all you readers, you have no clue whether or not I “am a lawyer”. You might google my degrees, but you will never know that I *don’t* have a law degree. What you *do* know is that I am not bar certified in the state of Washington. Even by the most simplistic modeling angle, modeling behaviors is far more relevant than modeling identity. Incidentally, it also happens to be simpler. Go figure.

Wait a minute you ask – you’re just about thinking of interface-enforcement. Good job! You’re about to counter me with this: “What if I wrote a piece of code that should take in an object of an interface to, let’s say sort. One object might implement ‘Sort()’, another might implement ‘sort()’ while yet another might implement ‘doSort()’ and yet another might implement ‘performSort()’. Doesn’t a class/interface help me enforce consistency?”

That is a very astute observation and counter-challenge to my argument. There are two distinct responses I have to this.

My first response is based on group theory and equivalence classes – funny how I’m using classes to argue against classes. Look at it practically. Does a class REALLY help you enforce consistency? Think about it deeply to the last time you really did this and when it wasn’t toy software you wrote in your spare time where you were the sole contributor.

In my experience, as I’m sure it has been in yours too, classes can “confirm” whether an object implements your “Sort” method, but it can’t enforce shit. When you wrote your subclass, either you were aware of ISortable and decided to implement it, in which case you knew what your consumer wanted – which makes it no more or less complex in javascript. Or you didn’t know explicitly exposing ISortable would come in handy, in which case you’re doubly screwed in your classical architecture. Because you just entered refactoring hell of the deepest rung, my friend.

Furthermore, you can’t encapsulate context for shit! Now you’re doing globals and thread-locals and all sorts of nonsense. Then you’re doing locks and synchronization to deal with it. Then you’re doing async and continuations to deal with the locks! Then you’re using frameworks to deal with those callbacks and what not. Then you’re using IDEs to help you use those frameworks. Why? Because fundamentally you couldn’t pass around behaviors to save your life.

The correct (better) way of passing an object to a consumer is to never really pass an object. Hardly any consumer cares about your structure. It cares about performing an action. It is FAR easier to pass it in an anonymous function called “bookAirplaneTicket”, than to pass it in 10 objects called, “(ticketParams, context, customerInfo, databaseConnectionString,…. etc. etc.)”

Behind your function the consumer doesn’t care that it’s a toy function, a mock function, a stub or uses 10 objects or 500. You don’t care to tell your consumer about a thousand interfaces.

With me so far? Haven’t lost you? Is any of this making some sense? Then let me give you my second response, which is rhetorical.

What about “a Lawyer” who doesn’t want to prosecute? Who cares “how long”, or “why”. The classical approach is to give the lawyer a bunch of attributes. “OnVacation”, “VacationDuration”, and so on. Then every other person has to understand what a lawyer’s vacation means. And he has to check it. And if someone forgets to, they just booked themselves a useless lawyer. What if a lawyer broke his kneecap? Add another attribute: “IsKneecapIntact”. Then tell each human ever intending to deal with a lawyer to check THAT condition.

Why? All because you refused to check whether the person “bool canProsecute()”. Because you insisted on getting all philosophical and typing him as “A Lawyer” more than you cared what you want out of him – which is a person who is currently capable, qualified and intends to prosecute.

How does this apply to OOP? Ever use an object that throws “NotImplementedException”? What does it mean? How is the object “Bus” if “Drive()” throws “NotImplementedException”? Is there a non-drivable Bus? It’s a tin can – I’ll go with you that far. But do you care that you have a “Bus”, or do you care that you have a thing that is capable of being driven and can carry a certain payload?

So now, not only do types not tell you something about an object, but you’re actively fighting them – checking for nulls or nils or exceptions or what not – when all the object had to do, was remove its method/function/behavior/message-handler called “drive”, and you would have stopped caring. Because it calls itself a Bus, and because the language insists that a Bus always have a method called Drive, you now have to execute it before you know it’s going to work. Surely that’s a terrible way to write code that can be reasoned about.

But wait you say – you have frameworks and static analyzers and expensive tools to prevent people from doing that. You my friend have solved an NP-hard problem – you have successfully statically analyzed a program, using another program, and assured me it terminates.

I hope I’ve provided some “real life” examples on why even modern type systems fail, why, when you stop trying to implement your classical type system in Javascript, you actually end up with a much better, cleaner, simpler, direct, to-the-point and relevant methodology to write code. Depending on Javascript to “enforce” your “Type” will never work. The question is – what did your “Type” enforcement actually guarantee to YOU in your other language? Don’t tell me what you *think* it did. What did the language itself guarantee when it said an object is of “A Type”? What was the mathematical boolean predicate or premise that you could infer beyond any doubt, when an object was of “A Type”? Now how many premises did you actually assume when you designed your software?

Is it surprising that software is a nightmare to maintain? We live on assumptions/premises that were never guaranteed, enforced or validated. Because 99% good citizens always wrote things a certain way we made assumptions that were never true. Then we call someone a “bad programmer who doesn’t know C/Java/Ruby/Python/whatever” when they don’t follow a pattern we like – but it was never a guarantee given to you by the language. Javascript was simply ballsy enough to admit it and own it.

 

Recipes to play with “No Deco Limits”

For a technical, philosophical and every other view on No-Deco limits, go to scubaboard.

First let me elaborate on why the mathematical model is important and how to play with it.

Models allow reverse engineering (fitting)
This post is about understanding the mathematical model in terms of the NDLs we know and use. One of the most important things about any model is, once you have verified it in one direction (how much deco must I do for a specific dive), you can then run it in the other direction (how much can I dive, before I have mandatory deco.) You can then understand what parameters, adjustments, corrections, variations other people were using when they came up with the numbers they gave you.

This is a subtle point and one that excites me the most. What this means is, if someone said to me, “Let’s dive to 100 feet, for 40 minutes, on 32%, and ascend while stopping two minutes every ten feet.”, I now have the tools to guess their parameters.

Suppose they were using VPM, then I can reverse-engineer things like what bubble sizes they considered “critical”, and what their perfusion rates were assumed to be, etc. If they were using the Buhlmann, I can reverse-engineer their gradient factors.

This is awesome because it allows me to break down the black box a little – instead of whining about “My computer said 10 minutes, and yours said 20 minutes”, I can whine in a far more specific and deeply annoying way – “Oh I see, my computer is assuming critical He bubble size to be 0.8 microns, but yours is assuming 0.5 microns.” Remember kids – you shouldn’t always whine, but when you do, make it count!

When your computer has a “conservativism factor”, what does that mean? Is it simply brute-multiplying each stop by that factor? Is it multiplying the shallow stops? Is it a coefficient used in a curve-fitting model, if let’s say it’s trying to fit a curve like a spline or bezier to “smoothen out the ascent”? Conservativism factor “4” makes you no more intelligent about what’s going on, than saying, “These are the adjustments/corrections I make.”

While these ARE “just models”, models are nothing if they are not properly parameterized.

Here again, existing software came short in what I could do with it. The GUI is a great tool for a narrow specific task. But when exploring the model, nothing is more useful and powerful than being able to play with it programmatically. Once I begin posting recipes you’ll know what is so fascinating about “playing with it”.

If you’re a fan of the Mythbusters, you will see them refer to this as, producing the result.

Models allow study of rates-of-change (sensitivity analysis)

The other very important aspect of a model, even if the constants are wrong, is the overall rate of change, or growth. Also called sensitivity analysis (meaning how sensitive is my model to which parameters.)

Let us say we had a few things in our control – ppO2, ppN2, ppH2, bottom time, ascent rate, descent rate, stops.

What a mathematical model allows us to learn (and should help us learn), is how sensitive the plans are to each of these parameters, even if specific constants are wrong.

Let me put it this way – if you wanted to guess the sensitivity of a “car” to things like – weight, number of gear shifts, size of wheels, etc., and you had a hummer to study with, but had to somehow extend that knowledge to a sedan, how would you do it?

The “constants” are different in both. But the models aren’t. An internal combustion engine has an ideal RPM rate where it provides the maximum torque for minimum fuel consumption. The specific rev rate will be different. And you can’t account for that. However, the “speed at which inefficiency changes”, is a commonality in all internal combustion engines. Unless the sedan is using a Wankel engine, the rate-of-change characteristics still apply. Even if the hummer’s ideal RPM is 2000, and the sedan’s is 1500, the questions we can still study are – when I deviate 10% from the ideal, how does that affect fuel consumption, and torque?

So even if the software/constants I wrote are entirely wrong (which they probably are), they still serve a valuable tool in studying these changes.

A study in NDLs

Naturally one of the first unit tests I wrote for the algorithm, was PADI dive tables: https://github.com/nyxtom/dive/blob/master/test/dive_test.js#L646

The point here was to recreate an approximation of the dive tables. What fascinated me was how much subtle understanding there is behind that number though.

First let’s define an NDL as: Maximum time at a depth, with an ascent ceiling of zero.

What this means is, whether you use Buhlmann, or VPM or whatever model you like, the NDL is the time after which you can ascend straight to the surface (depth of zero meters.)

So what happens when we run pure Buhlmann without a gradient factor?

(This snippet is meant to be executed here: http://deco-planner.archisgore.com/)

//NDL for 100ft on air under pure Buhlmann: gradient factor 1.0, no conservatism.
var model = dive.deco.buhlmann();
var airPlan = new model.plan(model.ZH16BTissues);
airPlan.addBottomGas("Air", 0.21, 0.0);
airPlan.ndl(dive.feetToMeters(100), "Air", 1.0);

//Result is 16

That’s a bit strange isn’t it? The official NDL on air is closer to 19 or 20 minutes (with a “mandatory safety stop”.)

Does it mean my model is wrong? My software is wrong? Compare it with different depths, and you’ll find it gives consistently shorter NDLs. What gives?

Let’s try fudging the conservativism factor a bit.

//Same NDL query, but with the conservatism factor nudged up to 1.1.
var model = dive.deco.buhlmann();
var airPlan = new model.plan(model.ZH16BTissues);
airPlan.addBottomGas("Air", 0.21, 0.0);
airPlan.ndl(dive.feetToMeters(100), "Air", 1.1);


//Result is 19

That’s just about where we expect it to be. This tells me that the NDL could have been computed with a less conservative factor. But is there something I’m missing?

Wait a minute, this assumes you literally teleport to the surface. That’s not usually the case. Let’s run the same NDL with a 30-feet-per-minute ascent (this time we have to use the getCeiling method).

//Search for the longest bottom time at 100ft on air that still has a zero
//ceiling, assuming a 3-minute (30 ft/min) direct ascent to the surface.
for (var minutes = 1; minutes <= 120; minutes++) {
  var model = dive.deco.buhlmann();
  var trial = new model.plan(model.ZH16BTissues);
  trial.addBottomGas("Air", 0.21, 0.0);
  trial.addFlat(dive.feetToMeters(100), "Air", minutes);
  trial.addDepthChange(dive.feetToMeters(100), 0, "Air", 3);

  //The first bottom time that leaves a positive ceiling is one past the NDL.
  if (trial.getCeiling(1.0) > 0) {
    console.log("NDL for 100 feet is: " + (minutes - 1));
    break;
  }
}
NDL for 100 feet is: 19

That’s interesting. For the same parameters, if we assume an ascent of two minutes, our NDL went up – we can stay down longer if we are ASSURED of a 30-feet-per-minute ascent at the end.

Now remember these numbers are entirely made up. My constants are probably helter-skelter. You shouldn’t use the SPECIFIC numbers on this model. But there’s something intuitive we discovered.

Let’s try it again with a 3 minute safety stop at 15 feet:

//Same NDL search, this time crediting a 3-minute safety stop at 15 feet.
for (var minutes = 1; minutes <= 120; minutes++) {
  var model = dive.deco.buhlmann();
  var trial = new model.plan(model.ZH16BTissues);
  trial.addBottomGas("Air", 0.21, 0.0);
  trial.addFlat(dive.feetToMeters(100), "Air", minutes);
  trial.addFlat(dive.feetToMeters(15), "Air", 3);

  //The first bottom time that leaves a positive ceiling is one past the NDL.
  if (trial.getCeiling(1.0) > 0) {
    console.log("NDL for 100 feet is: " + (minutes - 1));
    break;
  }
}
NDL for 100 feet is: 22

Once again these numbers make sense – if we are ASSURED of a 3 minute stop at 15 feet, our NDL goes up. How interesting.

This gives you a better idea of a “dynamic” dive. You aren’t exactly teleporting from depth to depth, and those ascents and descents matter. Try this for different gasses.

Dive Planner Recipes

This is really for my personal reference. If it helps you, I’m glad.

A couple of weeks ago, I wrote this tool (http://deco-planner.archisgore.com.) You can go read the history, motivation, etc. on that page and the github repo ad nauseam.

NOTE: Why is this important/useful? Don’t computers tell you how much deco you should do? Yes they do exactly that, and do it pretty well. Now here’s what a computer won’t tell you – how much deco would you be looking at _if_ you extended the dive by 10 minutes? Let’s say that by extending it 10 minutes, or pushing it down by 10 feet more, your obligation jumps from 30 minutes to 50 minutes. That is objectively two-thirds more gas than you planned for. This tool/post is about understanding what those shapes are so you can decide, even if you had your computer telling you what your deco was, whether you’re going to like doing it or not.

This post is about how to effectively use that tool with some pre-canned recipes to generate information more cheaply and easily than any other tool I know of or can think of.

The first recipe (and the primary reason I built the entire damn thing, is to get an idea of how ratio-deco changes over different bottom times. Does it grow linearly? Non-linearly? etc. Say you’re at 150 feet for “x” minutes longer than your plan, and you just don’t happen to have a computer to do your math. Do you have a vague idea how the shape of increments changes?)

Let’s find the answer to that very question quickly.

Deco time change as a ratio of bottom time:

//Utility: total dive time is simply the sum of every segment's time.
var calculateDiveTime = function(diveplan) {
    var total = 0;
    diveplan.forEach(function(segment) {
        total += segment.time;
    });
    return total;
}

//Sweep bottom times from 1 to 120 minutes on a 150 foot dive. For each
//bottom time we build a fresh Buhlmann plan, compute the full schedule,
//and print the deco time (total dive time minus bottom time).
for (var bottomTime = 1; bottomTime <= 120; bottomTime++) {
    var buhlmann = dive.deco.buhlmann();
    var plan = new buhlmann.plan(buhlmann.ZH16BTissues);

    //Back gas is 21/35 trimix; two deco gasses (50% and O2)
    plan.addBottomGas("2135", 0.21, 0.35);
    plan.addDecoGas("50%", 0.50, 0);
    plan.addDecoGas("Oxygen 100%", 1.0, 0.0);

    //Single flat segment at 150 feet for the current bottom time
    plan.addFlat(dive.feetToMeters(150), "2135", bottomTime);

    var decoPlan = plan.calculateDecompression(false, 0.2, 0.8, 1.6, 30);
    var decoTime = calculateDiveTime(decoPlan) - bottomTime;
    console.log(decoTime);
}

What’s really cool is, I can now chart those decoTime values using Excel or Numbers or whatever your spreadsheet is. I just paste them in plot.ly, and get this:

Deco Time change as a ratio of depth:

Now let’s look at how decompression changes if my depth came out different than anticipated. We can generate deco schedules for that too:

//Utility: fold all segment times together to get the total dive time.
var calculateDiveTime = function(diveplan) {
    return diveplan.reduce(function(sum, segment) {
        return sum + segment.time;
    }, 0);
}

//In this loop we'll run a 30 minute dive for depths between 120 and 180
//feet, calculate total dive time, and find deco time by subtracting the
//(fixed) bottom time. (The old comment was copy-pasted from the
//bottom-time sweep and described the wrong loop.)
var bottomTime = 30;
for (var depth = 120; depth <= 180; depth++) {
    var buhlmann = dive.deco.buhlmann();
    var plan = new buhlmann.plan(buhlmann.ZH16BTissues);
    plan.addBottomGas("2135", 0.21, 0.35);
    plan.addDecoGas("50%", 0.50, 0);
    plan.addDecoGas("Oxygen 100%", 1.0, 0.0);
    //Flat segment at the current depth for the fixed bottom time
    plan.addFlat(dive.feetToMeters(depth), "2135", bottomTime);
    var decoPlan = plan.calculateDecompression(false, 0.2, 0.8, 1.6, 30);
    var totalTime = calculateDiveTime(decoPlan);
    var decoTime = totalTime - bottomTime;
    console.log(decoTime);
}

And we get this:

 

Finally, let’s plot how VPM-B compares to Buhlmann. In this case, we have to add a depth change from 0 feet to 150 feet, because VPM is very sensitive to the slopes unlike Buhlmann which only worries about tissue loading (more on this later, I promise.)

Here’s the code to generate Buhlmann vs VPM deco times for the same dive profile:

//Utility: walk the plan's segments and add up their times.
var calculateDiveTime = function(diveplan) {
    var total = 0;
    for (var segment of diveplan) {
        total += segment.time;
    }
    return total;
}

//Compare Buhlmann against VPM-B for the same profile: a 5 minute descent
//from the surface to 150 feet followed by a flat bottom segment, sweeping
//bottom times from 1 to 120 minutes. Deco time for each model is the
//total dive time minus the bottom time and the descent time. (The old
//comment was copy-pasted and didn't mention the VPM comparison or the
//descent segment.)
var descentTime = 5;
for (var time = 1; time <= 120; time++) {
    var buhlmann = dive.deco.buhlmann();
    var bplan = new buhlmann.plan(buhlmann.ZH16BTissues);
    bplan.addBottomGas("2135", 0.21, 0.35);
    bplan.addDecoGas("50%", 0.50, 0);
    bplan.addDecoGas("Oxygen 100%", 1.0, 0.0);
    //VPM is sensitive to descent/ascent slopes, so model the descent
    //explicitly — and give Buhlmann the identical profile for fairness.
    bplan.addDepthChange(0, dive.feetToMeters(150), "2135", descentTime);
    bplan.addFlat(dive.feetToMeters(150), "2135", time);
    var bdecoPlan = bplan.calculateDecompression(false, 0.2, 0.8, 1.6, 30);
    var btotalTime = calculateDiveTime(bdecoPlan);
    var bdecoTime = btotalTime - time - descentTime;

    var vpm = dive.deco.vpm();
    var vplan = new vpm.plan();
    vplan.addBottomGas("2135", 0.21, 0.35);
    vplan.addDecoGas("50%", 0.50, 0);
    vplan.addDecoGas("Oxygen 100%", 1.0, 0.0);
    vplan.addDepthChange(0, dive.feetToMeters(150), "2135", descentTime);
    vplan.addFlat(dive.feetToMeters(150), "2135", time);
    var vdecoPlan = vplan.calculateDecompression(false, 0.2, 0.8, 1.6, 30);
    var vtotalTime = calculateDiveTime(vdecoPlan);
    var vdecoTime = vtotalTime - time - descentTime;

    console.log(bdecoTime + " " + vdecoTime);
}

And the chart that comes out:

Scuba Diving tools

At some point I made a page to document random scuba tools I build/will-build/want-to-build/want-others-to-build.

The last part is a bit tricky. I want many things – and asking others to build something is painful. You don’t always get what you want. You don’t always like what you get. You don’t always get what you want, and how you like it at a price you’re willing to pay for it.

So in a very terribly-theme’d page (because I absolutely suck at designing web pages), here’s a link to some tools I’m working on:

Scuba Diving Resources

The next big couple of things coming up are – a better UI (especially plotter/charter) for the dive planner, and a RaspberryPi Zero based dive computer (on which you can write deco plans on a full linux distro.)

Don’t hold your breath though. My history with these things is very haphazard depending on how obsessive I feel the need/want.