How to inject packets into the Receive (Rx) path using an LWF driver on Windows?


I am developing an NDIS 6 lightweight filter (LWF) driver based on WinPcap. As you can imagine, there are two paths through a network adapter: Tx and Rx.

Tx is the sending path.

Rx is the receiving path.

We know that WinPcap is able to send packets to the network (the Tx path). I want to know whether it's also possible to send packets to the Rx path, meaning injecting a packet at the adapter and pretending it arrived from the network.
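For contrast, the Tx direction already works from user mode through the standard WinPcap API. A minimal sketch (the adapter name and frame contents are placeholders, not from my code):

#include <pcap.h>

/* Minimal Tx-path sketch: WinPcap can already hand a raw Ethernet frame
 * to the driver's send path via pcap_sendpacket(). */
int send_one_frame(void)
{
    char errbuf[PCAP_ERRBUF_SIZE];
    unsigned char frame[42] = { 0 };  /* craft an Ethernet frame here */

    /* The device name below is a placeholder for a real NPF adapter name */
    pcap_t *fp = pcap_open_live("\\Device\\NPF_{XXXXXXXX}", 65536, 1, 1000, errbuf);
    if (fp == NULL)
        return -1;

    /* This ends up in the driver's write path (NPF_Write below) */
    int ret = pcap_sendpacket(fp, frame, sizeof(frame));

    pcap_close(fp);
    return ret;
}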

First of all, I don't even know whether this is viable. If it is, please read on:

I have written code that uses the NdisFIndicateReceiveNetBufferLists call to indicate a crafted packet to the upper layers, and then frees the packet in the FilterReturnNetBufferLists handler. This way, the upper-layer modules, including the Windows OS itself, believe a new packet has arrived from the network (which is not actually true).
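The core of the pattern is this condensed sketch (simplified from my full NPF_Write and NPF_ReturnEx code quoted below; error handling omitted):

/* 1. In the write path: indicate a crafted NBL up the stack as if it
 *    came from the network, after marking it as ours. */
pNetBufferList->SourceHandle = Open->AdapterHandle;   /* mark it as ours */
NdisFIndicateReceiveNetBufferLists(
    Open->AdapterHandle,       /* NDIS filter handle */
    pNetBufferList,            /* the crafted NET_BUFFER_LIST */
    NDIS_DEFAULT_PORT_NUMBER,
    1,                         /* NumberOfNetBufferLists (single NBL here) */
    0);                        /* ReceiveFlags: 0 => NDIS may pend the NBL */

/* 2. In FilterReturnNetBufferLists: reclaim only the NBLs we originated,
 *    and pass everything else down as usual. */
if (pNetBufList->SourceHandle == Open->AdapterHandle)
    NdisFreeNetBufferList(pNetBufList);                   /* ours: free it */
else
    NdisFReturnNetBufferLists(Open->AdapterHandle,
                              pNetBufList, ReturnFlags);  /* not ours */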

However, this approach doesn't work. I used nping (from Nmap) to ping the gateway, and the nping window freezes; it can't even be terminated from Task Manager, and only goes away when I halt the driver.

I believe this approach should be possible, based on:

  1. Jeffrey's answer to this post: Is the FilterSendNetBufferLists handler a must for an NDIS filter to use NdisFSendNetBufferLists? (a sketch of this idea follows the list)

  2. An explanation from Microsoft: https://msdn.microsoft.com/en-us/library/windows/hardware/ff570452(v=vs.85).aspx

  3. An example provided by OSR: https://www.osronline.com/showthread.cfm?link=242847
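The point I take from reference 1 is that a filter's send handler is not required to forward packets down with NdisFSendNetBufferLists; it may instead loop them back up as receive indications. A rough sketch of that idea (MY_FILTER_CONTEXT and the function name are illustrative, not from the Npcap sources):

#include <ndis.h>

typedef struct _MY_FILTER_CONTEXT {   /* illustrative context, not from Npcap */
    NDIS_HANDLE FilterHandle;         /* handle received at FilterAttach time */
} MY_FILTER_CONTEXT, *PMY_FILTER_CONTEXT;

VOID
MyFilterSendNetBufferLists(
    NDIS_HANDLE         FilterModuleContext,
    PNET_BUFFER_LIST    NetBufferLists,
    NDIS_PORT_NUMBER    PortNumber,
    ULONG               SendFlags)
{
    PMY_FILTER_CONTEXT Ctx = (PMY_FILTER_CONTEXT)FilterModuleContext;

    UNREFERENCED_PARAMETER(SendFlags);

    /* Divert: indicate the NBL up the Rx path instead of sending it.
     * (Assumes a single NBL in the chain; otherwise count the chain.) */
    NdisFIndicateReceiveNetBufferLists(Ctx->FilterHandle, NetBufferLists,
        PortNumber, 1, 0);

    /* The diverted NBLs still belong to the originator of the send, so once
     * NDIS hands them back via FilterReturnNetBufferLists, they must be
     * completed with NdisFSendNetBufferListsComplete, not freed. */
}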

I then analyzed the log printed by my driver. It seems that after the NdisFIndicateReceiveNetBufferLists call (used to send to the Rx path) is called once, the driver carries on with its other jobs, while the ring-3 nping process dies. I reused code from the existing, well-running NPF_SendCompleteEx (the send-complete handler for the normal Tx path), so I don't think that part has bugs.

The whole code base is hosted on GitHub: https://github.com/nmap/npcap

It doesn't contain the faulty code in this post, as I haven't submitted that yet; I mention it only as background in case you need it :)

My injection code:

NTSTATUS
NPF_Write(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp
    )
{
    POPEN_INSTANCE      Open;
    POPEN_INSTANCE      GroupOpen;
    POPEN_INSTANCE      TempOpen;
    PIO_STACK_LOCATION  IrpSp;
    ULONG               SendFlags = 0;
    PNET_BUFFER_LIST    pNetBufferList;
    NDIS_STATUS         Status;
    ULONG               NumSends;
    ULONG               numSentPackets;

    TRACE_ENTER();

    IrpSp = IoGetCurrentIrpStackLocation(Irp);

    Open = IrpSp->FileObject->FsContext;

    if (NPF_StartUsingOpenInstance(Open) == FALSE)
    {
        //
        // An IRP_MJ_CLEANUP was received, so fail the request
        //
        Irp->IoStatus.Information = 0;
        Irp->IoStatus.Status = STATUS_CANCELLED;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);
        TRACE_EXIT();
        return STATUS_CANCELLED;
    }

    NumSends = Open->Nwrites;

    //
    // Validate the send parameters set by the IOCTL
    //
    if (NumSends == 0)
    {
        NPF_StopUsingOpenInstance(Open);
        Irp->IoStatus.Information = 0;
        Irp->IoStatus.Status = STATUS_SUCCESS;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);

        TRACE_EXIT();
        return STATUS_SUCCESS;
    }

    //
    // Validate input parameters:
    // 1. The packet size should be greater than 0,
    // 2. less than or equal to the max frame size of the link layer, and
    // 3. the maximum frame size of the link layer should not be zero.
    //
    if (IrpSp->Parameters.Write.Length == 0 ||   // Check that the buffer provided by the user is not empty
        Open->MaxFrameSize == 0 ||               // Check that MaxFrameSize is correctly initialized
        Irp->MdlAddress == NULL ||
        IrpSp->Parameters.Write.Length > Open->MaxFrameSize)  // Check that the frame size is smaller than the MTU
    {
        TRACE_MESSAGE(PACKET_DEBUG_LOUD, "Frame size out of range, or MaxFrameSize = 0. Send aborted");

        NPF_StopUsingOpenInstance(Open);

        Irp->IoStatus.Information = 0;
        Irp->IoStatus.Status = STATUS_UNSUCCESSFUL;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);

        TRACE_EXIT();
        return STATUS_UNSUCCESSFUL;
    }

    //
    // Increment the ref counter of the binding handle, if possible
    //
    if (NPF_StartUsingBinding(Open) == FALSE)
    {
        TRACE_MESSAGE(PACKET_DEBUG_LOUD, "Adapter is unbinding, cannot send packets");

        NPF_StopUsingOpenInstance(Open);

        Irp->IoStatus.Information = 0;
        Irp->IoStatus.Status = STATUS_INVALID_DEVICE_REQUEST;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);

        TRACE_EXIT();
        return STATUS_INVALID_DEVICE_REQUEST;
    }

    NdisAcquireSpinLock(&Open->WriteLock);
    if (Open->WriteInProgress)
    {
        // Another write operation is currently in progress
        NdisReleaseSpinLock(&Open->WriteLock);

        NPF_StopUsingBinding(Open);

        TRACE_MESSAGE(PACKET_DEBUG_LOUD, "Another Send operation is in progress, aborting.");

        NPF_StopUsingOpenInstance(Open);

        Irp->IoStatus.Information = 0;
        Irp->IoStatus.Status = STATUS_UNSUCCESSFUL;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);

        TRACE_EXIT();

        return STATUS_UNSUCCESSFUL;
    }
    else
    {
        Open->WriteInProgress = TRUE;
        NdisResetEvent(&Open->NdisWriteCompleteEvent);
    }

    NdisReleaseSpinLock(&Open->WriteLock);

    TRACE_MESSAGE2(PACKET_DEBUG_LOUD,
        "Max frame size = %u, packet size = %u",
        Open->MaxFrameSize,
        IrpSp->Parameters.Write.Length);

    //
    // Reset the number of packets pending the SendComplete
    //
    Open->TransmitPendingPackets = 0;

    NdisResetEvent(&Open->WriteEvent);

    numSentPackets = 0;

    while (numSentPackets < NumSends)
    {
        pNetBufferList = NdisAllocateNetBufferAndNetBufferList(Open->PacketPool,
            0,
            0,
            Irp->MdlAddress,
            0,
            Irp->MdlAddress->ByteCount);

        if (pNetBufferList != NULL)
        {
            //
            // A packet is available, prepare it and send it with NdisSend.
            //

            //
            // If asked, set the flags for this packet.
            // Currently, the only situation in which we set the flags is to disable the reception of loopback
            // packets, i.e. of the packets sent by us.
            //
            //if (Open->SkipSentPackets)
            //{
            //  NPFSetNBLFlags(pNetBufferList, g_SendPacketFlags);
            //}

            // The packet doesn't have a buffer that needs to be freed after every single write
            RESERVED(pNetBufferList)->FreeBufAfterWrite = FALSE;

            // Save the IRP associated with the packet
            // RESERVED(pPacket)->Irp = Irp;

            // Attach the writes buffer to the packet

            InterlockedIncrement(&Open->TransmitPendingPackets);

            NdisResetEvent(&Open->NdisWriteCompleteEvent);

            // Receive the packets before sending them
            ASSERT(Open->GroupHead != NULL);
            if (Open->GroupHead != NULL)
            {
                GroupOpen = Open->GroupHead->GroupNext;
            }
            else
            {
                // This is impossible
                GroupOpen = Open->GroupNext;
            }

#ifdef HAVE_WFP_LOOPBACK_SUPPORT
            // Do not capture the send traffic we send, if it is our loopback adapter.
            if (Open->Loopback == FALSE)
            {
#endif
                while (GroupOpen != NULL)
                {
                    TempOpen = GroupOpen;
                    if (TempOpen->AdapterBindingStatus == ADAPTER_BOUND && TempOpen->SkipSentPackets == FALSE)
                    {
                        NPF_TapExForEachOpen(TempOpen, pNetBufferList);
                    }

                    GroupOpen = TempOpen->GroupNext;
                }
#ifdef HAVE_WFP_LOOPBACK_SUPPORT
            }
#endif

            pNetBufferList->SourceHandle = Open->AdapterHandle;
            NPFSetNBLChildOpen(pNetBufferList, Open); // Save the child open object in the packets
            //SendFlags |= NDIS_SEND_FLAGS_CHECK_FOR_LOOPBACK;

            // Recognize IEEE 802.1Q tagged packets. Since not many adapters support sending VLAN-tagged
            // packets, this is of no use to end users, and the code that examines the data is inefficient,
            // so it is left commented out and the sending part is unfinished.
            // This code refers to Win10Pcap at https://github.com/SoftEtherVPN/Win10Pcap.
//          if (Open->Loopback == FALSE)
//          {
//              PUCHAR pHeaderBuffer;
//              UINT iFres;
//
//              BOOLEAN withVlanTag = FALSE;
//              UINT vlanID = 0;
//              UINT vlanUserPriority = 0;
//              UINT vlanCanFormatID = 0;
//
//              NdisQueryMdl(
//                  Irp->MdlAddress,
//                  &pHeaderBuffer,
//                  &iFres,
//                  NormalPagePriority);
//
//              // Determine if the packet is an IEEE 802.1Q tagged packet.
//              if (iFres >= 18)
//              {
//                  if (pHeaderBuffer[12] == 0x81 && pHeaderBuffer[13] == 0x00)
//                  {
//                      USHORT pTmpVlanTag = 0;
//
//                      ((UCHAR *)(&pTmpVlanTag))[0] = pHeaderBuffer[15];
//                      ((UCHAR *)(&pTmpVlanTag))[1] = pHeaderBuffer[14];
//
//                      vlanID = pTmpVlanTag & 0x0FFF;
//                      vlanUserPriority = (pTmpVlanTag >> 13) & 0x07;
//                      vlanCanFormatID = (pTmpVlanTag >> 12) & 0x01;
//
//                      if (vlanID != 0)
//                      {
//                          withVlanTag = TRUE;
//                      }
//                  }
//              }
//          }

            //
            // Call the MAC
            //
#ifdef HAVE_WFP_LOOPBACK_SUPPORT
            if (Open->Loopback == TRUE)
            {
                NPF_LoopbackSendNetBufferLists(Open->GroupHead,
                    pNetBufferList);
            }
            else
#endif
#ifdef HAVE_SEND_TO_RECEIVE_PATH_SUPPORT
                if (Open->SendToRxPath == TRUE)
                {
                    IF_LOUD(DbgPrint("hahahahahahahahahahahaha:: SendToRxPath, Open->AdapterHandle=%p, pNetBufferList=%u\n", Open->AdapterHandle, pNetBufferList);)
                    // Pretend to receive these packets from the network and indicate them to the upper layers
                    NdisFIndicateReceiveNetBufferLists(
                        Open->AdapterHandle,
                        pNetBufferList,
                        NDIS_DEFAULT_PORT_NUMBER,
                        1,
                        0);
                }
                else
#endif
                {
                    NdisFSendNetBufferLists(Open->AdapterHandle,
                        pNetBufferList,
                        NDIS_DEFAULT_PORT_NUMBER,
                        SendFlags);
                }

            numSentPackets++;
        }
        else
        {
            //
            // No packets are available in the transmit pool, so wait some time.
            // The event gets signalled when at least half of the TX packet pool packets
            // are available
            //
            NdisWaitEvent(&Open->WriteEvent, 1);
        }
    }

    //
    // When we reach this point, all the packets have been enqueued to NdisSend,
    // so we have to wait for the packets to be completed by the SendComplete
    // (if any of the NdisSend requests returned STATUS_PENDING)
    //
    NdisWaitEvent(&Open->NdisWriteCompleteEvent, 0);

    //
    // All the packets have been transmitted, release the use of the adapter binding
    //
    NPF_StopUsingBinding(Open);

    //
    // No more writes in progress
    //
    NdisAcquireSpinLock(&Open->WriteLock);
    Open->WriteInProgress = FALSE;
    NdisReleaseSpinLock(&Open->WriteLock);

    NPF_StopUsingOpenInstance(Open);

    //
    // Complete the IRP and return success
    //
    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = IrpSp->Parameters.Write.Length;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);

    TRACE_EXIT();

    return STATUS_SUCCESS;
}

My FilterReturnNetBufferLists handler:

_Use_decl_annotations_
VOID
NPF_ReturnEx(
    NDIS_HANDLE         FilterModuleContext,
    PNET_BUFFER_LIST    NetBufferLists,
    ULONG               ReturnFlags
    )
/*++

Routine Description:

    FilterReturnNetBufferLists handler.
    FilterReturnNetBufferLists is an optional function. If provided, NDIS calls
    FilterReturnNetBufferLists to return the ownership of one or more NetBufferLists
    and their embedded NetBuffers to the filter driver. If this handler is NULL, NDIS
    will skip calling this filter when returning NetBufferLists to the underlying
    miniport and will call the next lower driver in the stack. A filter that doesn't
    provide a FilterReturnNetBufferLists handler cannot originate a receive indication
    on its own.

Arguments:

    FilterModuleContext         - our filter context area
    NetBufferLists              - a linked list of NetBufferLists that this
                                  filter driver indicated in a previous call to
                                  NdisFIndicateReceiveNetBufferLists
    ReturnFlags                 - flags specifying if the caller is at DISPATCH_LEVEL

--*/
{
    POPEN_INSTANCE      ChildOpen;
    POPEN_INSTANCE      GroupOpen;
    POPEN_INSTANCE      TempOpen;
    BOOLEAN             FreeBufAfterWrite;
    PNET_BUFFER_LIST    pNetBufList;
    PNET_BUFFER_LIST    pNextNetBufList;
    PNET_BUFFER         CurrBuff;
    PMDL                pMdl;
    POPEN_INSTANCE      Open = (POPEN_INSTANCE) FilterModuleContext;

    TRACE_ENTER();

#ifdef HAVE_SEND_TO_RECEIVE_PATH_SUPPORT
    pNetBufList = NetBufferLists;

    while (pNetBufList != NULL)
    {
        pNextNetBufList = NET_BUFFER_LIST_NEXT_NBL(pNetBufList);
        NET_BUFFER_LIST_NEXT_NBL(pNetBufList) = NULL;

        if (pNetBufList->SourceHandle == Open->AdapterHandle) // These are our self-sent packets
        {
            TRACE_MESSAGE(PACKET_DEBUG_LOUD, "hahahaha own send rx packets");
            ChildOpen = NPFGetNBLChildOpen(pNetBufList); // Get the child open object that sent these packets
            FreeBufAfterWrite = RESERVED(pNetBufList)->FreeBufAfterWrite;

            if (FreeBufAfterWrite)
            {
                //
                // Packet sent by NPF_BufferedWrite()
                //

                // Free the NBL that I allocated myself
                CurrBuff = NET_BUFFER_LIST_FIRST_NB(pNetBufList);
                while (CurrBuff)
                {
                    pMdl = NET_BUFFER_FIRST_MDL(CurrBuff);
                    NdisFreeMdl(pMdl); // Free the MDL
                    CurrBuff = NET_BUFFER_NEXT_NB(CurrBuff);
                }
                NdisFreeNetBufferList(pNetBufList); // Free the NBL
            }
            else
            {
                //
                // Packet sent by NPF_Write()
                //

                // Free the NBL that I allocated myself
                NdisFreeNetBufferList(pNetBufList); // Free the NBL
            }

            // If Open->GroupHead is NULL, this Open is the GroupHead itself; the GroupHead
            // is known to NDIS and is invoked in the NPF_SendCompleteEx() function.
            if (Open->GroupHead != NULL)
            {
                GroupOpen = Open->GroupHead->GroupNext;
            }
            else
            {
                GroupOpen = Open->GroupNext;
            }

            //GroupOpen = Open->GroupNext;

            while (GroupOpen != NULL)
            {
                TempOpen = GroupOpen;
                if (ChildOpen == TempOpen) // Only indicate the specific child open object
                {
                    NPF_SendCompleteExForEachOpen(TempOpen, FreeBufAfterWrite);
                    break;
                }

                GroupOpen = TempOpen->GroupNext;
            }
        }
        else
        {
            TRACE_MESSAGE(PACKET_DEBUG_LOUD, "hahahaha not own send rx packets");
            // Return the received NBLs. If we removed any NBLs from the chain, make
            // sure the chain isn't empty (i.e., NetBufferLists != NULL).
            NdisFReturnNetBufferLists(Open->AdapterHandle, pNetBufList, ReturnFlags);
        }

        pNetBufList = pNextNetBufList;
    }
#else
    // Return the received NBLs. If we removed any NBLs from the chain, make
    // sure the chain isn't empty (i.e., NetBufferLists != NULL).
    NdisFReturnNetBufferLists(Open->AdapterHandle, NetBufferLists, ReturnFlags);
#endif

    TRACE_EXIT();
}

My log:

00010269  58.36443710  mdl 42
00010270  58.36444092  hahahahahahahahahahahaha:: SendToRxPath, Open->AdapterHandle=FFFFFA8003C24010, pNetBufferList=68928096
00010271  58.36450577  --> NPF_ReturnEx
00010272  58.36450577      NPF_ReturnEx: hahahaha own send rx packets
00010273  58.36451340  <-- NPF_ReturnEx
00010274  59.04499054  --> NPF_NetworkClassify
00010275  59.04499817  --> NPF_IsPacketSelfSent
00010276  59.04499817  <-- NPF_IsPacketSelfSent
00010277  59.04499817      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010278  59.04500961  <-- NPF_NetworkClassify
00010279  59.04502869  --> NPF_SendEx
00010280  59.04503632  --> NPF_SendCompleteEx
00010281  59.04504395  <-- NPF_SendCompleteEx
00010282  59.04504395  <-- NPF_SendEx
00010283  59.04520798  --> NPF_NetworkClassify
00010284  59.04520798  --> NPF_IsPacketSelfSent
00010285  59.04521561  <-- NPF_IsPacketSelfSent
00010286  59.04521561      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010287  59.04522324  <-- NPF_NetworkClassify
00010288  59.04529953  --> NPF_SendEx
00010289  59.04529953  --> NPF_SendCompleteEx
00010290  59.04530716  <-- NPF_SendCompleteEx
00010291  59.04530716  <-- NPF_SendEx
00010292  59.04531097  --> NPF_NetworkClassify
00010293  59.04531097  --> NPF_IsPacketSelfSent
00010294  59.04531097  <-- NPF_IsPacketSelfSent
00010295  59.04531860      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010296  59.04531860  <-- NPF_NetworkClassify
00010297  59.04541397  --> NPF_NetworkClassify
00010298  59.04541397  --> NPF_IsPacketSelfSent
00010299  59.04541397  <-- NPF_IsPacketSelfSent
00010300  59.04542160      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010301  59.04542160  <-- NPF_NetworkClassify
00010302  59.04543304  --> NPF_SendEx
00010303  59.04543304  received on cpu 0  3
00010304  59.04544067  received on cpu 0  2
00010305  59.04544067  mdl 62
00010306  59.04544067  next mdl 24, added
00010307  59.04544830  --> NPF_SendCompleteEx
00010308  59.04547882  <-- NPF_SendCompleteEx
00010309  59.04548645  <-- NPF_SendEx
00010310  59.04558563  --> NPF_NetworkClassify
00010311  59.04558563  --> NPF_IsPacketSelfSent
00010312  59.04558563  <-- NPF_IsPacketSelfSent
00010313  59.04559326      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010314  59.04560089  <-- NPF_NetworkClassify
00010315  59.04560471  --> NPF_SendEx
00010316  59.04561234  received on cpu 0  4
00010317  59.04561234  received on cpu 0  3
00010318  59.04561234  mdl 42
00010319  59.04561996  next mdl 24, added
00010320  59.04561996  --> NPF_SendCompleteEx
00010321  59.04562378  <-- NPF_SendCompleteEx
00010322  59.04562378  <-- NPF_SendEx
00010323  59.15098953  --> NPF_NetworkClassify
00010324  59.15098953  --> NPF_IsPacketSelfSent
00010325  59.15099335  <-- NPF_IsPacketSelfSent
00010326  59.15099335      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010327  59.15103912  <-- NPF_NetworkClassify
00010328  59.15105820  --> NPF_SendEx
00010329  59.15106583  --> NPF_SendCompleteEx
00010330  59.15106583  <-- NPF_SendCompleteEx
00010331  59.15106583  <-- NPF_SendEx
00010332  59.15113449  --> NPF_NetworkClassify
00010333  59.15113449  --> NPF_IsPacketSelfSent
00010334  59.15114212  <-- NPF_IsPacketSelfSent
00010335  59.15114212      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010336  59.15114975  <-- NPF_NetworkClassify
00010337  59.15118027  --> NPF_SendEx
00010338  59.15118027  --> NPF_SendCompleteEx
00010339  59.15118790  <-- NPF_SendCompleteEx
00010340  59.15118790  <-- NPF_SendEx
00010341  59.15118790  --> NPF_NetworkClassify
00010342  59.15119171  --> NPF_IsPacketSelfSent
00010343  59.15119171  <-- NPF_IsPacketSelfSent
00010344  59.15119171      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010345  59.15119934  <-- NPF_NetworkClassify
00010346  59.15123367  --> NPF_NetworkClassify
00010347  59.15123749  --> NPF_IsPacketSelfSent
00010348  59.15123749  <-- NPF_IsPacketSelfSent
00010349  59.15123749      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010350  59.15124512  <-- NPF_NetworkClassify
00010351  59.15125275  --> NPF_SendEx
00010352  59.15125275  received on cpu 1  1
00010353  59.15125656  received on cpu 1  1
00010354  59.15132904  mdl 62
00010355  59.15133286  next mdl 24, added
00010356  59.15134048  --> NPF_SendCompleteEx
00010357  59.15134811  <-- NPF_SendCompleteEx
00010358  59.15134811  <-- NPF_SendEx
00010359  59.15138626  --> NPF_NetworkClassify
00010360  59.15138626  --> NPF_IsPacketSelfSent
00010361  59.15138626  <-- NPF_IsPacketSelfSent
00010362  59.15139389      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010363  59.15139771  <-- NPF_NetworkClassify
00010364  59.15140533  --> NPF_SendEx
00010365  59.15140533  received on cpu 1  2
00010366  59.15140533  received on cpu 1  2
00010367  59.15141296  mdl 42
00010368  59.15141296  next mdl 24, added
00010369  59.15141296  --> NPF_SendCompleteEx
00010370  59.15141678  <-- NPF_SendCompleteEx
00010371  59.15141678  <-- NPF_SendEx
00010372  59.19804001  --> NPF_SendEx
00010373  59.19805527  --> NPF_SendCompleteEx
00010374  59.19805527  <-- NPF_SendCompleteEx
00010375  59.19805527  <-- NPF_SendEx
00010376  59.35518646  --> NPF_NetworkClassify
00010377  59.35519409  --> NPF_IsPacketSelfSent
00010378  59.35519409  <-- NPF_IsPacketSelfSent
00010379  59.35519409      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010380  59.35520554  <-- NPF_NetworkClassify
00010381  59.35526276  --> NPF_SendEx
00010382  59.35527039  --> NPF_SendCompleteEx
00010383  59.35527802  <-- NPF_SendCompleteEx
00010384  59.35527802  <-- NPF_SendEx
00010385  59.35528183  --> NPF_NetworkClassify
00010386  59.35528183  --> NPF_IsPacketSelfSent
00010387  59.35528183  <-- NPF_IsPacketSelfSent
00010388  59.35528946      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010389  59.35528946  <-- NPF_NetworkClassify
00010390  59.35543823  --> NPF_NetworkClassify
00010391  59.35543823  --> NPF_IsPacketSelfSent
00010392  59.35543823  <-- NPF_IsPacketSelfSent
00010393  59.35544205      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010394  59.35544968  <-- NPF_NetworkClassify
00010395  59.35546112  --> NPF_SendEx
00010396  59.35546875  received on cpu 0  5
00010397  59.35546875  received on cpu 0  4
00010398  59.35547638  mdl 42
00010399  59.35547638  next mdl 50, added
00010400  59.35548019  --> NPF_SendCompleteEx
00010401  59.35548019  <-- NPF_SendCompleteEx
00010402  59.35548782  <-- NPF_SendEx
00010403  59.47556305  --> NPF_TapEx
00010404  59.47557068  --> NPF_ReturnEx
00010405  59.47557068      NPF_ReturnEx: hahahaha not own send rx packets
00010406  59.47557068  <-- NPF_ReturnEx
00010407  59.47557831  <-- NPF_TapEx
00010408  59.58328247  --> NPF_TapEx
00010409  59.58329773  --> NPF_ReturnEx
00010410  59.58329773      NPF_ReturnEx: hahahaha not own send rx packets
00010411  59.58329773  <-- NPF_ReturnEx
00010412  59.58329773  <-- NPF_TapEx
00010413  59.58549881  --> NPF_TapEx
00010414  59.58550262  --> NPF_ReturnEx
00010415  59.58551025      NPF_ReturnEx: hahahaha not own send rx packets
00010416  59.58551025  <-- NPF_ReturnEx
00010417  59.58551025  <-- NPF_TapEx
00010418  60.11791992  --> NPF_NetworkClassify
00010419  60.11791992  --> NPF_IsPacketSelfSent
00010420  60.11792755  <-- NPF_IsPacketSelfSent
00010421  60.11792755      NPF_NetworkClassify: NPF_NetworkClassify: NPF_IsPacketSelfSent() [bSelfSent: 0]
00010422  60.11793900  <-- NPF_NetworkClassify
00010423  60.11795807  --> NPF_SendEx
00010424  60.11796570  received on cpu 0  6
00010425  60.11796570  received on cpu 0  5
00010426  60.11796951  mdl 42
00010427  60.11796951  next mdl 50, added

My usage of NdisFIndicateReceiveNetBufferLists follows MSDN: https://msdn.microsoft.com/en-us/library/windows/hardware/ff570448(v=vs.85).aspx
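As I read that page, there are two ownership models for the indicated NBLs; my code uses the first one. A sketch of both (FilterHandle and pNbl stand in for my Open->AdapterHandle and pNetBufferList):

/* Model A: normal indication (what I do). NDIS takes ownership of the NBL;
 * it comes back later through the FilterReturnNetBufferLists handler. */
NdisFIndicateReceiveNetBufferLists(FilterHandle, pNbl,
    NDIS_DEFAULT_PORT_NUMBER, 1, 0 /* ReceiveFlags */);
/* ...do NOT touch pNbl here; free it in FilterReturnNetBufferLists. */

/* Model B: low-resources indication. The upper layers must copy the data
 * during the call; ownership returns to the caller as soon as the call
 * returns, and FilterReturnNetBufferLists is never called for this NBL. */
NdisFIndicateReceiveNetBufferLists(FilterHandle, pNbl,
    NDIS_DEFAULT_PORT_NUMBER, 1, NDIS_RECEIVE_FLAGS_RESOURCES);
NdisFreeNetBufferList(pNbl);  /* safe: we already own it again */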

